// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (c) 2022 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     Danilo Krummrich <dakr@redhat.com>
 *
 */

#include <drm/drm_gpuvm.h>

#include <linux/interval_tree_generic.h>
#include <linux/mm.h>

/**
 * DOC: Overview
 *
 * The DRM GPU VA Manager, represented by struct drm_gpuvm, keeps track of a
 * GPU's virtual address (VA) space and manages the corresponding virtual
 * mappings represented by &drm_gpuva objects. It also keeps track of the
 * mapping's backing &drm_gem_object buffers.
 *
 * &drm_gem_object buffers maintain a list of &drm_gpuva objects representing
 * all existent GPU VA mappings using this &drm_gem_object as backing buffer.
 *
 * GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also
 * keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'.
 *
 * The GPU VA manager internally uses a rb-tree to manage the
 * &drm_gpuva mappings within a GPU's virtual address space.
 *
 * The &drm_gpuvm structure contains a special &drm_gpuva representing the
 * portion of VA space reserved by the kernel. This node is initialized together
 * with the GPU VA manager instance and removed when the GPU VA manager is
 * destroyed.
 *
 * In a typical application drivers would embed struct drm_gpuvm and
 * struct drm_gpuva within their own driver specific structures; hence, there
 * won't be any memory allocations of its own nor memory allocations of
 * &drm_gpuva entries.
 *
 * The data structures needed to store &drm_gpuvas within the &drm_gpuvm are
 * contained within struct drm_gpuva already. Hence, for inserting &drm_gpuva
 * entries from within dma-fence signalling critical sections it is enough to
 * pre-allocate the &drm_gpuva structures, as illustrated below.
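 *
 * For instance, a driver might pre-allocate a &drm_gpuva while memory
 * allocations are still allowed and only insert it once it processes the
 * corresponding &drm_gpuva_op_map from within the fence signalling critical
 * path (a minimal sketch; the surrounding job structure is driver specific)::
 *
 *	// job preparation; allocating memory is still allowed
 *	va = kzalloc(sizeof(*va), GFP_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *
 *	// ... later, from the dma-fence signalling critical path, no
 *	// allocations are needed; the pre-allocated &drm_gpuva is merely
 *	// initialized and inserted
 *	drm_gpuva_map(gpuvm, va, &op->map);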
 */

/**
 * DOC: Split and Merge
 *
 * Besides its capability to manage and represent a GPU VA space, the
 * GPU VA manager also provides functions to let the &drm_gpuvm calculate a
 * sequence of operations to satisfy a given map or unmap request.
 *
 * Therefore the DRM GPU VA manager provides an algorithm implementing splitting
 * and merging of existent GPU VA mappings with the ones that are requested to
 * be mapped or unmapped. This feature is required by the Vulkan API to
 * implement Vulkan 'Sparse Memory Bindings' - driver UAPIs often refer to this
 * as VM BIND.
 *
 * Drivers can call drm_gpuvm_sm_map() to receive a sequence of callbacks
 * containing map, unmap and remap operations for a given newly requested
 * mapping. The sequence of callbacks represents the set of operations to
 * execute in order to integrate the new mapping cleanly into the current state
 * of the GPU VA space.
 *
 * Depending on how the new GPU VA mapping intersects with the existent mappings
 * of the GPU VA space the &drm_gpuvm_ops callbacks contain an arbitrary amount
 * of unmap operations, a maximum of two remap operations and a single map
 * operation. The caller might receive no callback at all if no operation is
 * required, e.g. if the requested mapping already exists in the exact same way.
 *
 * The single map operation represents the original map operation requested by
 * the caller.
 *
 * &drm_gpuva_op_unmap contains a 'keep' field, which indicates whether the
 * &drm_gpuva to unmap is physically contiguous with the original mapping
 * request. Optionally, if 'keep' is set, drivers may keep the actual page table
 * entries for this &drm_gpuva, adding only the missing page table entries and
 * updating the &drm_gpuvm's view of things accordingly.
 *
 * Drivers may do the same optimization, namely delta page table updates, also
 * for remap operations. This is possible since &drm_gpuva_op_remap consists of
 * one unmap operation and one or two map operations, such that drivers can
 * derive the page table update delta accordingly, as sketched below.
 *
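 * For instance, a remap callback might only tear down the region of the
 * original mapping that is not covered by the resulting prev/next mappings
 * (a minimal sketch; driver_unmap_pages() is a hypothetical helper)::
 *
 *	struct drm_gpuva_op_unmap *u = op->remap.unmap;
 *	u64 addr = u->va->va.addr;
 *	u64 end = addr + u->va->va.range;
 *
 *	if (op->remap.prev)
 *		addr = op->remap.prev->va.addr + op->remap.prev->va.range;
 *	if (op->remap.next)
 *		end = op->remap.next->va.addr;
 *
 *	// only the PTEs within [addr, end) actually need to be torn down
 *	driver_unmap_pages(addr, end - addr);
 *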
 * Note that there can't be more than two existent mappings to split up, one at
 * the beginning and one at the end of the new mapping, hence there is a
 * maximum of two remap operations.
 *
 * Analogous to drm_gpuvm_sm_map(), drm_gpuvm_sm_unmap() uses &drm_gpuvm_ops to
 * call back into the driver in order to unmap a range of GPU VA space. The
 * logic behind this function is way simpler though: For all existent mappings
 * enclosed by the given range unmap operations are created. For mappings which
 * are only partially located within the given range, remap operations are
 * created such that those mappings are split up and re-mapped partially.
 *
 * As an alternative to drm_gpuvm_sm_map() and drm_gpuvm_sm_unmap(),
 * drm_gpuvm_sm_map_ops_create() and drm_gpuvm_sm_unmap_ops_create() can be used
 * to directly obtain an instance of struct drm_gpuva_ops containing a list of
 * &drm_gpuva_op, which can be iterated with drm_gpuva_for_each_op(). This list
 * contains the &drm_gpuva_ops analogous to the callbacks one would receive when
 * calling drm_gpuvm_sm_map() or drm_gpuvm_sm_unmap(). While this way requires
 * more memory (to allocate the &drm_gpuva_ops), it provides drivers a way to
 * iterate the &drm_gpuva_op multiple times, e.g. once in a context where memory
 * allocations are possible (e.g. to allocate GPU page tables) and once in the
 * dma-fence signalling critical path.
 *
 * To update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert() and
 * drm_gpuva_remove() may be used. These functions can safely be used from
 * &drm_gpuvm_ops callbacks originating from drm_gpuvm_sm_map() or
 * drm_gpuvm_sm_unmap(). However, it might be more convenient to use the
 * provided helper functions drm_gpuva_map(), drm_gpuva_remap() and
 * drm_gpuva_unmap() instead.
 *
 * The following diagram depicts the basic relationships of existent GPU VA
 * mappings, a newly requested mapping and the resulting mappings as implemented
 * by drm_gpuvm_sm_map() - it doesn't cover arbitrary combinations of these.
 *
 * 1) Requested mapping is identical. Replace it, but indicate the backing PTEs
 *    could be kept.
 *
 *    ::
 *
 *	     0     a     1
 *	old: |-----------| (bo_offset=n)
 *
 *	     0     a     1
 *	req: |-----------| (bo_offset=n)
 *
 *	     0     a     1
 *	new: |-----------| (bo_offset=n)
 *
 *
 * 2) Requested mapping is identical, except for the BO offset, hence replace
 *    the mapping.
 *
 *    ::
 *
 *	     0     a     1
 *	old: |-----------| (bo_offset=n)
 *
 *	     0     a     1
 *	req: |-----------| (bo_offset=m)
 *
 *	     0     a     1
 *	new: |-----------| (bo_offset=m)
 *
 *
 * 3) Requested mapping is identical, except for the backing BO, hence replace
 *    the mapping.
 *
 *    ::
 *
 *	     0     a     1
 *	old: |-----------| (bo_offset=n)
 *
 *	     0     b     1
 *	req: |-----------| (bo_offset=n)
 *
 *	     0     b     1
 *	new: |-----------| (bo_offset=n)
 *
 *
 * 4) Existent mapping is a left aligned subset of the requested one, hence
 *    replace the existent one.
 *
 *    ::
 *
 *	     0  a  1
 *	old: |-----|       (bo_offset=n)
 *
 *	     0     a     2
 *	req: |-----------| (bo_offset=n)
 *
 *	     0     a     2
 *	new: |-----------| (bo_offset=n)
 *
 *    .. note::
 *       We expect to see the same result for a request with a different BO
 *       and/or non-contiguous BO offset.
 *
 *
 * 5) Requested mapping's range is a left aligned subset of the existent one,
 *    but backed by a different BO. Hence, map the requested mapping and split
 *    the existent one adjusting its BO offset.
 *
 *    ::
 *
 *	     0     a     2
 *	old: |-----------| (bo_offset=n)
 *
 *	     0  b  1
 *	req: |-----|       (bo_offset=n)
 *
 *	     0  b  1  a' 2
 *	new: |-----|-----| (b.bo_offset=n, a.bo_offset=n+1)
 *
 *    .. note::
 *       We expect to see the same result for a request with a different BO
 *       and/or non-contiguous BO offset.
 *
 *
 * 6) Existent mapping is a superset of the requested mapping. Split it up, but
 *    indicate that the backing PTEs could be kept.
 *
 *    ::
 *
 *	     0     a     2
 *	old: |-----------| (bo_offset=n)
 *
 *	     0  a  1
 *	req: |-----|       (bo_offset=n)
 *
 *	     0  a  1  a' 2
 *	new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1)
 *
 *
 * 7) Requested mapping's range is a right aligned subset of the existent one,
 *    but backed by a different BO. Hence, map the requested mapping and split
 *    the existent one, without adjusting the BO offset.
 *
 *    ::
 *
 *	     0     a     2
 *	old: |-----------| (bo_offset=n)
 *
 *	           1  b  2
 *	req:       |-----| (bo_offset=m)
 *
 *	     0  a  1  b  2
 *	new: |-----|-----| (a.bo_offset=n,b.bo_offset=m)
 *
 *
 * 8) Existent mapping is a superset of the requested mapping. Split it up, but
 *    indicate that the backing PTEs could be kept.
 *
 *    ::
 *
 *	     0     a     2
 *	old: |-----------| (bo_offset=n)
 *
 *	           1  a  2
 *	req:       |-----| (bo_offset=n+1)
 *
 *	     0  a' 1  a  2
 *	new: |-----|-----| (a'.bo_offset=n, a.bo_offset=n+1)
 *
 *
 * 9) Existent mapping is overlapped at the end by the requested mapping backed
 *    by a different BO. Hence, map the requested mapping and split up the
 *    existent one, without adjusting the BO offset.
 *
 *    ::
 *
 *	     0     a     2
 *	old: |-----------|       (bo_offset=n)
 *
 *	           1     b     3
 *	req:       |-----------| (bo_offset=m)
 *
 *	     0  a  1     b     3
 *	new: |-----|-----------| (a.bo_offset=n,b.bo_offset=m)
 *
 *
 * 10) Existent mapping is overlapped by the requested mapping, both having the
 *     same backing BO with a contiguous offset. Indicate the backing PTEs of
 *     the old mapping could be kept.
 *
 *     ::
 *
 *	     0     a     2
 *	old: |-----------|       (bo_offset=n)
 *
 *	           1     a     3
 *	req:       |-----------| (bo_offset=n+1)
 *
 *	     0  a' 1     a     3
 *	new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1)
 *
 *
 * 11) Requested mapping's range is a centered subset of the existent one
 *     having a different backing BO. Hence, map the requested mapping and split
 *     up the existent one in two mappings, adjusting the BO offset of the right
 *     one accordingly.
 *
 *     ::
 *
 *	     0        a        3
 *	old: |-----------------| (bo_offset=n)
 *
 *	           1  b  2
 *	req:       |-----|       (bo_offset=m)
 *
 *	     0  a  1  b  2  a' 3
 *	new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2)
 *
 *
 * 12) Requested mapping is a contiguous subset of the existent one. Split it
 *     up, but indicate that the backing PTEs could be kept.
 *
 *     ::
 *
 *	     0        a        3
 *	old: |-----------------| (bo_offset=n)
 *
 *	           1  a  2
 *	req:       |-----|       (bo_offset=n+1)
 *
 *	     0  a' 1  a  2  a'' 3
 *	new: |-----|-----|-----| (a'.bo_offset=n, a.bo_offset=n+1, a''.bo_offset=n+2)
 *
 *
 * 13) Existent mapping is a right aligned subset of the requested one, hence
 *     replace the existent one.
 *
 *     ::
 *
 *	           1  a  2
 *	old:       |-----|       (bo_offset=n+1)
 *
 *	     0     a     2
 *	req: |-----------| (bo_offset=n)
 *
 *	     0     a     2
 *	new: |-----------| (bo_offset=n)
 *
 *     .. note::
 *        We expect to see the same result for a request with a different bo
 *        and/or non-contiguous bo_offset.
 *
 *
 * 14) Existent mapping is a centered subset of the requested one, hence
 *     replace the existent one.
 *
 *     ::
 *
 *	           1  a  2
 *	old:       |-----|             (bo_offset=n+1)
 *
 *	     0        a        3
 *	req: |-----------------| (bo_offset=n)
 *
 *	     0        a        3
 *	new: |-----------------| (bo_offset=n)
 *
 *     .. note::
 *        We expect to see the same result for a request with a different bo
 *        and/or non-contiguous bo_offset.
 *
 *
 * 15) Existent mapping is overlapped at the beginning by the requested mapping
 *     backed by a different BO. Hence, map the requested mapping and split up
 *     the existent one, adjusting its BO offset accordingly.
 *
 *     ::
 *
 *	           1     a     3
 *	old:       |-----------| (bo_offset=n)
 *
 *	     0     b     2
 *	req: |-----------|       (bo_offset=m)
 *
 *	     0     b     2  a' 3
 *	new: |-----------|-----| (b.bo_offset=m,a.bo_offset=n+2)
 */

/**
 * DOC: Locking
 *
 * Generally, the GPU VA manager does not take care of locking itself; it is
 * the driver's responsibility to take care of locking. Drivers might want to
 * protect the following operations: inserting, removing and iterating
 * &drm_gpuva objects as well as generating all kinds of operations, such as
 * split / merge or prefetch.
 *
 * The GPU VA manager also does not take care of the locking of the backing
 * &drm_gem_object buffers' GPU VA lists by itself; drivers are responsible for
 * enforcing mutual exclusion using either the GEM's dma_resv lock or
 * alternatively a driver specific external lock. For the latter see also
 * drm_gem_gpuva_set_lock().
 *
 * However, the GPU VA manager contains lockdep checks to ensure callers of its
 * API hold the corresponding lock whenever the &drm_gem_object's GPU VA list is
 * accessed by functions such as drm_gpuva_link() or drm_gpuva_unlink().
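 *
 * For instance, a driver using the GEM's dma_resv lock for this purpose might
 * link a new mapping as follows (a minimal sketch; error handling and the
 * surrounding setup are driver specific)::
 *
 *	dma_resv_lock(obj->resv, NULL);
 *	drm_gpuva_link(va);
 *	dma_resv_unlock(obj->resv);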
 */

/**
 * DOC: Examples
 *
 * This section gives two examples of how to let the DRM GPUVA Manager generate
 * &drm_gpuva_op in order to satisfy a given map or unmap request and how to
 * make use of them.
 *
 * The below code is strictly limited to illustrate the generic usage pattern.
 * To maintain simplicity, it doesn't make use of any abstractions for common
 * code, different (asynchronous) stages with fence signalling critical paths,
 * any other helpers or error handling in terms of freeing memory and dropping
 * previously taken locks.
 *
 * 1) Obtain a list of &drm_gpuva_op to create a new mapping::
 *
 *	// Allocates a new &drm_gpuva.
 *	struct drm_gpuva * driver_gpuva_alloc(void);
 *
 *	// Typically drivers would embed the &drm_gpuvm and &drm_gpuva
 *	// structure in individual driver structures and lock the dma-resv with
 *	// drm_exec or similar helpers.
 *	int driver_mapping_create(struct drm_gpuvm *gpuvm,
 *				  u64 addr, u64 range,
 *				  struct drm_gem_object *obj, u64 offset)
 *	{
 *		struct drm_gpuva_ops *ops;
 *		struct drm_gpuva_op *op;
 *
 *		driver_lock_va_space();
 *		ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
 *						  obj, offset);
 *		if (IS_ERR(ops))
 *			return PTR_ERR(ops);
 *
 *		drm_gpuva_for_each_op(op, ops) {
 *			struct drm_gpuva *va;
 *
 *			switch (op->op) {
 *			case DRM_GPUVA_OP_MAP:
 *				va = driver_gpuva_alloc();
 *				if (!va)
 *					; // unwind previous VA space updates,
 *					  // free memory and unlock
 *
 *				driver_vm_map();
 *				drm_gpuva_map(gpuvm, va, &op->map);
 *				drm_gpuva_link(va);
 *
 *				break;
 *			case DRM_GPUVA_OP_REMAP: {
 *				struct drm_gpuva *prev = NULL, *next = NULL;
 *
 *				va = op->remap.unmap->va;
 *
 *				if (op->remap.prev) {
 *					prev = driver_gpuva_alloc();
 *					if (!prev)
 *						; // unwind previous VA space
 *						  // updates, free memory and
 *						  // unlock
 *				}
 *
 *				if (op->remap.next) {
 *					next = driver_gpuva_alloc();
 *					if (!next)
 *						; // unwind previous VA space
 *						  // updates, free memory and
 *						  // unlock
 *				}
 *
 *				driver_vm_remap();
 *				drm_gpuva_remap(prev, next, &op->remap);
 *
 *				drm_gpuva_unlink(va);
 *				if (prev)
 *					drm_gpuva_link(prev);
 *				if (next)
 *					drm_gpuva_link(next);
 *
 *				break;
 *			}
 *			case DRM_GPUVA_OP_UNMAP:
 *				va = op->unmap.va;
 *
 *				driver_vm_unmap();
 *				drm_gpuva_unlink(va);
 *				drm_gpuva_unmap(&op->unmap);
 *
 *				break;
 *			default:
 *				break;
 *			}
 *		}
 *		driver_unlock_va_space();
 *
 *		return 0;
 *	}
 *
 * 2) Receive a callback for each &drm_gpuva_op to create a new mapping::
 *
 *	struct driver_context {
 *		struct drm_gpuvm *gpuvm;
 *		struct drm_gpuva *new_va;
 *		struct drm_gpuva *prev_va;
 *		struct drm_gpuva *next_va;
 *	};
 *
 *	// ops to pass to drm_gpuvm_init()
 *	static const struct drm_gpuvm_ops driver_gpuvm_ops = {
 *		.sm_step_map = driver_gpuva_map,
 *		.sm_step_remap = driver_gpuva_remap,
 *		.sm_step_unmap = driver_gpuva_unmap,
 *	};
 *
 *	// Typically drivers would embed the &drm_gpuvm and &drm_gpuva
 *	// structure in individual driver structures and lock the dma-resv with
 *	// drm_exec or similar helpers.
 *	int driver_mapping_create(struct drm_gpuvm *gpuvm,
 *				  u64 addr, u64 range,
 *				  struct drm_gem_object *obj, u64 offset)
 *	{
 *		struct driver_context ctx;
 *		struct drm_gpuva_ops *ops;
 *		struct drm_gpuva_op *op;
 *		int ret = 0;
 *
 *		ctx.gpuvm = gpuvm;
 *
 *		ctx.new_va = kzalloc(sizeof(*ctx.new_va), GFP_KERNEL);
 *		ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL);
 *		ctx.next_va = kzalloc(sizeof(*ctx.next_va), GFP_KERNEL);
 *		if (!ctx.new_va || !ctx.prev_va || !ctx.next_va) {
 *			ret = -ENOMEM;
 *			goto out;
 *		}
 *
 *		driver_lock_va_space();
 *		ret = drm_gpuvm_sm_map(gpuvm, &ctx, addr, range, obj, offset);
 *		driver_unlock_va_space();
 *
 *	out:
 *		kfree(ctx.new_va);
 *		kfree(ctx.prev_va);
 *		kfree(ctx.next_va);
 *		return ret;
 *	}
 *
 *	int driver_gpuva_map(struct drm_gpuva_op *op, void *__ctx)
 *	{
 *		struct driver_context *ctx = __ctx;
 *
 *		drm_gpuva_map(ctx->gpuvm, ctx->new_va, &op->map);
 *
 *		drm_gpuva_link(ctx->new_va);
 *
 *		// prevent the new GPUVA from being freed in
 *		// driver_mapping_create()
 *		ctx->new_va = NULL;
 *
 *		return 0;
 *	}
 *
 *	int driver_gpuva_remap(struct drm_gpuva_op *op, void *__ctx)
 *	{
 *		struct driver_context *ctx = __ctx;
 *
 *		drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
 *
 *		drm_gpuva_unlink(op->remap.unmap->va);
 *		kfree(op->remap.unmap->va);
 *
 *		if (op->remap.prev) {
 *			drm_gpuva_link(ctx->prev_va);
 *			ctx->prev_va = NULL;
 *		}
 *
 *		if (op->remap.next) {
 *			drm_gpuva_link(ctx->next_va);
 *			ctx->next_va = NULL;
 *		}
 *
 *		return 0;
 *	}
 *
 *	int driver_gpuva_unmap(struct drm_gpuva_op *op, void *__ctx)
 *	{
 *		drm_gpuva_unlink(op->unmap.va);
 *		drm_gpuva_unmap(&op->unmap);
 *		kfree(op->unmap.va);
 *
 *		return 0;
 *	}
 */
599 | ||
600 | #define to_drm_gpuva(__node) container_of((__node), struct drm_gpuva, rb.node) | |
601 | ||
602 | #define GPUVA_START(node) ((node)->va.addr) | |
603 | #define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1) | |
604 | ||
605 | /* We do not actually use drm_gpuva_it_next(), tell the compiler to not complain | |
606 | * about this. | |
607 | */ | |
608 | INTERVAL_TREE_DEFINE(struct drm_gpuva, rb.node, u64, rb.__subtree_last, | |
609 | GPUVA_START, GPUVA_LAST, static __maybe_unused, | |
610 | drm_gpuva_it) | |
611 | ||
f72c2db4 | 612 | static int __drm_gpuva_insert(struct drm_gpuvm *gpuvm, |
e6303f32 DK |
613 | struct drm_gpuva *va); |
614 | static void __drm_gpuva_remove(struct drm_gpuva *va); | |
615 | ||
616 | static bool | |
546ca4d3 | 617 | drm_gpuvm_check_overflow(struct drm_gpuvm *gpuvm, u64 addr, u64 range) |
e6303f32 DK |
618 | { |
619 | u64 end; | |
620 | ||
546ca4d3 DK |
621 | return drm_WARN(gpuvm->drm, check_add_overflow(addr, range, &end), |
622 | "GPUVA address limited to %zu bytes.\n", sizeof(end)); | |
e6303f32 DK |
623 | } |
624 | ||
625 | static bool | |
f72c2db4 | 626 | drm_gpuvm_in_mm_range(struct drm_gpuvm *gpuvm, u64 addr, u64 range) |
e6303f32 DK |
627 | { |
628 | u64 end = addr + range; | |
f72c2db4 DK |
629 | u64 mm_start = gpuvm->mm_start; |
630 | u64 mm_end = mm_start + gpuvm->mm_range; | |
e6303f32 DK |
631 | |
632 | return addr >= mm_start && end <= mm_end; | |
633 | } | |
634 | ||
635 | static bool | |
f72c2db4 | 636 | drm_gpuvm_in_kernel_node(struct drm_gpuvm *gpuvm, u64 addr, u64 range) |
e6303f32 DK |
637 | { |
638 | u64 end = addr + range; | |
f72c2db4 DK |
639 | u64 kstart = gpuvm->kernel_alloc_node.va.addr; |
640 | u64 krange = gpuvm->kernel_alloc_node.va.range; | |
e6303f32 DK |
641 | u64 kend = kstart + krange; |
642 | ||
643 | return krange && addr < kend && kstart < end; | |
644 | } | |
645 | ||
646 | static bool | |
f72c2db4 | 647 | drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, |
e6303f32 DK |
648 | u64 addr, u64 range) |
649 | { | |
546ca4d3 | 650 | return !drm_gpuvm_check_overflow(gpuvm, addr, range) && |
f72c2db4 DK |
651 | drm_gpuvm_in_mm_range(gpuvm, addr, range) && |
652 | !drm_gpuvm_in_kernel_node(gpuvm, addr, range); | |
e6303f32 DK |
653 | } |
654 | ||
655 | /** | |
f72c2db4 DK |
656 | * drm_gpuvm_init() - initialize a &drm_gpuvm |
657 | * @gpuvm: pointer to the &drm_gpuvm to initialize | |
e6303f32 | 658 | * @name: the name of the GPU VA space |
546ca4d3 | 659 | * @drm: the &drm_device this VM resides in |
e6303f32 DK |
660 | * @start_offset: the start offset of the GPU VA space |
661 | * @range: the size of the GPU VA space | |
662 | * @reserve_offset: the start of the kernel reserved GPU VA area | |
663 | * @reserve_range: the size of the kernel reserved GPU VA area | |
f72c2db4 | 664 | * @ops: &drm_gpuvm_ops called on &drm_gpuvm_sm_map / &drm_gpuvm_sm_unmap |
e6303f32 | 665 | * |
f72c2db4 | 666 | * The &drm_gpuvm must be initialized with this function before use. |
e6303f32 | 667 | * |
f72c2db4 | 668 | * Note that @gpuvm must be cleared to 0 before calling this function. The given |
e6303f32 DK |
669 | * &name is expected to be managed by the surrounding driver structures. |
670 | */ | |
671 | void | |
546ca4d3 DK |
672 | drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, |
673 | struct drm_device *drm, | |
f72c2db4 DK |
674 | u64 start_offset, u64 range, |
675 | u64 reserve_offset, u64 reserve_range, | |
676 | const struct drm_gpuvm_ops *ops) | |
e6303f32 | 677 | { |
f72c2db4 DK |
678 | gpuvm->rb.tree = RB_ROOT_CACHED; |
679 | INIT_LIST_HEAD(&gpuvm->rb.list); | |
e6303f32 | 680 | |
f72c2db4 DK |
681 | gpuvm->name = name ? name : "unknown"; |
682 | gpuvm->ops = ops; | |
546ca4d3 | 683 | gpuvm->drm = drm; |
e6303f32 | 684 | |
546ca4d3 DK |
685 | drm_gpuvm_check_overflow(gpuvm, start_offset, range); |
686 | gpuvm->mm_start = start_offset; | |
687 | gpuvm->mm_range = range; | |
e6303f32 | 688 | |
546ca4d3 | 689 | memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva)); |
e6303f32 | 690 | if (reserve_range) { |
f72c2db4 DK |
691 | gpuvm->kernel_alloc_node.va.addr = reserve_offset; |
692 | gpuvm->kernel_alloc_node.va.range = reserve_range; | |
e6303f32 | 693 | |
546ca4d3 | 694 | if (likely(!drm_gpuvm_check_overflow(gpuvm, reserve_offset, |
e6303f32 | 695 | reserve_range))) |
f72c2db4 | 696 | __drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node); |
e6303f32 DK |
697 | } |
698 | } | |
f72c2db4 | 699 | EXPORT_SYMBOL_GPL(drm_gpuvm_init); |
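/*
 * Illustrative sketch (not part of the original sources): a driver setting up
 * a 48 bit wide VA space with a kernel reserved node at the top; the
 * driver_gpu_vm structure and the names used here are hypothetical.
 *
 *	struct driver_gpu_vm {
 *		struct drm_gpuvm base;
 *		// driver specific members
 *	};
 *
 *	drm_gpuvm_init(&dvm->base, "example-vm", drm,
 *		       0, 1ULL << 48,
 *		       (1ULL << 48) - SZ_4K, SZ_4K,
 *		       &driver_gpuvm_ops);
 */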
e6303f32 DK |
700 | |
701 | /** | |
f72c2db4 DK |
702 | * drm_gpuvm_destroy() - cleanup a &drm_gpuvm |
703 | * @gpuvm: pointer to the &drm_gpuvm to clean up | |
e6303f32 DK |
704 | * |
705 | * Note that it is a bug to call this function on a manager that still | |
706 | * holds GPU VA mappings. | |
707 | */ | |
708 | void | |
f72c2db4 | 709 | drm_gpuvm_destroy(struct drm_gpuvm *gpuvm) |
e6303f32 | 710 | { |
f72c2db4 | 711 | gpuvm->name = NULL; |
e6303f32 | 712 | |
f72c2db4 DK |
713 | if (gpuvm->kernel_alloc_node.va.range) |
714 | __drm_gpuva_remove(&gpuvm->kernel_alloc_node); | |
e6303f32 | 715 | |
546ca4d3 DK |
716 | drm_WARN(gpuvm->drm, !RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root), |
717 | "GPUVA tree is not empty, potentially leaking memory.\n"); | |
e6303f32 | 718 | } |
f72c2db4 | 719 | EXPORT_SYMBOL_GPL(drm_gpuvm_destroy); |
e6303f32 DK |
720 | |
721 | static int | |
f72c2db4 | 722 | __drm_gpuva_insert(struct drm_gpuvm *gpuvm, |
e6303f32 DK |
723 | struct drm_gpuva *va) |
724 | { | |
725 | struct rb_node *node; | |
726 | struct list_head *head; | |
727 | ||
f72c2db4 | 728 | if (drm_gpuva_it_iter_first(&gpuvm->rb.tree, |
e6303f32 DK |
729 | GPUVA_START(va), |
730 | GPUVA_LAST(va))) | |
731 | return -EEXIST; | |
732 | ||
f72c2db4 | 733 | va->vm = gpuvm; |
e6303f32 | 734 | |
f72c2db4 | 735 | drm_gpuva_it_insert(va, &gpuvm->rb.tree); |
e6303f32 DK |
736 | |
737 | node = rb_prev(&va->rb.node); | |
738 | if (node) | |
739 | head = &(to_drm_gpuva(node))->rb.entry; | |
740 | else | |
f72c2db4 | 741 | head = &gpuvm->rb.list; |
e6303f32 DK |
742 | |
743 | list_add(&va->rb.entry, head); | |
744 | ||
745 | return 0; | |
746 | } | |
747 | ||
748 | /** | |
749 | * drm_gpuva_insert() - insert a &drm_gpuva | |
f72c2db4 | 750 | * @gpuvm: the &drm_gpuvm to insert the &drm_gpuva in |
e6303f32 DK |
751 | * @va: the &drm_gpuva to insert |
752 | * | |
753 | * Insert a &drm_gpuva with a given address and range into a | |
f72c2db4 | 754 | * &drm_gpuvm. |
e6303f32 DK |
755 | * |
756 | * It is safe to use this function using the safe versions of iterating the GPU | |
f72c2db4 DK |
757 | * VA space, such as drm_gpuvm_for_each_va_safe() and |
758 | * drm_gpuvm_for_each_va_range_safe(). | |
e6303f32 DK |
759 | * |
760 | * Returns: 0 on success, negative error code on failure. | |
761 | */ | |
762 | int | |
f72c2db4 | 763 | drm_gpuva_insert(struct drm_gpuvm *gpuvm, |
e6303f32 DK |
764 | struct drm_gpuva *va) |
765 | { | |
766 | u64 addr = va->va.addr; | |
767 | u64 range = va->va.range; | |
768 | ||
f72c2db4 | 769 | if (unlikely(!drm_gpuvm_range_valid(gpuvm, addr, range))) |
e6303f32 DK |
770 | return -EINVAL; |
771 | ||
f72c2db4 | 772 | return __drm_gpuva_insert(gpuvm, va); |
e6303f32 DK |
773 | } |
774 | EXPORT_SYMBOL_GPL(drm_gpuva_insert); | |
775 | ||
776 | static void | |
777 | __drm_gpuva_remove(struct drm_gpuva *va) | |
778 | { | |
f72c2db4 | 779 | drm_gpuva_it_remove(va, &va->vm->rb.tree); |
e6303f32 DK |
780 | list_del_init(&va->rb.entry); |
781 | } | |
782 | ||
783 | /** | |
784 | * drm_gpuva_remove() - remove a &drm_gpuva | |
785 | * @va: the &drm_gpuva to remove | |
786 | * | |
787 | * This removes the given &va from the underlying tree. | |
788 | * | |
789 | * It is safe to use this function using the safe versions of iterating the GPU | |
f72c2db4 DK |
790 | * VA space, such as drm_gpuvm_for_each_va_safe() and |
791 | * drm_gpuvm_for_each_va_range_safe(). | |
e6303f32 DK |
792 | */ |
793 | void | |
794 | drm_gpuva_remove(struct drm_gpuva *va) | |
795 | { | |
f72c2db4 | 796 | struct drm_gpuvm *gpuvm = va->vm; |
e6303f32 | 797 | |
f72c2db4 | 798 | if (unlikely(va == &gpuvm->kernel_alloc_node)) { |
546ca4d3 DK |
799 | drm_WARN(gpuvm->drm, 1, |
800 | "Can't destroy kernel reserved node.\n"); | |
e6303f32 DK |
801 | return; |
802 | } | |
803 | ||
804 | __drm_gpuva_remove(va); | |
805 | } | |
806 | EXPORT_SYMBOL_GPL(drm_gpuva_remove); | |
807 | ||
808 | /** | |
809 | * drm_gpuva_link() - link a &drm_gpuva | |
810 | * @va: the &drm_gpuva to link | |
811 | * | |
812 | * This adds the given &va to the GPU VA list of the &drm_gem_object it is | |
813 | * associated with. | |
814 | * | |
815 | * This function expects the caller to protect the GEM's GPUVA list against | |
816 | * concurrent access using the GEM's dma_resv lock. | |
817 | */ | |
818 | void | |
819 | drm_gpuva_link(struct drm_gpuva *va) | |
820 | { | |
821 | struct drm_gem_object *obj = va->gem.obj; | |
822 | ||
823 | if (unlikely(!obj)) | |
824 | return; | |
825 | ||
826 | drm_gem_gpuva_assert_lock_held(obj); | |
827 | ||
828 | list_add_tail(&va->gem.entry, &obj->gpuva.list); | |
829 | } | |
830 | EXPORT_SYMBOL_GPL(drm_gpuva_link); | |
831 | ||
832 | /** | |
833 | * drm_gpuva_unlink() - unlink a &drm_gpuva | |
834 | * @va: the &drm_gpuva to unlink | |
835 | * | |
836 | * This removes the given &va from the GPU VA list of the &drm_gem_object it is | |
837 | * associated with. | |
838 | * | |
839 | * This function expects the caller to protect the GEM's GPUVA list against | |
840 | * concurrent access using the GEM's dma_resv lock. | |
841 | */ | |
842 | void | |
843 | drm_gpuva_unlink(struct drm_gpuva *va) | |
844 | { | |
845 | struct drm_gem_object *obj = va->gem.obj; | |
846 | ||
847 | if (unlikely(!obj)) | |
848 | return; | |
849 | ||
850 | drm_gem_gpuva_assert_lock_held(obj); | |
851 | ||
852 | list_del_init(&va->gem.entry); | |
853 | } | |
854 | EXPORT_SYMBOL_GPL(drm_gpuva_unlink); | |
855 | ||
856 | /** | |
857 | * drm_gpuva_find_first() - find the first &drm_gpuva in the given range | |
f72c2db4 | 858 | * @gpuvm: the &drm_gpuvm to search in |
e6303f32 DK |
859 | * @addr: the &drm_gpuvas address |
860 | * @range: the &drm_gpuvas range | |
861 | * | |
862 | * Returns: the first &drm_gpuva within the given range | |
863 | */ | |
864 | struct drm_gpuva * | |
f72c2db4 | 865 | drm_gpuva_find_first(struct drm_gpuvm *gpuvm, |
e6303f32 DK |
866 | u64 addr, u64 range) |
867 | { | |
868 | u64 last = addr + range - 1; | |
869 | ||
f72c2db4 | 870 | return drm_gpuva_it_iter_first(&gpuvm->rb.tree, addr, last); |
e6303f32 DK |
871 | } |
872 | EXPORT_SYMBOL_GPL(drm_gpuva_find_first); | |
873 | ||
874 | /** | |
875 | * drm_gpuva_find() - find a &drm_gpuva | |
f72c2db4 | 876 | * @gpuvm: the &drm_gpuvm to search in |
e6303f32 DK |
877 | * @addr: the &drm_gpuvas address |
878 | * @range: the &drm_gpuvas range | |
879 | * | |
880 | * Returns: the &drm_gpuva at a given &addr and with a given &range | |
881 | */ | |
882 | struct drm_gpuva * | |
f72c2db4 | 883 | drm_gpuva_find(struct drm_gpuvm *gpuvm, |
e6303f32 DK |
884 | u64 addr, u64 range) |
885 | { | |
886 | struct drm_gpuva *va; | |
887 | ||
f72c2db4 | 888 | va = drm_gpuva_find_first(gpuvm, addr, range); |
e6303f32 DK |
889 | if (!va) |
890 | goto out; | |
891 | ||
892 | if (va->va.addr != addr || | |
893 | va->va.range != range) | |
894 | goto out; | |
895 | ||
896 | return va; | |
897 | ||
898 | out: | |
899 | return NULL; | |
900 | } | |
901 | EXPORT_SYMBOL_GPL(drm_gpuva_find); | |
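/*
 * Illustrative sketch (not part of the original sources): looking up the
 * exact mapping for an unbind request known to match a previous bind.
 *
 *	va = drm_gpuva_find(gpuvm, addr, range);
 *	if (!va)
 *		return -ENOENT;
 *
 *	drm_gpuva_unlink(va);
 *	drm_gpuva_remove(va);
 */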
902 | ||
903 | /** | |
904 | * drm_gpuva_find_prev() - find the &drm_gpuva before the given address | |
f72c2db4 | 905 | * @gpuvm: the &drm_gpuvm to search in |
e6303f32 DK |
906 | * @start: the given GPU VA's start address |
907 | * | |
908 | * Find the adjacent &drm_gpuva before the GPU VA with given &start address. | |
909 | * | |
910 | * Note that if there is any free space between the GPU VA mappings no mapping | |
911 | * is returned. | |
912 | * | |
913 | * Returns: a pointer to the found &drm_gpuva or NULL if none was found | |
914 | */ | |
915 | struct drm_gpuva * | |
f72c2db4 | 916 | drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start) |
e6303f32 | 917 | { |
f72c2db4 | 918 | if (!drm_gpuvm_range_valid(gpuvm, start - 1, 1)) |
e6303f32 DK |
919 | return NULL; |
920 | ||
f72c2db4 | 921 | return drm_gpuva_it_iter_first(&gpuvm->rb.tree, start - 1, start); |
e6303f32 DK |
922 | } |
923 | EXPORT_SYMBOL_GPL(drm_gpuva_find_prev); | |
924 | ||
925 | /** | |
926 | * drm_gpuva_find_next() - find the &drm_gpuva after the given address | |
f72c2db4 | 927 | * @gpuvm: the &drm_gpuvm to search in |
e6303f32 DK |
928 | * @end: the given GPU VA's end address |
929 | * | |
930 | * Find the adjacent &drm_gpuva after the GPU VA with given &end address. | |
931 | * | |
932 | * Note that if there is any free space between the GPU VA mappings no mapping | |
933 | * is returned. | |
934 | * | |
935 | * Returns: a pointer to the found &drm_gpuva or NULL if none was found | |
936 | */ | |
937 | struct drm_gpuva * | |
f72c2db4 | 938 | drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end) |
e6303f32 | 939 | { |
f72c2db4 | 940 | if (!drm_gpuvm_range_valid(gpuvm, end, 1)) |
e6303f32 DK |
941 | return NULL; |
942 | ||
f72c2db4 | 943 | return drm_gpuva_it_iter_first(&gpuvm->rb.tree, end, end + 1); |
e6303f32 DK |
944 | } |
945 | EXPORT_SYMBOL_GPL(drm_gpuva_find_next); | |
946 | ||
947 | /** | |
f72c2db4 | 948 | * drm_gpuvm_interval_empty() - indicate whether a given interval of the VA space |
e6303f32 | 949 | * is empty |
f72c2db4 | 950 | * @gpuvm: the &drm_gpuvm to check the range for |
e6303f32 DK |
951 | * @addr: the start address of the range |
952 | * @range: the range of the interval | |
953 | * | |
954 | * Returns: true if the interval is empty, false otherwise | |
955 | */ | |
956 | bool | |
f72c2db4 | 957 | drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range) |
e6303f32 | 958 | { |
f72c2db4 | 959 | return !drm_gpuva_find_first(gpuvm, addr, range); |
e6303f32 | 960 | } |
f72c2db4 | 961 | EXPORT_SYMBOL_GPL(drm_gpuvm_interval_empty); |
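/*
 * Illustrative sketch (not part of the original sources): a driver that does
 * not allow overlapping fixed-address mappings might validate a
 * userspace-requested address before binding; driver_vm_bind() is a
 * hypothetical helper.
 *
 *	if (!drm_gpuvm_interval_empty(gpuvm, addr, range))
 *		return -EBUSY;
 *
 *	return driver_vm_bind(gpuvm, addr, range, obj, offset);
 */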
e6303f32 DK |
962 | |
963 | /** | |
964 | * drm_gpuva_map() - helper to insert a &drm_gpuva according to a | |
965 | * &drm_gpuva_op_map | |
f72c2db4 | 966 | * @gpuvm: the &drm_gpuvm |
e6303f32 DK |
967 | * @va: the &drm_gpuva to insert |
968 | * @op: the &drm_gpuva_op_map to initialize @va with | |
969 | * | |
f72c2db4 | 970 | * Initializes the @va from the @op and inserts it into the given @gpuvm. |
e6303f32 DK |
971 | */ |
972 | void | |
f72c2db4 | 973 | drm_gpuva_map(struct drm_gpuvm *gpuvm, |
e6303f32 DK |
974 | struct drm_gpuva *va, |
975 | struct drm_gpuva_op_map *op) | |
976 | { | |
977 | drm_gpuva_init_from_op(va, op); | |
f72c2db4 | 978 | drm_gpuva_insert(gpuvm, va); |
e6303f32 DK |
979 | } |
980 | EXPORT_SYMBOL_GPL(drm_gpuva_map); | |
981 | ||
982 | /** | |
983 | * drm_gpuva_remap() - helper to remap a &drm_gpuva according to a | |
984 | * &drm_gpuva_op_remap | |
985 | * @prev: the &drm_gpuva to remap when keeping the start of a mapping | |
986 | * @next: the &drm_gpuva to remap when keeping the end of a mapping | |
987 | * @op: the &drm_gpuva_op_remap to initialize @prev and @next with | |
988 | * | |
989 | * Removes the currently mapped &drm_gpuva and remaps it using @prev and/or | |
990 | * @next. | |
991 | */ | |
992 | void | |
993 | drm_gpuva_remap(struct drm_gpuva *prev, | |
994 | struct drm_gpuva *next, | |
995 | struct drm_gpuva_op_remap *op) | |
996 | { | |
997 | struct drm_gpuva *curr = op->unmap->va; | |
f72c2db4 | 998 | struct drm_gpuvm *gpuvm = curr->vm; |
e6303f32 DK |
999 | |
1000 | drm_gpuva_remove(curr); | |
1001 | ||
1002 | if (op->prev) { | |
1003 | drm_gpuva_init_from_op(prev, op->prev); | |
f72c2db4 | 1004 | drm_gpuva_insert(gpuvm, prev); |
e6303f32 DK |
1005 | } |
1006 | ||
1007 | if (op->next) { | |
1008 | drm_gpuva_init_from_op(next, op->next); | |
f72c2db4 | 1009 | drm_gpuva_insert(gpuvm, next); |
e6303f32 DK |
1010 | } |
1011 | } | |
1012 | EXPORT_SYMBOL_GPL(drm_gpuva_remap); | |
1013 | ||
1014 | /** | |
1015 | * drm_gpuva_unmap() - helper to remove a &drm_gpuva according to a | |
1016 | * &drm_gpuva_op_unmap | |
1017 | * @op: the &drm_gpuva_op_unmap specifying the &drm_gpuva to remove | |
1018 | * | |
1019 | * Removes the &drm_gpuva associated with the &drm_gpuva_op_unmap. | |
1020 | */ | |
1021 | void | |
1022 | drm_gpuva_unmap(struct drm_gpuva_op_unmap *op) | |
1023 | { | |
1024 | drm_gpuva_remove(op->va); | |
1025 | } | |
1026 | EXPORT_SYMBOL_GPL(drm_gpuva_unmap); | |
1027 | ||
1028 | static int | |
f72c2db4 | 1029 | op_map_cb(const struct drm_gpuvm_ops *fn, void *priv, |
e6303f32 DK |
1030 | u64 addr, u64 range, |
1031 | struct drm_gem_object *obj, u64 offset) | |
1032 | { | |
1033 | struct drm_gpuva_op op = {}; | |
1034 | ||
1035 | op.op = DRM_GPUVA_OP_MAP; | |
1036 | op.map.va.addr = addr; | |
1037 | op.map.va.range = range; | |
1038 | op.map.gem.obj = obj; | |
1039 | op.map.gem.offset = offset; | |
1040 | ||
1041 | return fn->sm_step_map(&op, priv); | |
1042 | } | |
1043 | ||
1044 | static int | |
f72c2db4 | 1045 | op_remap_cb(const struct drm_gpuvm_ops *fn, void *priv, |
e6303f32 DK |
1046 | struct drm_gpuva_op_map *prev, |
1047 | struct drm_gpuva_op_map *next, | |
1048 | struct drm_gpuva_op_unmap *unmap) | |
1049 | { | |
1050 | struct drm_gpuva_op op = {}; | |
1051 | struct drm_gpuva_op_remap *r; | |
1052 | ||
1053 | op.op = DRM_GPUVA_OP_REMAP; | |
1054 | r = &op.remap; | |
1055 | r->prev = prev; | |
1056 | r->next = next; | |
1057 | r->unmap = unmap; | |
1058 | ||
1059 | return fn->sm_step_remap(&op, priv); | |
1060 | } | |
1061 | ||
1062 | static int | |
f72c2db4 | 1063 | op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv, |
e6303f32 DK |
1064 | struct drm_gpuva *va, bool merge) |
1065 | { | |
1066 | struct drm_gpuva_op op = {}; | |
1067 | ||
1068 | op.op = DRM_GPUVA_OP_UNMAP; | |
1069 | op.unmap.va = va; | |
1070 | op.unmap.keep = merge; | |
1071 | ||
1072 | return fn->sm_step_unmap(&op, priv); | |
1073 | } | |
1074 | ||
1075 | static int | |
f72c2db4 DK |
1076 | __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, |
1077 | const struct drm_gpuvm_ops *ops, void *priv, | |
e6303f32 DK |
1078 | u64 req_addr, u64 req_range, |
1079 | struct drm_gem_object *req_obj, u64 req_offset) | |
1080 | { | |
cdf4100e | 1081 | struct drm_gpuva *va, *next; |
e6303f32 DK |
1082 | u64 req_end = req_addr + req_range; |
1083 | int ret; | |
1084 | ||
f72c2db4 | 1085 | if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range))) |
e6303f32 DK |
1086 | return -EINVAL; |
1087 | ||
f72c2db4 | 1088 | drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) { |
e6303f32 DK |
1089 | struct drm_gem_object *obj = va->gem.obj; |
1090 | u64 offset = va->gem.offset; | |
1091 | u64 addr = va->va.addr; | |
1092 | u64 range = va->va.range; | |
1093 | u64 end = addr + range; | |
1094 | bool merge = !!va->gem.obj; | |
1095 | ||
1096 | if (addr == req_addr) { | |
1097 | merge &= obj == req_obj && | |
1098 | offset == req_offset; | |
1099 | ||
1100 | if (end == req_end) { | |
1101 | ret = op_unmap_cb(ops, priv, va, merge); | |
1102 | if (ret) | |
1103 | return ret; | |
1104 | break; | |
1105 | } | |
1106 | ||
1107 | if (end < req_end) { | |
1108 | ret = op_unmap_cb(ops, priv, va, merge); | |
1109 | if (ret) | |
1110 | return ret; | |
cdf4100e | 1111 | continue; |
e6303f32 DK |
1112 | } |
1113 | ||
1114 | if (end > req_end) { | |
1115 | struct drm_gpuva_op_map n = { | |
1116 | .va.addr = req_end, | |
1117 | .va.range = range - req_range, | |
1118 | .gem.obj = obj, | |
1119 | .gem.offset = offset + req_range, | |
1120 | }; | |
1121 | struct drm_gpuva_op_unmap u = { | |
1122 | .va = va, | |
1123 | .keep = merge, | |
1124 | }; | |
1125 | ||
1126 | ret = op_remap_cb(ops, priv, NULL, &n, &u); | |
1127 | if (ret) | |
1128 | return ret; | |
1129 | break; | |
1130 | } | |
1131 | } else if (addr < req_addr) { | |
1132 | u64 ls_range = req_addr - addr; | |
1133 | struct drm_gpuva_op_map p = { | |
1134 | .va.addr = addr, | |
1135 | .va.range = ls_range, | |
1136 | .gem.obj = obj, | |
1137 | .gem.offset = offset, | |
1138 | }; | |
1139 | struct drm_gpuva_op_unmap u = { .va = va }; | |
1140 | ||
1141 | merge &= obj == req_obj && | |
1142 | offset + ls_range == req_offset; | |
1143 | u.keep = merge; | |
1144 | ||
1145 | if (end == req_end) { | |
1146 | ret = op_remap_cb(ops, priv, &p, NULL, &u); | |
1147 | if (ret) | |
1148 | return ret; | |
1149 | break; | |
1150 | } | |
1151 | ||
1152 | if (end < req_end) { | |
1153 | ret = op_remap_cb(ops, priv, &p, NULL, &u); | |
1154 | if (ret) | |
1155 | return ret; | |
cdf4100e | 1156 | continue; |
e6303f32 DK |
1157 | } |
1158 | ||
1159 | if (end > req_end) { | |
1160 | struct drm_gpuva_op_map n = { | |
1161 | .va.addr = req_end, | |
1162 | .va.range = end - req_end, | |
1163 | .gem.obj = obj, | |
1164 | .gem.offset = offset + ls_range + | |
1165 | req_range, | |
1166 | }; | |
1167 | ||
1168 | ret = op_remap_cb(ops, priv, &p, &n, &u); | |
1169 | if (ret) | |
1170 | return ret; | |
1171 | break; | |
1172 | } | |
1173 | } else if (addr > req_addr) { | |
1174 | merge &= obj == req_obj && | |
1175 | offset == req_offset + | |
1176 | (addr - req_addr); | |
1177 | ||
1178 | if (end == req_end) { | |
1179 | ret = op_unmap_cb(ops, priv, va, merge); | |
1180 | if (ret) | |
1181 | return ret; | |
1182 | break; | |
1183 | } | |
1184 | ||
1185 | if (end < req_end) { | |
1186 | ret = op_unmap_cb(ops, priv, va, merge); | |
1187 | if (ret) | |
1188 | return ret; | |
cdf4100e | 1189 | continue; |
e6303f32 DK |
1190 | } |
1191 | ||
1192 | if (end > req_end) { | |
1193 | struct drm_gpuva_op_map n = { | |
1194 | .va.addr = req_end, | |
1195 | .va.range = end - req_end, | |
1196 | .gem.obj = obj, | |
1197 | .gem.offset = offset + req_end - addr, | |
1198 | }; | |
1199 | struct drm_gpuva_op_unmap u = { | |
1200 | .va = va, | |
1201 | .keep = merge, | |
1202 | }; | |
1203 | ||
1204 | ret = op_remap_cb(ops, priv, NULL, &n, &u); | |
1205 | if (ret) | |
1206 | return ret; | |
1207 | break; | |
1208 | } | |
1209 | } | |
e6303f32 DK |
1210 | } |
1211 | ||
1212 | return op_map_cb(ops, priv, | |
1213 | req_addr, req_range, | |
1214 | req_obj, req_offset); | |
1215 | } | |
1216 | ||
1217 | static int | |
f72c2db4 DK |
1218 | __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, |
1219 | const struct drm_gpuvm_ops *ops, void *priv, | |
e6303f32 DK |
1220 | u64 req_addr, u64 req_range) |
1221 | { | |
1222 | struct drm_gpuva *va, *next; | |
1223 | u64 req_end = req_addr + req_range; | |
1224 | int ret; | |
1225 | ||
f72c2db4 | 1226 | if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range))) |
e6303f32 DK |
1227 | return -EINVAL; |
1228 | ||
f72c2db4 | 1229 | drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) { |
e6303f32 DK |
1230 | struct drm_gpuva_op_map prev = {}, next = {}; |
1231 | bool prev_split = false, next_split = false; | |
1232 | struct drm_gem_object *obj = va->gem.obj; | |
1233 | u64 offset = va->gem.offset; | |
1234 | u64 addr = va->va.addr; | |
1235 | u64 range = va->va.range; | |
1236 | u64 end = addr + range; | |
1237 | ||
1238 | if (addr < req_addr) { | |
1239 | prev.va.addr = addr; | |
1240 | prev.va.range = req_addr - addr; | |
1241 | prev.gem.obj = obj; | |
1242 | prev.gem.offset = offset; | |
1243 | ||
1244 | prev_split = true; | |
1245 | } | |
1246 | ||
1247 | if (end > req_end) { | |
1248 | next.va.addr = req_end; | |
1249 | next.va.range = end - req_end; | |
1250 | next.gem.obj = obj; | |
1251 | next.gem.offset = offset + (req_end - addr); | |
1252 | ||
1253 | next_split = true; | |
1254 | } | |
1255 | ||
1256 | if (prev_split || next_split) { | |
1257 | struct drm_gpuva_op_unmap unmap = { .va = va }; | |
1258 | ||
1259 | ret = op_remap_cb(ops, priv, | |
1260 | prev_split ? &prev : NULL, | |
1261 | next_split ? &next : NULL, | |
1262 | &unmap); | |
1263 | if (ret) | |
1264 | return ret; | |
1265 | } else { | |
1266 | ret = op_unmap_cb(ops, priv, va, false); | |
1267 | if (ret) | |
1268 | return ret; | |
1269 | } | |
1270 | } | |
1271 | ||
1272 | return 0; | |
1273 | } | |
1274 | ||
1275 | /** | |
f72c2db4 DK |
1276 | * drm_gpuvm_sm_map() - creates the &drm_gpuva_op split/merge steps |
1277 | * @gpuvm: the &drm_gpuvm representing the GPU VA space | |
e6303f32 DK |
1278 | * @req_addr: the start address of the new mapping |
1279 | * @req_range: the range of the new mapping | |
1280 | * @req_obj: the &drm_gem_object to map | |
1281 | * @req_offset: the offset within the &drm_gem_object | |
1282 | * @priv: pointer to a driver private data structure | |
1283 | * | |
1284 | * This function iterates the given range of the GPU VA space. It utilizes the | |
f72c2db4 | 1285 | * &drm_gpuvm_ops to call back into the driver providing the split and merge |
e6303f32 DK |
1286 | * steps. |
1287 | * | |
1288 | * Drivers may use these callbacks to update the GPU VA space right away within | |
1289 | * the callback. In case the driver decides to copy and store the operations for | |
f72c2db4 DK |
1290 | * later processing neither this function nor &drm_gpuvm_sm_unmap is allowed to |
1291 | * be called before the &drm_gpuvm's view of the GPU VA space was | |
e6303f32 | 1292 | * updated with the previous set of operations. To update the |
f72c2db4 | 1293 | * &drm_gpuvm's view of the GPU VA space drm_gpuva_insert() and
1294 | * drm_gpuva_remove() should be used.
1296 | * | |
1297 | * A sequence of callbacks can contain map, unmap and remap operations, but | |
1298 | * the sequence of callbacks might also be empty if no operation is required, | |
1299 | * e.g. if the requested mapping already exists in the exact same way. | |
1300 | * | |
1301 | * There can be an arbitrary amount of unmap operations, a maximum of two remap | |
1302 | * operations and a single map operation. The latter one represents the original | |
1303 | * map operation requested by the caller. | |
1304 | * | |
1305 | * Returns: 0 on success or a negative error code | |
1306 | */ | |
1307 | int | |
f72c2db4 | 1308 | drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv, |
e6303f32 DK |
1309 | u64 req_addr, u64 req_range, |
1310 | struct drm_gem_object *req_obj, u64 req_offset) | |
1311 | { | |
f72c2db4 | 1312 | const struct drm_gpuvm_ops *ops = gpuvm->ops; |
e6303f32 DK |
1313 | |
1314 | if (unlikely(!(ops && ops->sm_step_map && | |
1315 | ops->sm_step_remap && | |
1316 | ops->sm_step_unmap))) | |
1317 | return -EINVAL; | |
1318 | ||
f72c2db4 | 1319 | return __drm_gpuvm_sm_map(gpuvm, ops, priv, |
e6303f32 DK |
1320 | req_addr, req_range, |
1321 | req_obj, req_offset); | |
1322 | } | |
f72c2db4 | 1323 | EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map); |
e6303f32 DK |
1324 | |
1325 | /** | |
f72c2db4 DK |
1326 | * drm_gpuvm_sm_unmap() - creates the &drm_gpuva_ops to split on unmap |
1327 | * @gpuvm: the &drm_gpuvm representing the GPU VA space | |
e6303f32 DK |
1328 | * @priv: pointer to a driver private data structure |
1329 | * @req_addr: the start address of the range to unmap | |
1330 | * @req_range: the range of the mappings to unmap | |
1331 | * | |
1332 | * This function iterates the given range of the GPU VA space. It utilizes the | |
f72c2db4 | 1333 | * &drm_gpuvm_ops to call back into the driver providing the operations to |
e6303f32 DK |
1334 | * unmap and, if required, split existent mappings. |
1335 | * | |
1336 | * Drivers may use these callbacks to update the GPU VA space right away within | |
1337 | * the callback. In case the driver decides to copy and store the operations for | |
f72c2db4 DK |
1338 | * later processing neither this function nor &drm_gpuvm_sm_map is allowed to be |
1339 | * called before the &drm_gpuvm's view of the GPU VA space was updated | |
1340 | * with the previous set of operations. To update the &drm_gpuvm's view | |
e6303f32 DK |
1341 | * of the GPU VA space drm_gpuva_insert() and drm_gpuva_remove() should be
1342 | * used.
1343 | * | |
1344 | * A sequence of callbacks can contain unmap and remap operations, depending on | |
1345 | * whether there are actual overlapping mappings to split. | |
1346 | * | |
1347 | * There can be an arbitrary amount of unmap operations and a maximum of two | |
1348 | * remap operations. | |
1349 | * | |
1350 | * Returns: 0 on success or a negative error code | |
1351 | */ | |
1352 | int | |
f72c2db4 | 1353 | drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv, |
e6303f32 DK |
1354 | u64 req_addr, u64 req_range) |
1355 | { | |
f72c2db4 | 1356 | const struct drm_gpuvm_ops *ops = gpuvm->ops; |
e6303f32 DK |
1357 | |
1358 | if (unlikely(!(ops && ops->sm_step_remap && | |
1359 | ops->sm_step_unmap))) | |
1360 | return -EINVAL; | |
1361 | ||
f72c2db4 | 1362 | return __drm_gpuvm_sm_unmap(gpuvm, ops, priv, |
e6303f32 DK |
1363 | req_addr, req_range); |
1364 | } | |
f72c2db4 | 1365 | EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap); |
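/*
 * Illustrative sketch (not part of the original sources): an unmap request
 * handled with the same callback context as in the DOC: Examples section;
 * the driver_* names are hypothetical.
 *
 *	int driver_mapping_destroy(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
 *	{
 *		struct driver_context ctx;
 *		int ret = 0;
 *
 *		ctx.gpuvm = gpuvm;
 *		ctx.new_va = NULL;
 *
 *		ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL);
 *		ctx.next_va = kzalloc(sizeof(*ctx.next_va), GFP_KERNEL);
 *		if (!ctx.prev_va || !ctx.next_va) {
 *			ret = -ENOMEM;
 *			goto out;
 *		}
 *
 *		driver_lock_va_space();
 *		ret = drm_gpuvm_sm_unmap(gpuvm, &ctx, addr, range);
 *		driver_unlock_va_space();
 *
 *	out:
 *		kfree(ctx.prev_va);
 *		kfree(ctx.next_va);
 *		return ret;
 *	}
 */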
e6303f32 DK |
1366 | |
1367 | static struct drm_gpuva_op * | |
f72c2db4 | 1368 | gpuva_op_alloc(struct drm_gpuvm *gpuvm) |
e6303f32 | 1369 | { |
f72c2db4 | 1370 | const struct drm_gpuvm_ops *fn = gpuvm->ops; |
e6303f32 DK |
1371 | struct drm_gpuva_op *op; |
1372 | ||
1373 | if (fn && fn->op_alloc) | |
1374 | op = fn->op_alloc(); | |
1375 | else | |
1376 | op = kzalloc(sizeof(*op), GFP_KERNEL); | |
1377 | ||
1378 | if (unlikely(!op)) | |
1379 | return NULL; | |
1380 | ||
1381 | return op; | |
1382 | } | |
1383 | ||
1384 | static void | |
f72c2db4 | 1385 | gpuva_op_free(struct drm_gpuvm *gpuvm, |
e6303f32 DK |
1386 | struct drm_gpuva_op *op) |
1387 | { | |
f72c2db4 | 1388 | const struct drm_gpuvm_ops *fn = gpuvm->ops; |
e6303f32 DK |
1389 | |
1390 | if (fn && fn->op_free) | |
1391 | fn->op_free(op); | |
1392 | else | |
1393 | kfree(op); | |
1394 | } | |
1395 | ||
1396 | static int | |
1397 | drm_gpuva_sm_step(struct drm_gpuva_op *__op, | |
1398 | void *priv) | |
1399 | { | |
1400 | struct { | |
f72c2db4 | 1401 | struct drm_gpuvm *vm; |
e6303f32 DK |
1402 | struct drm_gpuva_ops *ops; |
1403 | } *args = priv; | |
f72c2db4 | 1404 | struct drm_gpuvm *gpuvm = args->vm; |
e6303f32 DK |
1405 | struct drm_gpuva_ops *ops = args->ops; |
1406 | struct drm_gpuva_op *op; | |
1407 | ||
f72c2db4 | 1408 | op = gpuva_op_alloc(gpuvm); |
e6303f32 DK |
1409 | if (unlikely(!op)) |
1410 | goto err; | |
1411 | ||
1412 | memcpy(op, __op, sizeof(*op)); | |
1413 | ||
1414 | if (op->op == DRM_GPUVA_OP_REMAP) { | |
1415 | struct drm_gpuva_op_remap *__r = &__op->remap; | |
1416 | struct drm_gpuva_op_remap *r = &op->remap; | |
1417 | ||
1418 | r->unmap = kmemdup(__r->unmap, sizeof(*r->unmap), | |
1419 | GFP_KERNEL); | |
1420 | if (unlikely(!r->unmap)) | |
1421 | goto err_free_op; | |
1422 | ||
1423 | if (__r->prev) { | |
1424 | r->prev = kmemdup(__r->prev, sizeof(*r->prev), | |
1425 | GFP_KERNEL); | |
1426 | if (unlikely(!r->prev)) | |
1427 | goto err_free_unmap; | |
1428 | } | |
1429 | ||
1430 | if (__r->next) { | |
1431 | r->next = kmemdup(__r->next, sizeof(*r->next), | |
1432 | GFP_KERNEL); | |
1433 | if (unlikely(!r->next)) | |
1434 | goto err_free_prev; | |
1435 | } | |
1436 | } | |
1437 | ||
1438 | list_add_tail(&op->entry, &ops->list); | |
1439 | ||
1440 | return 0; | |
1441 | ||
1442 | err_free_prev:
1443 | kfree(op->remap.prev);
1444 | err_free_unmap:
1445 | kfree(op->remap.unmap);
1446 | err_free_op: | |
f72c2db4 | 1447 | gpuva_op_free(gpuvm, op); |
e6303f32 DK |
1448 | err: |
1449 | return -ENOMEM; | |
1450 | } | |
1451 | ||
f72c2db4 | 1452 | static const struct drm_gpuvm_ops gpuvm_list_ops = { |
e6303f32 DK |
1453 | .sm_step_map = drm_gpuva_sm_step, |
1454 | .sm_step_remap = drm_gpuva_sm_step, | |
1455 | .sm_step_unmap = drm_gpuva_sm_step, | |
1456 | }; | |
1457 | ||
1458 | /** | |
f72c2db4 DK |
1459 | * drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge |
1460 | * @gpuvm: the &drm_gpuvm representing the GPU VA space | |
e6303f32 DK |
1461 | * @req_addr: the start address of the new mapping |
1462 | * @req_range: the range of the new mapping | |
1463 | * @req_obj: the &drm_gem_object to map | |
1464 | * @req_offset: the offset within the &drm_gem_object | |
1465 | * | |
1466 | * This function creates a list of operations to perform splitting and merging | |
1467 | * of existent mapping(s) with the newly requested one. | |
1468 | * | |
1469 | * The list can be iterated with &drm_gpuva_for_each_op and must be processed | |
1470 | * in the given order. It can contain map, unmap and remap operations, but it | |
1471 | * also can be empty if no operation is required, e.g. if the requested mapping | |
1472 | * already exists is the exact same way. | |
1473 | * | |
1474 | * There can be an arbitrary amount of unmap operations, a maximum of two remap | |
1475 | * operations and a single map operation. The latter one represents the original | |
1476 | * map operation requested by the caller. | |
1477 | * | |
1478 | * Note that before calling this function again with another mapping request it | |
f72c2db4 | 1479 | * is necessary to update the &drm_gpuvm's view of the GPU VA space. The |
e6303f32 | 1480 | * previously obtained operations must be either processed or abandoned. To |
f72c2db4 | 1481 | * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(), |
e6303f32 DK |
1482 | * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be |
1483 | * used. | |
1484 | * | |
1485 | * After the caller has finished processing the returned &drm_gpuva_ops, they must | |
1486 | * be freed with &drm_gpuva_ops_free (see the usage sketch after this function). | |
1487 | * | |
1488 | * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure | |
1489 | */ | |
1490 | struct drm_gpuva_ops * | |
f72c2db4 | 1491 | drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm, |
e6303f32 DK |
1492 | u64 req_addr, u64 req_range, |
1493 | struct drm_gem_object *req_obj, u64 req_offset) | |
1494 | { | |
1495 | struct drm_gpuva_ops *ops; | |
1496 | struct { | |
f72c2db4 | 1497 | struct drm_gpuvm *vm; |
e6303f32 DK |
1498 | struct drm_gpuva_ops *ops; |
1499 | } args; | |
1500 | int ret; | |
1501 | ||
1502 | ops = kzalloc(sizeof(*ops), GFP_KERNEL); | |
1503 | if (unlikely(!ops)) | |
1504 | return ERR_PTR(-ENOMEM); | |
1505 | ||
1506 | INIT_LIST_HEAD(&ops->list); | |
1507 | ||
f72c2db4 | 1508 | args.vm = gpuvm; |
e6303f32 DK |
1509 | args.ops = ops; |
1510 | ||
f72c2db4 | 1511 | ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, |
e6303f32 DK |
1512 | req_addr, req_range, |
1513 | req_obj, req_offset); | |
1514 | if (ret) | |
1515 | goto err_free_ops; | |
1516 | ||
1517 | return ops; | |
1518 | ||
1519 | err_free_ops: | |
f72c2db4 | 1520 | drm_gpuva_ops_free(gpuvm, ops); |
e6303f32 DK |
1521 | return ERR_PTR(ret); |
1522 | } | |
f72c2db4 | 1523 | EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_ops_create); |
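/*
 * Illustrative usage sketch only (not part of the upstream code): a driver
 * might consume the ops list created by drm_gpuvm_sm_map_ops_create() as
 * follows. driver_vm_bind() is a hypothetical example function; the actual
 * page table updates and &drm_gpuva (re-)insertion are left as comments,
 * since they are driver specific.
 */
static int __maybe_unused
driver_vm_bind(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
	       u64 addr, u64 range, u64 offset)
{
	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;
	int ret = 0;

	ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, obj, offset);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	/* The operations must be processed in the given order. */
	drm_gpuva_for_each_op(op, ops) {
		switch (op->op) {
		case DRM_GPUVA_OP_MAP:
			/* program the PTEs and insert the new &drm_gpuva */
			break;
		case DRM_GPUVA_OP_REMAP:
			/* split the old mapping into op->remap.prev/next */
			break;
		case DRM_GPUVA_OP_UNMAP:
			/* zap the PTEs and remove op->unmap.va */
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}

out:
	drm_gpuva_ops_free(gpuvm, ops);
	return ret;
}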
e6303f32 DK |
1524 | |
1525 | /** | |
f72c2db4 | 1526 | * drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on |
e6303f32 | 1527 | * unmap |
f72c2db4 | 1528 | * @gpuvm: the &drm_gpuvm representing the GPU VA space |
e6303f32 DK |
1529 | * @req_addr: the start address of the range to unmap |
1530 | * @req_range: the range of the mappings to unmap | |
1531 | * | |
1532 | * This function creates a list of operations to perform unmapping and, if | |
1533 | * required, splitting of the mappings overlapping the unmap range. | |
1534 | * | |
1535 | * The list can be iterated with &drm_gpuva_for_each_op and must be processed | |
1536 | * in the given order. It can contain unmap and remap operations, depending on | |
1537 | * whether there are actual overlapping mappings to split. | |
1538 | * | |
1539 | * There can be an arbitrary number of unmap operations and a maximum of two | |
1540 | * remap operations. | |
1541 | * | |
1542 | * Note that before calling this function again with another range to unmap it | |
f72c2db4 | 1543 | * is necessary to update the &drm_gpuvm's view of the GPU VA space. The |
e6303f32 | 1544 | * previously obtained operations must be processed or abandoned. To update the |
f72c2db4 | 1545 | * &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(), |
e6303f32 DK |
1546 | * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be |
1547 | * used. | |
1548 | * | |
1549 | * After the caller has finished processing the returned &drm_gpuva_ops, they must | |
1550 | * be freed with &drm_gpuva_ops_free (see the sketch after this function). | |
1551 | * | |
1552 | * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure | |
1553 | */ | |
1554 | struct drm_gpuva_ops * | |
f72c2db4 | 1555 | drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm, |
e6303f32 DK |
1556 | u64 req_addr, u64 req_range) |
1557 | { | |
1558 | struct drm_gpuva_ops *ops; | |
1559 | struct { | |
f72c2db4 | 1560 | struct drm_gpuvm *vm; |
e6303f32 DK |
1561 | struct drm_gpuva_ops *ops; |
1562 | } args; | |
1563 | int ret; | |
1564 | ||
1565 | ops = kzalloc(sizeof(*ops), GFP_KERNEL); | |
1566 | if (unlikely(!ops)) | |
1567 | return ERR_PTR(-ENOMEM); | |
1568 | ||
1569 | INIT_LIST_HEAD(&ops->list); | |
1570 | ||
f72c2db4 | 1571 | args.vm = gpuvm; |
e6303f32 DK |
1572 | args.ops = ops; |
1573 | ||
f72c2db4 | 1574 | ret = __drm_gpuvm_sm_unmap(gpuvm, &gpuvm_list_ops, &args, |
e6303f32 DK |
1575 | req_addr, req_range); |
1576 | if (ret) | |
1577 | goto err_free_ops; | |
1578 | ||
1579 | return ops; | |
1580 | ||
1581 | err_free_ops: | |
f72c2db4 | 1582 | drm_gpuva_ops_free(gpuvm, ops); |
e6303f32 DK |
1583 | return ERR_PTR(ret); |
1584 | } | |
f72c2db4 | 1585 | EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_ops_create); |
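/*
 * Illustrative usage sketch only (not part of the upstream code):
 * driver_vm_unbind() is a hypothetical example of processing the ops list
 * created by drm_gpuvm_sm_unmap_ops_create(); only unmap and remap
 * operations can show up here.
 */
static int __maybe_unused
driver_vm_unbind(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
{
	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;

	ops = drm_gpuvm_sm_unmap_ops_create(gpuvm, addr, range);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	drm_gpuva_for_each_op(op, ops) {
		if (op->op == DRM_GPUVA_OP_UNMAP) {
			/* zap the PTEs and remove op->unmap.va */
		} else if (op->op == DRM_GPUVA_OP_REMAP) {
			/* shrink/split the mapping to op->remap.prev/next */
		}
	}

	drm_gpuva_ops_free(gpuvm, ops);
	return 0;
}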
e6303f32 DK |
1586 | |
1587 | /** | |
f72c2db4 DK |
1588 | * drm_gpuvm_prefetch_ops_create() - creates the &drm_gpuva_ops to prefetch |
1589 | * @gpuvm: the &drm_gpuvm representing the GPU VA space | |
e6303f32 DK |
1590 | * @addr: the start address of the range to prefetch |
1591 | * @range: the range of the mappings to prefetch | |
1592 | * | |
1593 | * This function creates a list of operations to perform prefetching. | |
1594 | * | |
1595 | * The list can be iterated with &drm_gpuva_for_each_op and must be processed | |
1596 | * in the given order. It contains only prefetch operations. | |
1597 | * | |
1598 | * There can be an arbitrary number of prefetch operations. | |
1599 | * | |
1600 | * After the caller has finished processing the returned &drm_gpuva_ops, they must | |
1601 | * be freed with &drm_gpuva_ops_free. | |
1602 | * | |
1603 | * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure | |
1604 | */ | |
1605 | struct drm_gpuva_ops * | |
f72c2db4 | 1606 | drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm, |
e6303f32 DK |
1607 | u64 addr, u64 range) |
1608 | { | |
1609 | struct drm_gpuva_ops *ops; | |
1610 | struct drm_gpuva_op *op; | |
1611 | struct drm_gpuva *va; | |
1612 | u64 end = addr + range; | |
1613 | int ret; | |
1614 | ||
1615 | ops = kzalloc(sizeof(*ops), GFP_KERNEL); | |
1616 | if (!ops) | |
1617 | return ERR_PTR(-ENOMEM); | |
1618 | ||
1619 | INIT_LIST_HEAD(&ops->list); | |
1620 | ||
f72c2db4 DK |
1621 | drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) { |
1622 | op = gpuva_op_alloc(gpuvm); | |
e6303f32 DK |
1623 | if (!op) { |
1624 | ret = -ENOMEM; | |
1625 | goto err_free_ops; | |
1626 | } | |
1627 | ||
1628 | op->op = DRM_GPUVA_OP_PREFETCH; | |
1629 | op->prefetch.va = va; | |
1630 | list_add_tail(&op->entry, &ops->list); | |
1631 | } | |
1632 | ||
1633 | return ops; | |
1634 | ||
1635 | err_free_ops: | |
f72c2db4 | 1636 | drm_gpuva_ops_free(gpuvm, ops); |
e6303f32 DK |
1637 | return ERR_PTR(ret); |
1638 | } | |
f72c2db4 | 1639 | EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create); |
e6303f32 DK |
1640 | |
1641 | /** | |
f72c2db4 DK |
1642 | * drm_gpuvm_gem_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM |
1643 | * @gpuvm: the &drm_gpuvm representing the GPU VA space | |
e6303f32 DK |
1644 | * @obj: the &drm_gem_object to unmap |
1645 | * | |
1646 | * This function creates a list of operations to perform unmapping for every | |
1647 | * GPUVA attached to a GEM. | |
1648 | * | |
1649 | * The list can be iterated with &drm_gpuva_for_each_op and consists of an | |
1650 | * arbitrary number of unmap operations. | |
1651 | * | |
1652 | * After the caller has finished processing the returned &drm_gpuva_ops, they must | |
1653 | * be freed with &drm_gpuva_ops_free (see the sketch after this function). | |
1654 | * | |
1655 | * It is the caller's responsibility to protect the GEM's GPUVA list against | |
1656 | * concurrent access using the GEM's dma_resv lock. | |
1657 | * | |
1658 | * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure | |
1659 | */ | |
1660 | struct drm_gpuva_ops * | |
f72c2db4 | 1661 | drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm, |
e6303f32 DK |
1662 | struct drm_gem_object *obj) |
1663 | { | |
1664 | struct drm_gpuva_ops *ops; | |
1665 | struct drm_gpuva_op *op; | |
1666 | struct drm_gpuva *va; | |
1667 | int ret; | |
1668 | ||
1669 | drm_gem_gpuva_assert_lock_held(obj); | |
1670 | ||
1671 | ops = kzalloc(sizeof(*ops), GFP_KERNEL); | |
1672 | if (!ops) | |
1673 | return ERR_PTR(-ENOMEM); | |
1674 | ||
1675 | INIT_LIST_HEAD(&ops->list); | |
1676 | ||
1677 | drm_gem_for_each_gpuva(va, obj) { | |
f72c2db4 | 1678 | op = gpuva_op_alloc(gpuvm); |
e6303f32 DK |
1679 | if (!op) { |
1680 | ret = -ENOMEM; | |
1681 | goto err_free_ops; | |
1682 | } | |
1683 | ||
1684 | op->op = DRM_GPUVA_OP_UNMAP; | |
1685 | op->unmap.va = va; | |
1686 | list_add_tail(&op->entry, &ops->list); | |
1687 | } | |
1688 | ||
1689 | return ops; | |
1690 | ||
1691 | err_free_ops: | |
f72c2db4 | 1692 | drm_gpuva_ops_free(gpuvm, ops); |
e6303f32 DK |
1693 | return ERR_PTR(ret); |
1694 | } | |
f72c2db4 | 1695 | EXPORT_SYMBOL_GPL(drm_gpuvm_gem_unmap_ops_create); |
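/*
 * Illustrative usage sketch only (not part of the upstream code):
 * driver_gem_unmap_all() is a hypothetical example of unmapping every GPUVA
 * of a GEM, e.g. on object destruction. The GEM's GPUVA list is protected by
 * holding the GEM's dma_resv lock while the ops are created.
 */
static int __maybe_unused
driver_gem_unmap_all(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj)
{
	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;
	int ret;

	ret = dma_resv_lock(obj->resv, NULL);
	if (ret)
		return ret;

	ops = drm_gpuvm_gem_unmap_ops_create(gpuvm, obj);
	dma_resv_unlock(obj->resv);

	if (IS_ERR(ops))
		return PTR_ERR(ops);

	drm_gpuva_for_each_op(op, ops) {
		/* zap the PTEs and remove op->unmap.va from the VM */
	}

	drm_gpuva_ops_free(gpuvm, ops);
	return 0;
}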
e6303f32 DK |
1696 | |
1697 | /** | |
1698 | * drm_gpuva_ops_free() - free the given &drm_gpuva_ops | |
f72c2db4 | 1699 | * @gpuvm: the &drm_gpuvm the ops were created for |
e6303f32 DK |
1700 | * @ops: the &drm_gpuva_ops to free |
1701 | * | |
1702 | * Frees the given &drm_gpuva_ops structure including all the ops associated | |
1703 | * with it. | |
1704 | */ | |
1705 | void | |
f72c2db4 | 1706 | drm_gpuva_ops_free(struct drm_gpuvm *gpuvm, |
e6303f32 DK |
1707 | struct drm_gpuva_ops *ops) |
1708 | { | |
1709 | struct drm_gpuva_op *op, *next; | |
1710 | ||
1711 | drm_gpuva_for_each_op_safe(op, next, ops) { | |
1712 | list_del(&op->entry); | |
1713 | ||
1714 | if (op->op == DRM_GPUVA_OP_REMAP) { | |
1715 | kfree(op->remap.prev); | |
1716 | kfree(op->remap.next); | |
1717 | kfree(op->remap.unmap); | |
1718 | } | |
1719 | ||
f72c2db4 | 1720 | gpuva_op_free(gpuvm, op); |
e6303f32 DK |
1721 | } |
1722 | ||
1723 | kfree(ops); | |
1724 | } | |
1725 | EXPORT_SYMBOL_GPL(drm_gpuva_ops_free); | |
fe7acaa7 DK |
1726 | |
1727 | MODULE_DESCRIPTION("DRM GPUVM"); | |
1728 | MODULE_LICENSE("GPL"); |