// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_placement.h>

static const struct ttm_place vram_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,
	.flags = 0
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = 0
};

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
};

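/*
 * Illustrative sketch (not part of the driver): these placement objects
 * are what TTM consumes when a buffer is (re)validated into a memory
 * type, e.g.:
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = false,
 *		.no_wait_gpu = false
 *	};
 *	int ret = ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
 *
 * vmw_evict_flags() below hands out vmw_sys_placement the same way.
 */
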
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * __vmw_piter_non_sg_next: Helper functions to advance
 * a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	bool ret = __vmw_piter_non_sg_next(viter);

	return __sg_page_iter_dma_next(&viter->iter) && ret;
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}

/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Pointer offset used to update current array position
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	viter->pages = vsgt->pages;
	switch (vsgt->mode) {
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->addrs = vsgt->addrs;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}

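/*
 * Illustrative sketch (assumes the inline wrappers vmw_piter_next() and
 * vmw_piter_dma_addr() declared in vmwgfx_drv.h): walking every DMA
 * address of a table, advancing once before the first dereference as
 * described above:
 *
 *	struct vmw_piter viter;
 *
 *	vmw_piter_start(&viter, vsgt, 0);
 *	while (vmw_piter_next(&viter)) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&viter);
 *		(program addr into a device page table here)
 *	}
 */
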
/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->drm.dev;

	dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->drm.dev;

	return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
}

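/*
 * Illustrative sketch of the constraint above (hypothetical caller, not
 * in this file): on an architecture where the syncs are not no-ops, CPU
 * access to the mapped pages would need to be bracketed like this:
 *
 *	dma_sync_sgtable_for_cpu(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL);
 *	(CPU reads/writes the pages)
 *	dma_sync_sgtable_for_device(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL);
 *
 * vmwgfx only takes this mapping path where the syncs are known to be
 * at most a write-buffer flush, so it skips them.
 */
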
/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by @mapped being
 * true, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	int ret = 0;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = NULL;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (vmw_tt->dma_ttm.page_flags & TTM_TT_FLAG_EXTERNAL) {
			vsgt->sgt = vmw_tt->dma_ttm.sg;
		} else {
			vsgt->sgt = &vmw_tt->sgt;
			ret = sg_alloc_table_from_pages_segment(&vmw_tt->sgt,
				vsgt->pages, vsgt->num_pages, 0,
				(unsigned long)vsgt->num_pages << PAGE_SHIFT,
				dma_get_max_seg_size(dev_priv->drm.dev),
				GFP_KERNEL);
			if (ret)
				goto out_sg_alloc_fail;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	drm_warn(&dev_priv->drm, "VSG table map failed!");
	sg_free_table(vsgt->sgt);
	vsgt->sgt = NULL;
out_sg_alloc_fail:
	return ret;
}

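/*
 * Rough summary of the modes handled above, assuming the semantics of
 * enum vmw_dma_map_mode in vmwgfx_drv.h:
 *
 *	vmw_dma_alloc_coherent: pages come from TTM's coherent
 *		allocator, so device addresses already sit in
 *		dma_ttm.dma_address and no sg table is built.
 *	vmw_dma_map_populate: keep the DMA mapping until the TT is
 *		unpopulated (see vmw_ttm_unpopulate() below).
 *	vmw_dma_map_bind: drop the DMA mapping already at unbind time
 *		(see vmw_ttm_unbind() below).
 */
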
/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);

	return &vmw_tt->vsgt;
}

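/*
 * Illustrative sketch (not in this file): a consumer pairs
 * vmw_bo_sg_table() with the page iterator above while holding the
 * reservation that keeps the device addresses valid:
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	vsgt = vmw_bo_sg_table(bo);
 *	vmw_piter_start(&viter, vsgt, 0);
 *	while (vmw_piter_next(&viter))
 *		(consume vmw_piter_dma_addr(&viter))
 *	ttm_bo_unreserve(bo);
 */
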
static int vmw_ttm_bind(struct ttm_device *bdev,
			struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);
	int ret = 0;

	if (!bo_mem)
		return -EINVAL;

	if (vmw_be->bound)
		return 0;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				   ttm->num_pages, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				   &vmw_be->vsgt, ttm->num_pages,
				   vmw_be->gmr_id);
		break;
	case VMW_PL_SYSTEM:
		/* Nothing to be done for a system bind */
		break;
	default:
		BUG();
	}
	vmw_be->bound = true;
	return ret;
}

static void vmw_ttm_unbind(struct ttm_device *bdev,
			   struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);

	if (!vmw_be->bound)
		return;

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	case VMW_PL_SYSTEM:
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);
	vmw_be->bound = false;
}

static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);

	vmw_ttm_unmap_dma(vmw_be);
	ttm_tt_fini(ttm);
	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}

static int vmw_ttm_populate(struct ttm_device *bdev,
			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (external && ttm->sg)
		return drm_prime_sg_to_dma_addr_array(ttm->sg,
						      ttm->dma_address,
						      ttm->num_pages);

	return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}

static void vmw_ttm_unpopulate(struct ttm_device *bdev,
			       struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm);
	bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;

	if (external)
		return;

	vmw_ttm_unbind(bdev, ttm);

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);

	ttm_pool_free(&bdev->pool, ttm);
}

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;
	bool external = bo->type == ttm_bo_type_sg;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
	vmw_be->mob = NULL;

	if (external)
		page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent || external)
		ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
				     ttm_cached);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
				  ttm_cached, 0);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = (mem->start << PAGE_SHIFT) +
			dev_priv->vram_start;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_cached;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

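/*
 * Worked example for the VRAM case above (illustrative numbers): a
 * resource starting at page 0x10 on a device whose VRAM aperture begins
 * at 0xe8000000 yields
 *
 *	mem->bus.offset = (0x10 << PAGE_SHIFT) + 0xe8000000
 *			= 0xe8010000	(with 4 KiB pages)
 *
 * i.e. a CPU-visible bus address inside the aperture. The GMR, MOB and
 * system types return 0 because they are never accessed through an I/O
 * aperture.
 */
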
/**
 * vmw_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The memory region the buffer is moving from
 * @new_mem: The struct ttm_resource indicating to what memory
 * region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    struct ttm_resource *old_mem,
			    struct ttm_resource *new_mem)
{
	vmw_bo_move_notify(bo, new_mem);
	vmw_query_move_notify(bo, old_mem, new_mem);
}

/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}

static bool vmw_memtype_is_system(uint32_t mem_type)
{
	return mem_type == TTM_PL_SYSTEM || mem_type == VMW_PL_SYSTEM;
}

static int vmw_move(struct ttm_buffer_object *bo,
		    bool evict,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_resource *new_mem,
		    struct ttm_place *hop)
{
	struct ttm_resource_manager *new_man;
	struct ttm_resource_manager *old_man = NULL;
	int ret = 0;

	new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
	if (bo->resource)
		old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type);

	if (new_man->use_tt && !vmw_memtype_is_system(new_mem->mem_type)) {
		ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
		if (ret)
			return ret;
	}

	if (!bo->resource || (bo->resource->mem_type == TTM_PL_SYSTEM &&
			      bo->ttm == NULL)) {
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}

	vmw_move_notify(bo, bo->resource, new_mem);

	if (old_man && old_man->use_tt && new_man->use_tt) {
		if (vmw_memtype_is_system(bo->resource->mem_type)) {
			ttm_bo_move_null(bo, new_mem);
			return 0;
		}
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			goto fail;

		vmw_ttm_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_mem);
		return 0;
	} else {
		ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	vmw_move_notify(bo, new_mem, bo->resource);
	return ret;
}

struct ttm_device_funcs vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.ttm_tt_destroy = &vmw_ttm_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = vmw_move,
	.swap_notify = vmw_swap_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};

int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
			       size_t bo_size, u32 domain,
			       struct vmw_bo **bo_p)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct vmw_bo *vbo;
	int ret;
	struct vmw_bo_params bo_params = {
		.domain = domain,
		.busy_domain = domain,
		.bo_type = ttm_bo_type_kernel,
		.size = bo_size,
		.pin = true
	};

	ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
	BUG_ON(ret != 0);
	ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);
	if (likely(ret == 0)) {
		struct vmw_ttm_tt *vmw_tt =
			container_of(vbo->tbo.ttm, struct vmw_ttm_tt, dma_ttm);
		ret = vmw_ttm_map_dma(vmw_tt);
	}

	ttm_bo_unreserve(&vbo->tbo);

	if (likely(ret == 0))
		*bo_p = vbo;
	return ret;
}
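
/*
 * Illustrative sketch (not part of this file): callers use
 * vmw_bo_create_and_populate() to get a pinned kernel-internal buffer
 * whose DMA mappings are already set up. VMW_BO_DOMAIN_SYS is assumed
 * here as the system-memory domain flag from vmwgfx_bo.h:
 *
 *	struct vmw_bo *vbo;
 *	int ret;
 *
 *	ret = vmw_bo_create_and_populate(dev_priv, PAGE_SIZE,
 *					 VMW_BO_DOMAIN_SYS, &vbo);
 *	if (ret)
 *		return ret;
 *	(vmw_bo_sg_table(&vbo->tbo) is now valid while the buffer
 *	 stays pinned)
 */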