drm/vmwgfx: stop setting multiple domain flags
[linux-block.git] drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

static const struct ttm_place vram_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place vram_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place sys_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place sys_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place gmr_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static const struct ttm_place gmr_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place mob_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

static const struct ttm_place mob_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_placement = {
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
        }
};

static const struct ttm_place gmr_vram_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
        }
};

struct ttm_placement vmw_vram_gmr_placement = {
        .num_placement = 2,
        .placement = vram_gmr_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_placement_flags
};

static const struct ttm_place vram_gmr_ne_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
                         TTM_PL_FLAG_NO_EVICT
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
                         TTM_PL_FLAG_NO_EVICT
        }
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
        .num_placement = 2,
        .placement = vram_gmr_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
        .num_placement = 1,
        .placement = &vram_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
        .num_placement = 1,
        .placement = &sys_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
        .num_placement = 1,
        .placement = &sys_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_ne_placement_flags
};

static const struct ttm_place evictable_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
        }
};

static const struct ttm_place nonfixed_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
        }
};

struct ttm_placement vmw_evictable_placement = {
        .num_placement = 4,
        .placement = evictable_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
        .num_placement = 1,
        .num_busy_placement = 2,
        .placement = &gmr_placement_flags,
        .busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
        .num_placement = 1,
        .num_busy_placement = 1,
        .placement = &mob_placement_flags,
        .busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_mob_ne_placement = {
        .num_placement = 1,
        .num_busy_placement = 1,
        .placement = &mob_ne_placement_flags,
        .busy_placement = &mob_ne_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
        .num_placement = 3,
        .placement = nonfixed_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

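/*
 * Usage note (an illustrative sketch, not driver code): TTM tries the
 * entries in .placement in order when validating a buffer object, and
 * falls back to .busy_placement under memory pressure. A hypothetical
 * placement that prefers GMR but may spill to VRAM when GMR space is
 * tight could thus be written as:
 *
 *	struct ttm_placement example_placement = {
 *		.num_placement = 1,
 *		.placement = &gmr_placement_flags,
 *		.num_busy_placement = 1,
 *		.busy_placement = &vram_placement_flags
 *	};
 */
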
struct vmw_ttm_tt {
        struct ttm_dma_tt dma_ttm;
        struct vmw_private *dev_priv;
        int gmr_id;
        struct vmw_mob *mob;
        int mem_type;
        struct sg_table sgt;
        struct vmw_sg_table vsgt;
        uint64_t sg_alloc_size;
        bool mapped;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
        return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
        bool ret = __vmw_piter_non_sg_next(viter);

        return __sg_page_iter_dma_next(&viter->iter) && ret;
}


/**
 * Helper function to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * This function returns a pointer to the page currently
 * pointed to by @viter.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
        return viter->pages[viter->i];
}

/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
        return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
        return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
        return sg_page_iter_dma_address(&viter->iter);
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Offset, in pages, at which to start iterating
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
                     unsigned long p_offset)
{
        viter->i = p_offset - 1;
        viter->num_pages = vsgt->num_pages;
        viter->page = &__vmw_piter_non_sg_page;
        viter->pages = vsgt->pages;
        switch (vsgt->mode) {
        case vmw_dma_phys:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_phys_addr;
                break;
        case vmw_dma_alloc_coherent:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_dma_addr;
                viter->addrs = vsgt->addrs;
                break;
        case vmw_dma_map_populate:
        case vmw_dma_map_bind:
                viter->next = &__vmw_piter_sg_next;
                viter->dma_address = &__vmw_piter_sg_addr;
                __sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
                                     vsgt->sgt->orig_nents, p_offset);
                break;
        default:
                BUG();
        }
}

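/*
 * Example (a sketch mirroring the mapping loop in vmw_ttm_map_dma()
 * below): since the iterator starts one step before the first page,
 * it must be advanced before the first dereference:
 *
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *		// ... use addr ...
 *	}
 */
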
/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct device *dev = vmw_tt->dev_priv->dev->dev;

        dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
                     DMA_BIDIRECTIONAL);
        vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct device *dev = vmw_tt->dev_priv->dev->dev;
        int ret;

        ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
                         DMA_BIDIRECTIONAL);
        if (unlikely(ret == 0))
                return -ENOMEM;

        vmw_tt->sgt.nents = ret;

        return 0;
}

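/*
 * Illustrative note (hypothetical, not a path the driver takes): if CPU
 * access were needed while such a mapping is live, the DMA API would
 * require explicit syncing around it, e.g.:
 *
 *	dma_sync_sg_for_cpu(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
 *			    DMA_BIDIRECTIONAL);
 *	// ... CPU reads/writes ...
 *	dma_sync_sg_for_device(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
 *			       DMA_BIDIRECTIONAL);
 *
 * The driver instead restricts itself to configurations where these syncs
 * are no-ops, as described in the comment above.
 */
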
/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non-NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false
        };
        struct vmw_piter iter;
        dma_addr_t old;
        int ret = 0;
        static size_t sgl_size;
        static size_t sgt_size;

        if (vmw_tt->mapped)
                return 0;

        vsgt->mode = dev_priv->map_mode;
        vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
        vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
        vsgt->addrs = vmw_tt->dma_ttm.dma_address;
        vsgt->sgt = &vmw_tt->sgt;

        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
                if (unlikely(!sgl_size)) {
                        sgl_size = ttm_round_pot(sizeof(struct scatterlist));
                        sgt_size = ttm_round_pot(sizeof(struct sg_table));
                }
                vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
                ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
                if (unlikely(ret != 0))
                        return ret;

                ret = __sg_alloc_table_from_pages
                        (&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
                         (unsigned long) vsgt->num_pages << PAGE_SHIFT,
                         dma_get_max_seg_size(dev_priv->dev->dev),
                         GFP_KERNEL);
                if (unlikely(ret != 0))
                        goto out_sg_alloc_fail;

                if (vsgt->num_pages > vmw_tt->sgt.nents) {
                        uint64_t over_alloc =
                                sgl_size * (vsgt->num_pages -
                                            vmw_tt->sgt.nents);

                        /* Pages were coalesced; return the surplus accounting. */
                        ttm_mem_global_free(glob, over_alloc);
                        vmw_tt->sg_alloc_size -= over_alloc;
                }

                ret = vmw_ttm_map_for_dma(vmw_tt);
                if (unlikely(ret != 0))
                        goto out_map_fail;

                break;
        default:
                break;
        }

        /* Count the number of DMA-contiguous regions in the mapping. */
        old = ~((dma_addr_t) 0);
        vmw_tt->vsgt.num_regions = 0;
        for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
                dma_addr_t cur = vmw_piter_dma_addr(&iter);

                if (cur != old + PAGE_SIZE)
                        vmw_tt->vsgt.num_regions++;
                old = cur;
        }

        vmw_tt->mapped = true;
        return 0;

out_map_fail:
        sg_free_table(vmw_tt->vsgt.sgt);
        vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
        ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
        return ret;
}

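/*
 * Illustrative note: vsgt.num_regions counts runs of DMA-contiguous
 * pages. For hypothetical page DMA addresses 0x1000, 0x2000 and 0x5000,
 * the counting loop in vmw_ttm_map_dma() yields num_regions == 2:
 * {0x1000, 0x2000} is one contiguous run and {0x5000} is another.
 */
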
/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct vmw_private *dev_priv = vmw_tt->dev_priv;

        if (!vmw_tt->vsgt.sgt)
                return;

        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
                vmw_ttm_unmap_from_dma(vmw_tt);
                sg_free_table(vmw_tt->vsgt.sgt);
                vmw_tt->vsgt.sgt = NULL;
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_tt->sg_alloc_size);
                break;
        default:
                break;
        }
        vmw_tt->mapped = false;
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        return &vmw_tt->vsgt;
}

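/*
 * Example (a sketch under the reservation rule stated above; use() is
 * hypothetical): a caller needing the device addresses might do:
 *
 *	const struct vmw_sg_table *vsgt;
 *	struct vmw_piter iter;
 *
 *	ret = ttm_bo_reserve(bo, false, true, NULL);
 *	if (ret)
 *		return ret;
 *	vsgt = vmw_bo_sg_table(bo);
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);)
 *		use(vmw_piter_dma_addr(&iter));
 *	ttm_bo_unreserve(bo);
 */
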
static int vmw_ttm_bind(struct ttm_bo_device *bdev,
                        struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        int ret;

        ret = vmw_ttm_map_dma(vmw_be);
        if (unlikely(ret != 0))
                return ret;

        vmw_be->gmr_id = bo_mem->start;
        vmw_be->mem_type = bo_mem->mem_type;

        switch (bo_mem->mem_type) {
        case VMW_PL_GMR:
                return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
                                    ttm->num_pages, vmw_be->gmr_id);
        case VMW_PL_MOB:
                if (unlikely(vmw_be->mob == NULL)) {
                        vmw_be->mob =
                                vmw_mob_create(ttm->num_pages);
                        if (unlikely(vmw_be->mob == NULL))
                                return -ENOMEM;
                }

                return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
                                    &vmw_be->vsgt, ttm->num_pages,
                                    vmw_be->gmr_id);
        default:
                BUG();
        }
        return 0;
}

static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
                           struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        switch (vmw_be->mem_type) {
        case VMW_PL_GMR:
                vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
                break;
        case VMW_PL_MOB:
                vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
                break;
        default:
                BUG();
        }

        if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
                vmw_ttm_unmap_dma(vmw_be);
}

static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        vmw_ttm_unmap_dma(vmw_be);
        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ttm_dma_tt_fini(&vmw_be->dma_ttm);
        else
                ttm_tt_fini(ttm);

        if (vmw_be->mob)
                vmw_mob_destroy(vmw_be->mob);

        kfree(vmw_be);
}

static int vmw_ttm_populate(struct ttm_bo_device *bdev,
                            struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
                size_t size =
                        ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
                ret = ttm_mem_global_alloc(glob, size, ctx);
                if (unlikely(ret != 0))
                        return ret;

                ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
                                       ctx);
                if (unlikely(ret != 0))
                        ttm_mem_global_free(glob, size);
        } else
                ret = ttm_pool_populate(ttm, ctx);

        return ret;
}

static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
                               struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
                                                 dma_ttm.ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

        if (vmw_tt->mob) {
                vmw_mob_destroy(vmw_tt->mob);
                vmw_tt->mob = NULL;
        }

        vmw_ttm_unmap_dma(vmw_tt);
        if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
                size_t size =
                        ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

                ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
                ttm_mem_global_free(glob, size);
        } else
                ttm_pool_unpopulate(ttm);
}

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
                                        uint32_t page_flags)
{
        struct vmw_ttm_tt *vmw_be;
        int ret;

        vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
        if (!vmw_be)
                return NULL;

        vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
        vmw_be->mob = NULL;

        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
        else
                ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
        if (unlikely(ret != 0))
                goto out_no_init;

        return &vmw_be->dma_ttm.ttm;
out_no_init:
        kfree(vmw_be);
        return NULL;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
                            struct ttm_placement *placement)
{
        *placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        struct ttm_object_file *tfile =
                vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

        return vmw_user_bo_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
{
        struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
        case VMW_PL_GMR:
        case VMW_PL_MOB:
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = (mem->start << PAGE_SHIFT) +
                        dev_priv->vram_start;
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

/**
 * vmw_move_notify - TTM move_notify callback
 *
 * @bo: The TTM buffer object about to move.
 * @evict: Whether the move is an eviction (unused here).
 * @mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
                            bool evict,
                            struct ttm_resource *mem)
{
        vmw_bo_move_notify(bo, mem);
        vmw_query_move_notify(bo, mem);
}


/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
        vmw_bo_swap_notify(bo);
        (void) ttm_bo_wait(bo, false, false);
}

struct ttm_bo_driver vmw_bo_driver = {
        .ttm_tt_create = &vmw_ttm_tt_create,
        .ttm_tt_populate = &vmw_ttm_populate,
        .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
        .ttm_tt_bind = &vmw_ttm_bind,
        .ttm_tt_unbind = &vmw_ttm_unbind,
        .ttm_tt_destroy = &vmw_ttm_destroy,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = vmw_evict_flags,
        .move = NULL,
        .verify_access = vmw_verify_access,
        .move_notify = vmw_move_notify,
        .swap_notify = vmw_swap_notify,
        .io_mem_reserve = &vmw_ttm_io_mem_reserve,
};

int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
                               unsigned long bo_size,
                               struct ttm_buffer_object **bo_p)
{
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        struct ttm_buffer_object *bo;
        int ret;

        ret = ttm_bo_create(&dev_priv->bdev, bo_size,
                            ttm_bo_type_device,
                            &vmw_sys_ne_placement,
                            0, false, &bo);

        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(bo, false, true, NULL);
        BUG_ON(ret != 0);
        ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
        if (likely(ret == 0)) {
                struct vmw_ttm_tt *vmw_tt =
                        container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
                ret = vmw_ttm_map_dma(vmw_tt);
        }

        ttm_bo_unreserve(bo);

        if (likely(ret == 0))
                *bo_p = bo;
        return ret;
}
811}