// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
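
/*
 * Memory placements handed to TTM. In each struct ttm_placement below,
 * "placement" lists the preferred placements and "busy_placement" the
 * fallback placements TTM may use when under memory pressure.
 */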

static const struct ttm_place vram_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place vram_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place sys_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place gmr_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static const struct ttm_place gmr_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place mob_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

static const struct ttm_place mob_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place gmr_vram_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

static const struct ttm_place vram_gmr_ne_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
	.num_placement = 1,
	.placement = &sys_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_ne_placement_flags
};

static const struct ttm_place evictable_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place nonfixed_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_evictable_placement = {
	.num_placement = 4,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_mob_ne_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_ne_placement_flags,
	.busy_placement = &mob_ne_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
	.num_placement = 3,
	.placement = nonfixed_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};
struct vmw_ttm_tt {
	struct ttm_dma_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	uint64_t sg_alloc_size;
	bool mapped;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	return __sg_page_iter_next(&viter->iter);
}


/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}

static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
{
	return sg_page_iter_page(&viter->iter);
}


/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	/*
	 * FIXME: This driver wrongly mixes DMA and CPU SG list iteration and
	 * needs revision. See
	 * https://lore.kernel.org/lkml/20190104223531.GA1705@ziepe.ca/
	 */
	return sg_page_iter_dma_address(
		container_of(&viter->iter, struct sg_dma_page_iter, base));
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Page offset into @vsgt at which to start iterating
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		viter->page = &__vmw_piter_non_sg_page;
		viter->pages = vsgt->pages;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->page = &__vmw_piter_non_sg_page;
		viter->addrs = vsgt->addrs;
		viter->pages = vsgt->pages;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		viter->page = &__vmw_piter_sg_page;
		__sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}

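/*
 * Example: walking all pages of a mapped buffer with a vmw_piter
 * (illustrative sketch only; the region-counting loop in
 * vmw_ttm_map_dma() below does exactly this):
 *
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *		...
 *	}
 *
 * Per the __sg_page_iter_start() convention, the iterator is only valid
 * after the first vmw_piter_next() call.
 */
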
/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
		     DMA_BIDIRECTIONAL);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;
	int ret;

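	/*
	 * dma_map_sg() returns the number of DMA segments actually used;
	 * an IOMMU may coalesce pages, so this can be smaller than
	 * orig_nents. Zero means failure.
	 */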
	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
			 DMA_BIDIRECTIONAL);
	if (unlikely(ret == 0))
		return -ENOMEM;

	vmw_tt->sgt.nents = ret;

	return 0;
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function for the current DMA mapping mode
 * and make sure the TTM pages are visible to the device. Allocate storage
 * for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non-NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
						vsgt->num_pages, 0,
						(unsigned long)
						vsgt->num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (unlikely(ret != 0))
			goto out_sg_alloc_fail;

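		/*
		 * sg_alloc_table_from_pages() coalesces contiguous pages
		 * into fewer entries, so the table may end up smaller than
		 * the worst case accounted above; give back the difference.
		 */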
		if (vsgt->num_pages > vmw_tt->sgt.nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

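	/*
	 * Count the contiguous DMA ranges: a new region starts whenever a
	 * page's DMA address does not immediately follow the previous one.
	 */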
	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}


/**
 * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 * Note that the buffer object must be either pinned or reserved before
 * calling this function.
 */
int vmw_bo_map_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return vmw_ttm_map_dma(vmw_tt);
}


/**
 * vmw_bo_unmap_dma - Tear down any device mappings of buffer object pages
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 */
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_tt);
}


/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return &vmw_tt->vsgt;
}


static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

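	/*
	 * A GMR bind consumes the vsgt directly; a MOB bind additionally
	 * needs a struct vmw_mob page table, created lazily on the first
	 * bind and kept until unpopulate or destroy.
	 */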
	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				    ttm->num_pages, vmw_be->gmr_id);
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				    &vmw_be->vsgt, ttm->num_pages,
				    vmw_be->gmr_id);
	default:
		BUG();
	}
	return 0;
}

static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);

	return 0;
}


static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}


static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

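	/*
	 * In the coherent mode, ttm_dma_populate() also fills the per-page
	 * dma_address array, so account for that storage with the TTM
	 * memory global before populating.
	 */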
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
		ret = ttm_mem_global_alloc(glob, size, ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
				       ctx);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else
		ret = ttm_pool_populate(ttm, ctx);

	return ret;
}

static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else
		ttm_pool_unpopulate(ttm);
}

static struct ttm_backend_func vmw_ttm_func = {
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

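	/*
	 * Only the coherent mode needs the ttm_dma_tt variant with its
	 * dma_address array; the other modes get by with a plain ttm_tt.
	 */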
d92d9851 739 if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
dde5da23 740 ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
d92d9851 741 else
dde5da23 742 ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
d92d9851
TH
743 if (unlikely(ret != 0))
744 goto out_no_init;
745
746 return &vmw_be->dma_ttm.ttm;
747out_no_init:
748 kfree(vmw_be);
749 return NULL;
fb1d9738
JB
750}
751
static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		/*
		 * "Guest Memory Regions" is an aperture-like feature with
		 * one slot per bo. There is an upper limit on the number
		 * of slots as well as on the bo size.
		 */
		man->func = &vmw_gmrid_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_bo_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	mem->bus.addr = NULL;
	mem->bus.is_iomem = false;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	return 0;
}

/**
 * vmw_move_notify - TTM move_notify callback
 *
 * @bo: The TTM buffer object about to move.
 * @evict: Unused.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_mem_reg *mem)
{
	vmw_bo_move_notify(bo, mem);
	vmw_query_move_notify(bo, mem);
}


/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}


struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.invalidate_caches = vmw_invalidate_caches,
	.init_mem_type = vmw_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.move_notify = vmw_move_notify,
	.swap_notify = vmw_swap_notify,
	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
	.io_mem_free = &vmw_ttm_io_mem_free,
};