/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
        TTM_PL_FLAG_CACHED;

static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
        TTM_PL_FLAG_CACHED |
        TTM_PL_FLAG_NO_EVICT;

static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
        TTM_PL_FLAG_CACHED;

static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM |
        TTM_PL_FLAG_CACHED |
        TTM_PL_FLAG_NO_EVICT;

static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
        TTM_PL_FLAG_CACHED;

static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
        TTM_PL_FLAG_CACHED |
        TTM_PL_FLAG_NO_EVICT;

static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB |
        TTM_PL_FLAG_CACHED;

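/*
 * Each struct ttm_placement below pairs a preferred placement list with a
 * busy_placement list that TTM falls back to when the preferred placement
 * cannot be satisfied, e.g. under memory pressure.
 */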
struct ttm_placement vmw_vram_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_placement_flags
};

static uint32_t vram_gmr_placement_flags[] = {
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static uint32_t gmr_vram_placement_flags[] = {
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_vram_gmr_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 2,
        .placement = vram_gmr_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_placement_flags
};

static uint32_t vram_gmr_ne_placement_flags[] = {
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 2,
        .placement = vram_gmr_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &vram_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &sys_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &sys_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_ne_placement_flags
};

static uint32_t evictable_placement_flags[] = {
        TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
        VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_evictable_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 4,
        .placement = evictable_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .num_busy_placement = 2,
        .placement = &gmr_placement_flags,
        .busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .num_busy_placement = 1,
        .placement = &mob_placement_flags,
        .busy_placement = &mob_placement_flags
};

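/**
 * struct vmw_ttm_tt - TTM backing structure for vmwgfx buffer objects
 *
 * @dma_ttm: Embedded struct ttm_dma_tt holding the page and DMA address
 * arrays.
 * @dev_priv: Pointer to the device private structure.
 * @gmr_id: GMR or MOB id assigned when the ttm is bound.
 * @mob: Pointer to a struct vmw_mob when bound as a MOB, NULL otherwise.
 * @mem_type: Memory type the ttm is currently bound to.
 * @sgt: Scatter-gather table used in the DMA mapping modes.
 * @vsgt: Mapping-mode independent view of the backing pages.
 * @sg_alloc_size: Size accounted to the TTM memory global for @sgt.
 * @mapped: Whether device mappings are currently set up.
 */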
struct vmw_ttm_tt {
        struct ttm_dma_tt dma_ttm;
        struct vmw_private *dev_priv;
        int gmr_id;
        struct vmw_mob *mob;
        int mem_type;
        struct sg_table sgt;
        struct vmw_sg_table vsgt;
        uint64_t sg_alloc_size;
        bool mapped;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
        return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
        return __sg_page_iter_next(&viter->iter);
}


/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
        return viter->pages[viter->i];
}

static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
{
        return sg_page_iter_page(&viter->iter);
}


/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
        return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
        return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
        return sg_page_iter_dma_address(&viter->iter);
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Page offset into @vsgt at which to start iterating
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
                     unsigned long p_offset)
{
        viter->i = p_offset - 1;
        viter->num_pages = vsgt->num_pages;
        switch (vsgt->mode) {
        case vmw_dma_phys:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_phys_addr;
                viter->page = &__vmw_piter_non_sg_page;
                viter->pages = vsgt->pages;
                break;
        case vmw_dma_alloc_coherent:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_dma_addr;
                viter->page = &__vmw_piter_non_sg_page;
                viter->addrs = vsgt->addrs;
                viter->pages = vsgt->pages;
                break;
        case vmw_dma_map_populate:
        case vmw_dma_map_bind:
                viter->next = &__vmw_piter_sg_next;
                viter->dma_address = &__vmw_piter_sg_addr;
                viter->page = &__vmw_piter_sg_page;
                __sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
                                     vsgt->sgt->orig_nents, p_offset);
                break;
        default:
                BUG();
        }
}

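/*
 * Typical use of the iterator, mirroring the loop in vmw_ttm_map_dma()
 * below: start at page offset 0 and advance once before the first
 * dereference.
 *
 *      for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *              dma_addr_t cur = vmw_piter_dma_addr(&iter);
 *              ...
 *      }
 */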

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct device *dev = vmw_tt->dev_priv->dev->dev;

        dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
                     DMA_BIDIRECTIONAL);
        vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct device *dev = vmw_tt->dev_priv->dev->dev;
        int ret;

        ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
                         DMA_BIDIRECTIONAL);
        if (unlikely(ret == 0))
                return -ENOMEM;

        vmw_tt->sgt.nents = ret;

        return 0;
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function for the current DMA mapping mode and
 * make sure the TTM pages are visible to the device. Allocate storage for
 * the device mappings. If a mapping has already been performed, indicated
 * by the storage pointer being non-NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
        struct vmw_piter iter;
        dma_addr_t old;
        int ret = 0;
        static size_t sgl_size;
        static size_t sgt_size;

        if (vmw_tt->mapped)
                return 0;

        vsgt->mode = dev_priv->map_mode;
        vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
        vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
        vsgt->addrs = vmw_tt->dma_ttm.dma_address;
        vsgt->sgt = &vmw_tt->sgt;

        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
                if (unlikely(!sgl_size)) {
                        sgl_size = ttm_round_pot(sizeof(struct scatterlist));
                        sgt_size = ttm_round_pot(sizeof(struct sg_table));
                }
                vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
                ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
                                           true);
                if (unlikely(ret != 0))
                        return ret;

                ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
                                                vsgt->num_pages, 0,
                                                (unsigned long)
                                                vsgt->num_pages << PAGE_SHIFT,
                                                GFP_KERNEL);
                if (unlikely(ret != 0))
                        goto out_sg_alloc_fail;

                if (vsgt->num_pages > vmw_tt->sgt.nents) {
                        uint64_t over_alloc =
                                sgl_size * (vsgt->num_pages -
                                            vmw_tt->sgt.nents);

                        ttm_mem_global_free(glob, over_alloc);
                        vmw_tt->sg_alloc_size -= over_alloc;
                }

                ret = vmw_ttm_map_for_dma(vmw_tt);
                if (unlikely(ret != 0))
                        goto out_map_fail;

                break;
        default:
                break;
        }

        old = ~((dma_addr_t) 0);
        vmw_tt->vsgt.num_regions = 0;
        for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
                dma_addr_t cur = vmw_piter_dma_addr(&iter);

                if (cur != old + PAGE_SIZE)
                        vmw_tt->vsgt.num_regions++;
                old = cur;
        }

        vmw_tt->mapped = true;
        return 0;

out_map_fail:
        sg_free_table(vmw_tt->vsgt.sgt);
        vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
        ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
        return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct vmw_private *dev_priv = vmw_tt->dev_priv;

        if (!vmw_tt->vsgt.sgt)
                return;

        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
                vmw_ttm_unmap_from_dma(vmw_tt);
                sg_free_table(vmw_tt->vsgt.sgt);
                vmw_tt->vsgt.sgt = NULL;
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_tt->sg_alloc_size);
                break;
        default:
                break;
        }
        vmw_tt->mapped = false;
}

/**
 * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 * Note that the buffer object must be either pinned or reserved before
 * calling this function.
 */
int vmw_bo_map_dma(struct ttm_buffer_object *bo)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        return vmw_ttm_map_dma(vmw_tt);
}


/**
 * vmw_bo_unmap_dma - Tear down any device mappings of buffer object pages
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 */
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        vmw_ttm_unmap_dma(vmw_tt);
}


/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        return &vmw_tt->vsgt;
}


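/**
 * vmw_ttm_bind - Bind TTM backing pages to the device
 *
 * @ttm: Pointer to a struct ttm_tt.
 * @bo_mem: The memory region to bind to.
 *
 * Makes sure the backing pages are DMA-mapped, then binds them to the
 * device either as a Guest Memory Region or as a Memory OBject,
 * depending on the memory type of @bo_mem.
 */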
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        int ret;

        ret = vmw_ttm_map_dma(vmw_be);
        if (unlikely(ret != 0))
                return ret;

        vmw_be->gmr_id = bo_mem->start;
        vmw_be->mem_type = bo_mem->mem_type;

        switch (bo_mem->mem_type) {
        case VMW_PL_GMR:
                return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
                                    ttm->num_pages, vmw_be->gmr_id);
        case VMW_PL_MOB:
                if (unlikely(vmw_be->mob == NULL)) {
                        vmw_be->mob =
                                vmw_mob_create(ttm->num_pages);
                        if (unlikely(vmw_be->mob == NULL))
                                return -ENOMEM;
                }

                return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
                                    &vmw_be->vsgt, ttm->num_pages,
                                    vmw_be->gmr_id);
        default:
                BUG();
        }
        return 0;
}

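/**
 * vmw_ttm_unbind - Unbind TTM backing pages from the device
 *
 * @ttm: Pointer to a struct ttm_tt.
 *
 * Releases the GMR or MOB binding set up by vmw_ttm_bind(), and tears
 * down the DMA mappings if they were set up for binding only.
 */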
static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        switch (vmw_be->mem_type) {
        case VMW_PL_GMR:
                vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
                break;
        case VMW_PL_MOB:
                vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
                break;
        default:
                BUG();
        }

        if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
                vmw_ttm_unmap_dma(vmw_be);

        return 0;
}

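/**
 * vmw_ttm_destroy - Free all resources associated with a TTM
 *
 * @ttm: Pointer to a struct ttm_tt.
 *
 * Tears down the DMA mappings, finalizes the embedded (dma) ttm,
 * destroys any remaining MOB and frees the backing structure.
 */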
static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        vmw_ttm_unmap_dma(vmw_be);
        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ttm_dma_tt_fini(&vmw_be->dma_ttm);
        else
                ttm_tt_fini(ttm);

        if (vmw_be->mob)
                vmw_mob_destroy(vmw_be->mob);

        kfree(vmw_be);
}

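/**
 * vmw_ttm_populate - Allocate backing pages for a TTM
 *
 * @ttm: Pointer to a struct ttm_tt.
 *
 * In the coherent mapping mode the pages come from the DMA pool and the
 * DMA address array is accounted to the TTM memory global; in all other
 * modes the ordinary page pool is used.
 */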
static int vmw_ttm_populate(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
                size_t size =
                        ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
                ret = ttm_mem_global_alloc(glob, size, false, true);
                if (unlikely(ret != 0))
                        return ret;

                ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
                if (unlikely(ret != 0))
                        ttm_mem_global_free(glob, size);
        } else
                ret = ttm_pool_populate(ttm);

        return ret;
}

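/**
 * vmw_ttm_unpopulate - Free the backing pages of a TTM
 *
 * @ttm: Pointer to a struct ttm_tt.
 *
 * Destroys any remaining MOB, tears down the DMA mappings and returns
 * the pages to the pool they were allocated from.
 */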
static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
                                                 dma_ttm.ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

        if (vmw_tt->mob) {
                vmw_mob_destroy(vmw_tt->mob);
                vmw_tt->mob = NULL;
        }

        vmw_ttm_unmap_dma(vmw_tt);
        if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
                size_t size =
                        ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

                ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
                ttm_mem_global_free(glob, size);
        } else
                ttm_pool_unpopulate(ttm);
}

static struct ttm_backend_func vmw_ttm_func = {
        .bind = vmw_ttm_bind,
        .unbind = vmw_ttm_unbind,
        .destroy = vmw_ttm_destroy,
};

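/**
 * vmw_ttm_tt_create - TTM ttm_tt_create callback
 *
 * @bdev: Pointer to the TTM bo device.
 * @size: Size of the backing store.
 * @page_flags: TTM page flags.
 * @dummy_read_page: Page backing read-only mappings.
 *
 * Allocates a struct vmw_ttm_tt and initializes the embedded ttm_dma_tt
 * or plain ttm_tt, depending on the DMA mapping mode of the device.
 */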
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
                                        unsigned long size, uint32_t page_flags,
                                        struct page *dummy_read_page)
{
        struct vmw_ttm_tt *vmw_be;
        int ret;

        vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
        if (!vmw_be)
                return NULL;

        vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
        vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
        vmw_be->mob = NULL;

        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
                                      dummy_read_page);
        else
                ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
                                  dummy_read_page);
        if (unlikely(ret != 0))
                goto out_no_init;

        return &vmw_be->dma_ttm.ttm;
out_no_init:
        kfree(vmw_be);
        return NULL;
}

static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        return 0;
}

static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                             struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */

                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case VMW_PL_GMR:
        case VMW_PL_MOB:
                /*
                 * "Guest Memory Regions" is an aperture-like feature with
                 * one slot per bo. There is an upper limit on the number
                 * of slots as well as on the bo size.
                 */
                man->func = &vmw_gmrid_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
                            struct ttm_placement *placement)
{
        *placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        struct ttm_object_file *tfile =
                vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

        return vmw_user_dmabuf_verify_access(bo, tfile);
}

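/**
 * vmw_ttm_io_mem_reserve - TTM io_mem_reserve callback
 *
 * @bdev: Pointer to the TTM bo device.
 * @mem: The memory region to set up bus address information for.
 *
 * Only VRAM is an I/O-mapped aperture; system, GMR and MOB memory need
 * no bus address setup.
 */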
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

        mem->bus.addr = NULL;
        mem->bus.is_iomem = false;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
        case VMW_PL_GMR:
        case VMW_PL_MOB:
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = dev_priv->vram_start;
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        return 0;
}

/**
 * FIXME: We're using the old vmware polling method to sync.
 * Do this with fences instead.
 */

static void *vmw_sync_obj_ref(void *sync_obj)
{
        return (void *)
                vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
}

static void vmw_sync_obj_unref(void **sync_obj)
{
        vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
}

static int vmw_sync_obj_flush(void *sync_obj)
{
        vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
        return 0;
}

static bool vmw_sync_obj_signaled(void *sync_obj)
{
        return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
                                      DRM_VMW_FENCE_FLAG_EXEC);
}

static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
        return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
                                  DRM_VMW_FENCE_FLAG_EXEC,
                                  lazy, interruptible,
                                  VMW_FENCE_WAIT_TIMEOUT);
}

/**
 * vmw_move_notify - TTM move_notify callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
                            struct ttm_mem_reg *mem)
{
        vmw_resource_move_notify(bo, mem);
}


/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 *
 * Waits for the buffer object to become idle before it is swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;

        spin_lock(&bdev->fence_lock);
        ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bdev->fence_lock);
}

struct ttm_bo_driver vmw_bo_driver = {
        .ttm_tt_create = &vmw_ttm_tt_create,
        .ttm_tt_populate = &vmw_ttm_populate,
        .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
        .invalidate_caches = vmw_invalidate_caches,
        .init_mem_type = vmw_init_mem_type,
        .evict_flags = vmw_evict_flags,
        .move = NULL,
        .verify_access = vmw_verify_access,
        .sync_obj_signaled = vmw_sync_obj_signaled,
        .sync_obj_wait = vmw_sync_obj_wait,
        .sync_obj_flush = vmw_sync_obj_flush,
        .sync_obj_unref = vmw_sync_obj_unref,
        .sync_obj_ref = vmw_sync_obj_ref,
        .move_notify = vmw_move_notify,
        .swap_notify = vmw_swap_notify,
        .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
        .io_mem_reserve = &vmw_ttm_io_mem_reserve,
        .io_mem_free = &vmw_ttm_io_mem_free,
};