/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
        TTM_PL_FLAG_CACHED;

static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
        TTM_PL_FLAG_CACHED |
        TTM_PL_FLAG_NO_EVICT;

static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
        TTM_PL_FLAG_CACHED;
static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
        TTM_PL_FLAG_CACHED;

static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
        TTM_PL_FLAG_CACHED |
        TTM_PL_FLAG_NO_EVICT;

struct ttm_placement vmw_vram_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_placement_flags
};

static uint32_t vram_gmr_placement_flags[] = {
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static uint32_t gmr_vram_placement_flags[] = {
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_vram_gmr_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 2,
        .placement = vram_gmr_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_placement_flags
};

static uint32_t vram_gmr_ne_placement_flags[] = {
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 2,
        .placement = vram_gmr_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &vram_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &sys_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

static uint32_t evictable_placement_flags[] = {
        TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_evictable_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 3,
        .placement = evictable_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .num_busy_placement = 2,
        .placement = &gmr_placement_flags,
        .busy_placement = gmr_vram_placement_flags
};

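/*
 * These placements are handed to the TTM core when a buffer object is
 * validated. A minimal sketch of a hypothetical call site (assuming the
 * ttm_bo_validate() signature of this kernel generation):
 *
 *      ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement,
 *                            true, false);
 *
 * where the two bools are interruptible and no_wait_gpu. TTM tries the
 * entries in .placement in order and falls back to .busy_placement when
 * room has to be made by evicting other buffers.
 */
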
struct vmw_ttm_tt {
        struct ttm_dma_tt dma_ttm;
        struct vmw_private *dev_priv;
        int gmr_id;
        struct sg_table sgt;
        struct vmw_sg_table vsgt;
        uint64_t sg_alloc_size;
        bool mapped;
};

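/*
 * The TTM core only ever sees the embedded &vmw_ttm_tt.dma_ttm.ttm that
 * vmw_ttm_tt_create() returns; the callbacks below recover the driver
 * state with container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm).
 */
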
/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
        return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
        return __sg_page_iter_next(&viter->iter);
}


/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
        return viter->pages[viter->i];
}

static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
{
        return sg_page_iter_page(&viter->iter);
}


/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
        return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
        return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
        return sg_page_iter_dma_address(&viter->iter);
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Page offset into @vsgt at which to start iterating
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
                     unsigned long p_offset)
{
        viter->i = p_offset - 1;
        viter->num_pages = vsgt->num_pages;
        switch (vsgt->mode) {
        case vmw_dma_phys:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_phys_addr;
                viter->page = &__vmw_piter_non_sg_page;
                viter->pages = vsgt->pages;
                break;
        case vmw_dma_alloc_coherent:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_dma_addr;
                viter->page = &__vmw_piter_non_sg_page;
                viter->addrs = vsgt->addrs;
                break;
        case vmw_dma_map_populate:
        case vmw_dma_map_bind:
                viter->next = &__vmw_piter_sg_next;
                viter->dma_address = &__vmw_piter_sg_addr;
                viter->page = &__vmw_piter_sg_page;
                __sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
                                     vsgt->sgt->orig_nents, p_offset);
                break;
        default:
                BUG();
        }
}
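
/*
 * Typical use of the iterator, mirroring vmw_ttm_map_dma() below:
 *
 *      struct vmw_piter iter;
 *
 *      for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *              dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *              ...
 *      }
 */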

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct device *dev = vmw_tt->dev_priv->dev->dev;

        dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
                     DMA_BIDIRECTIONAL);
        vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct device *dev = vmw_tt->dev_priv->dev->dev;
        int ret;

        ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
                         DMA_BIDIRECTIONAL);
        if (unlikely(ret == 0))
                return -ENOMEM;

        vmw_tt->sgt.nents = ret;

        return 0;
}
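
/*
 * For reference only (a sketch, not something this driver calls): if the
 * CPU did have to access the pages while they are mapped, the DMA API
 * would require bracketing that access with
 *
 *      dma_sync_sg_for_cpu(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
 *                          DMA_BIDIRECTIONAL);
 *      ... CPU access ...
 *      dma_sync_sg_for_device(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
 *                             DMA_BIDIRECTIONAL);
 */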

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct function for the current DMA mapping mode and make
 * sure the TTM pages are visible to the device. Allocate storage for the
 * device mappings. If a mapping has already been performed, indicated by
 * the storage pointer being non-NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
        struct vmw_piter iter;
        dma_addr_t old;
        int ret = 0;
        static size_t sgl_size;
        static size_t sgt_size;

        if (vmw_tt->mapped)
                return 0;

        vsgt->mode = dev_priv->map_mode;
        vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
        vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
        vsgt->addrs = vmw_tt->dma_ttm.dma_address;
        vsgt->sgt = &vmw_tt->sgt;

        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
                if (unlikely(!sgl_size)) {
                        sgl_size = ttm_round_pot(sizeof(struct scatterlist));
                        sgt_size = ttm_round_pot(sizeof(struct sg_table));
                }
                vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
                ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
                                           true);
                if (unlikely(ret != 0))
                        return ret;

                ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
                                                vsgt->num_pages, 0,
                                                (unsigned long)
                                                vsgt->num_pages << PAGE_SHIFT,
                                                GFP_KERNEL);
                if (unlikely(ret != 0))
                        goto out_sg_alloc_fail;

                if (vsgt->num_pages > vmw_tt->sgt.nents) {
                        uint64_t over_alloc =
                                sgl_size * (vsgt->num_pages -
                                            vmw_tt->sgt.nents);

                        ttm_mem_global_free(glob, over_alloc);
                        vmw_tt->sg_alloc_size -= over_alloc;
                }

                ret = vmw_ttm_map_for_dma(vmw_tt);
                if (unlikely(ret != 0))
                        goto out_map_fail;

                break;
        default:
                break;
        }

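        /*
         * Count the number of contiguous DMA regions: each device address
         * that doesn't directly follow its predecessor starts a new region.
         */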
        old = ~((dma_addr_t) 0);
        vmw_tt->vsgt.num_regions = 0;
        for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
                dma_addr_t cur = vmw_piter_dma_addr(&iter);

                if (cur != old + PAGE_SIZE)
                        vmw_tt->vsgt.num_regions++;
                old = cur;
        }

        vmw_tt->mapped = true;
        return 0;

out_map_fail:
        sg_free_table(vmw_tt->vsgt.sgt);
        vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
        ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
        return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct vmw_private *dev_priv = vmw_tt->dev_priv;

        if (!vmw_tt->vsgt.sgt)
                return;

        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
                vmw_ttm_unmap_from_dma(vmw_tt);
                sg_free_table(vmw_tt->vsgt.sgt);
                vmw_tt->vsgt.sgt = NULL;
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_tt->sg_alloc_size);
                break;
        default:
                break;
        }
        vmw_tt->mapped = false;
}

static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        int ret;

        ret = vmw_ttm_map_dma(vmw_be);
        if (unlikely(ret != 0))
                return ret;

        vmw_be->gmr_id = bo_mem->start;

        return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
                            ttm->num_pages, vmw_be->gmr_id);
}

static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);

        if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
                vmw_ttm_unmap_dma(vmw_be);

        return 0;
}

static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        vmw_ttm_unmap_dma(vmw_be);
        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ttm_dma_tt_fini(&vmw_be->dma_ttm);
        else
                ttm_tt_fini(ttm);
        kfree(vmw_be);
}

static int vmw_ttm_populate(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
                size_t size =
                        ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
                ret = ttm_mem_global_alloc(glob, size, false, true);
                if (unlikely(ret != 0))
                        return ret;

                ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
                if (unlikely(ret != 0))
                        ttm_mem_global_free(glob, size);
        } else
                ret = ttm_pool_populate(ttm);

        return ret;
}

static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
                                                 dma_ttm.ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

        vmw_ttm_unmap_dma(vmw_tt);
        if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
                size_t size =
                        ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

                ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
                ttm_mem_global_free(glob, size);
        } else
                ttm_pool_unpopulate(ttm);
}

static struct ttm_backend_func vmw_ttm_func = {
        .bind = vmw_ttm_bind,
        .unbind = vmw_ttm_unbind,
        .destroy = vmw_ttm_destroy,
};

struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
                                 unsigned long size, uint32_t page_flags,
                                 struct page *dummy_read_page)
{
        struct vmw_ttm_tt *vmw_be;
        int ret;

        vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
        if (!vmw_be)
                return NULL;

        vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
        vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);

        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
                                      dummy_read_page);
        else
                ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
                                  dummy_read_page);
        if (unlikely(ret != 0))
                goto out_no_init;

        return &vmw_be->dma_ttm.ttm;
out_no_init:
        kfree(vmw_be);
        return NULL;
}

int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        return 0;
}

int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                      struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */

                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case VMW_PL_GMR:
                /*
                 * "Guest Memory Regions" is an aperture-like feature with
                 * one slot per bo. There is an upper limit on the number
                 * of slots as well as on the bo size.
                 */
                man->func = &vmw_gmrid_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

void vmw_evict_flags(struct ttm_buffer_object *bo,
                     struct ttm_placement *placement)
{
        *placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        struct ttm_object_file *tfile =
                vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

        return vmw_user_dmabuf_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

        mem->bus.addr = NULL;
        mem->bus.is_iomem = false;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
        case VMW_PL_GMR:
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = dev_priv->vram_start;
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
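
/*
 * With the bus fields set up above, the TTM core of this generation
 * ioremaps VRAM at the physical address mem->bus.base + mem->bus.offset;
 * system and GMR memory is never treated as iomem.
 */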

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        return 0;
}

/**
 * FIXME: We're using the old vmware polling method to sync.
 * Do this with fences instead.
 */

static void *vmw_sync_obj_ref(void *sync_obj)
{
        return (void *)
                vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
}

static void vmw_sync_obj_unref(void **sync_obj)
{
        vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
}

static int vmw_sync_obj_flush(void *sync_obj)
{
        vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
        return 0;
}

static bool vmw_sync_obj_signaled(void *sync_obj)
{
        return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
                                      DRM_VMW_FENCE_FLAG_EXEC);
}

static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
        return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
                                  DRM_VMW_FENCE_FLAG_EXEC,
                                  lazy, interruptible,
                                  VMW_FENCE_WAIT_TIMEOUT);
}

struct ttm_bo_driver vmw_bo_driver = {
        .ttm_tt_create = &vmw_ttm_tt_create,
        .ttm_tt_populate = &vmw_ttm_populate,
        .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
        .invalidate_caches = vmw_invalidate_caches,
        .init_mem_type = vmw_init_mem_type,
        .evict_flags = vmw_evict_flags,
        .move = NULL,
        .verify_access = vmw_verify_access,
        .sync_obj_signaled = vmw_sync_obj_signaled,
        .sync_obj_wait = vmw_sync_obj_wait,
        .sync_obj_flush = vmw_sync_obj_flush,
        .sync_obj_unref = vmw_sync_obj_unref,
        .sync_obj_ref = vmw_sync_obj_ref,
        .move_notify = NULL,
        .swap_notify = NULL,
        .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
        .io_mem_reserve = &vmw_ttm_io_mem_reserve,
        .io_mem_free = &vmw_ttm_io_mem_free,
};