/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>

static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				       struct vm_fault *vmf)
{
	vm_fault_t ret = 0;
	int err = 0;

	if (likely(!bo->moving))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_fence_is_signaled(bo->moving))
		goto out_clear;

	/*
	 * If possible, avoid waiting for GPU with mmap_lock
	 * held. We only do this if the fault allows retry and this
	 * is the first attempt.
	 */
	if (fault_flag_allow_retry_first(vmf->flags)) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

		ttm_bo_get(bo);
		mmap_read_unlock(vmf->vma->vm_mm);
		(void) dma_fence_wait(bo->moving, true);
		dma_resv_unlock(bo->base.resv);
		ttm_bo_put(bo);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	err = dma_fence_wait(bo->moving, true);
	if (unlikely(err != 0)) {
		ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
		goto out_unlock;
	}

out_clear:
	dma_fence_put(bo->moving);
	bo->moving = NULL;

out_unlock:
	return ret;
}

static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
				       unsigned long page_offset)
{
	struct ttm_bo_device *bdev = bo->bdev;

	if (bdev->driver->io_mem_pfn)
		return bdev->driver->io_mem_pfn(bo, page_offset);

	return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
		+ page_offset;
}
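
/*
 * Illustrative example (the numbers are hypothetical, not from this file):
 * with 4 KiB pages (PAGE_SHIFT == 12), bus.base == 0xe0000000 and
 * bus.offset == 0x100000, the default path above yields
 * pfn == 0xe0100 + page_offset, i.e. the page frame of the BO's first
 * byte in the I/O aperture plus the page offset into the BO.
 */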

/**
 * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
 * @bo: The buffer object
 * @vmf: The fault structure handed to the callback
 *
 * vm callbacks like fault() and *_mkwrite() allow for the mm_sem to be dropped
 * during long waits, and after the wait the callback will be restarted. This
 * is to allow other threads using the same virtual memory space concurrent
 * access to map() and unmap() completely unrelated buffer objects. TTM buffer
 * object reservations sometimes wait for GPU and should therefore be
 * considered long waits. This function reserves the buffer object
 * interruptibly taking this into account. Starvation is avoided by the vm
 * system not allowing too many repeated restarts.
 * This function is intended to be used in customized fault() and _mkwrite()
 * handlers.
 *
 * Return:
 *    0 on success and the bo was reserved.
 *    VM_FAULT_RETRY if blocking wait.
 *    VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
 */
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
			     struct vm_fault *vmf)
{
	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_lock and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
		/*
		 * If the fault allows retry and this is the first
		 * fault attempt, we try to release the mmap_lock
		 * before waiting
		 */
		if (fault_flag_allow_retry_first(vmf->flags)) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				ttm_bo_get(bo);
				mmap_read_unlock(vmf->vma->vm_mm);
				if (!dma_resv_lock_interruptible(bo->base.resv,
								 NULL))
					dma_resv_unlock(bo->base.resv);
				ttm_bo_put(bo);
			}

			return VM_FAULT_RETRY;
		}

		if (dma_resv_lock_interruptible(bo->base.resv, NULL))
			return VM_FAULT_NOPAGE;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vm_reserve);
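
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * customized fault() handler would typically call ttm_bo_vm_reserve()
 * first and forward any nonzero return code. The "mydrv" names below are
 * illustrative only; the pattern mirrors the generic ttm_bo_vm_fault()
 * further down in this file.
 *
 *	static vm_fault_t mydrv_bo_vm_fault(struct vm_fault *vmf)
 *	{
 *		struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
 *		vm_fault_t ret;
 *
 *		ret = ttm_bo_vm_reserve(bo, vmf);
 *		if (ret)
 *			return ret;
 *
 *		(driver-specific work on the reserved bo goes here)
 *
 *		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
 *					       TTM_BO_VM_NUM_PREFAULT, 1);
 *		if (ret == VM_FAULT_RETRY &&
 *		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 *			return ret;
 *
 *		dma_resv_unlock(bo->base.resv);
 *		return ret;
 *	}
 */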

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/**
 * ttm_bo_vm_insert_huge - Insert a pfn for PUD or PMD faults
 * @vmf: Fault data
 * @bo: The buffer object
 * @page_offset: Page offset from bo start
 * @fault_page_size: The size of the fault in pages.
 * @pgprot: The page protections.
 * Does additional checking whether it's possible to insert a PUD or PMD
 * pfn and performs the insertion.
 *
 * Return: VM_FAULT_NOPAGE on successful insertion, VM_FAULT_FALLBACK if
 * a huge fault was not possible, or on insertion error.
 */
static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
					struct ttm_buffer_object *bo,
					pgoff_t page_offset,
					pgoff_t fault_page_size,
					pgprot_t pgprot)
{
	pgoff_t i;
	vm_fault_t ret;
	unsigned long pfn;
	pfn_t pfnt;
	struct ttm_tt *ttm = bo->ttm;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	/* Fault should not cross bo boundary. */
	page_offset &= ~(fault_page_size - 1);
	if (page_offset + fault_page_size > bo->num_pages)
		goto out_fallback;

	if (bo->mem.bus.is_iomem)
		pfn = ttm_bo_io_mem_pfn(bo, page_offset);
	else
		pfn = page_to_pfn(ttm->pages[page_offset]);

	/* pfn must be fault_page_size aligned. */
	if ((pfn & (fault_page_size - 1)) != 0)
		goto out_fallback;
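
	/*
	 * Illustrative numbers (hypothetical, not from this file): with
	 * 4 KiB base pages on x86-64, a PMD-sized fault has
	 * fault_page_size == 512 (2 MiB / 4 KiB), so the checks above
	 * require both page_offset and pfn to be 512-aligned before a
	 * huge entry may be inserted.
	 */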

	/* Check that memory is contiguous. */
	if (!bo->mem.bus.is_iomem) {
		for (i = 1; i < fault_page_size; ++i) {
			if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
				goto out_fallback;
		}
	} else if (bo->bdev->driver->io_mem_pfn) {
		for (i = 1; i < fault_page_size; ++i) {
			if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i)
				goto out_fallback;
		}
	}

	pfnt = __pfn_to_pfn_t(pfn, PFN_DEV);
	if (fault_page_size == (HPAGE_PMD_SIZE >> PAGE_SHIFT))
		ret = vmf_insert_pfn_pmd_prot(vmf, pfnt, pgprot, write);
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT))
		ret = vmf_insert_pfn_pud_prot(vmf, pfnt, pgprot, write);
#endif
	else
		WARN_ON_ONCE(ret = VM_FAULT_FALLBACK);

	if (ret != VM_FAULT_NOPAGE)
		goto out_fallback;

	return VM_FAULT_NOPAGE;
out_fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	return VM_FAULT_FALLBACK;
}
#else
static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
					struct ttm_buffer_object *bo,
					pgoff_t page_offset,
					pgoff_t fault_page_size,
					pgprot_t pgprot)
{
	return VM_FAULT_FALLBACK;
}
#endif

/**
 * ttm_bo_vm_fault_reserved - TTM fault helper
 * @vmf: The struct vm_fault given as argument to the fault callback
 * @prot: The page protection to be used for this memory area.
 * @num_prefault: Maximum number of prefault pages. The caller may want to
 * specify this based on madvise settings and the size of the GPU object
 * backed by the memory.
 * @fault_page_size: The size of the fault in pages.
 *
 * This function inserts one or more page table entries pointing to the
 * memory backing the buffer object, and then returns a return code
 * instructing the caller to retry the page access.
 *
 * Return:
 *   VM_FAULT_NOPAGE on success or pending signal
 *   VM_FAULT_SIGBUS on unspecified error
 *   VM_FAULT_OOM on out-of-memory
 *   VM_FAULT_RETRY if retryable wait
 */
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
				    pgprot_t prot,
				    pgoff_t num_prefault,
				    pgoff_t fault_page_size)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int err;
	pgoff_t i;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	unsigned long address = vmf->address;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
		return VM_FAULT_SIGBUS;

	if (bdev->driver->fault_reserve_notify) {
		struct dma_fence *moving = dma_fence_get(bo->moving);

		err = bdev->driver->fault_reserve_notify(bo);
		switch (err) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			dma_fence_put(moving);
			return VM_FAULT_NOPAGE;
		default:
			dma_fence_put(moving);
			return VM_FAULT_SIGBUS;
		}

		if (bo->moving != moving) {
			spin_lock(&ttm_bo_glob.lru_lock);
			ttm_bo_move_to_lru_tail(bo, NULL);
			spin_unlock(&ttm_bo_glob.lru_lock);
		}
		dma_fence_put(moving);
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vmf);
	if (unlikely(ret != 0))
		return ret;

	err = ttm_mem_io_lock(man, true);
	if (unlikely(err != 0))
		return VM_FAULT_NOPAGE;
	err = ttm_mem_io_reserve_vm(bo);
	if (unlikely(err != 0)) {
		ret = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->base.vma_node);

	if (unlikely(page_offset >= bo->num_pages)) {
		ret = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	prot = ttm_io_prot(bo->mem.placement, prot);
	if (!bo->mem.bus.is_iomem) {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.flags = TTM_OPT_FLAG_FORCE_ALLOC
		};

		ttm = bo->ttm;
		if (ttm_tt_populate(bo->ttm, &ctx)) {
			ret = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	} else {
		/* Iomem should not be marked encrypted */
		prot = pgprot_decrypted(prot);
	}

	/* We don't prefault on huge faults. Yet. */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) {
		ret = ttm_bo_vm_insert_huge(vmf, bo, page_offset,
					    fault_page_size, prot);
		goto out_io_unlock;
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < num_prefault; ++i) {
		if (bo->mem.bus.is_iomem) {
			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
		} else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				ret = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			page->index = drm_vma_node_start(&bo->base.vma_node) +
				page_offset;
			pfn = page_to_pfn(page);
		}

		/*
		 * Note that the value of @prot at this point may differ from
		 * the value of @vma->vm_page_prot in the caching- and
		 * encryption bits. This is because the exact location of the
		 * data may not be known at mmap() time and may also change
		 * at arbitrary times while the data is mmap'ed.
		 * See vmf_insert_mixed_prot() for a discussion.
		 */
		if (vma->vm_flags & VM_MIXEDMAP)
			ret = vmf_insert_mixed_prot(vma, address,
						    __pfn_to_pfn_t(pfn, PFN_DEV),
						    prot);
		else
			ret = vmf_insert_pfn_prot(vma, address, pfn, prot);

		/* Never error on prefaulted PTEs */
		if (unlikely((ret & VM_FAULT_ERROR))) {
			if (i == 0)
				goto out_io_unlock;
			else
				break;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
	ret = VM_FAULT_NOPAGE;
out_io_unlock:
	ttm_mem_io_unlock(man);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);

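/*
 * Generic fault() implementation: reserve the bo, delegate to
 * ttm_bo_vm_fault_reserved() and drop the reservation unless a retry
 * was requested. Drivers without special needs can plug this straight
 * into their vm_operations_struct, as ttm_bo_vm_ops below does.
 */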
vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t prot;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	prot = vma->vm_page_prot;
	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault);

void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;

	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

	ttm_bo_get(bo);
}
EXPORT_SYMBOL(ttm_bo_vm_open);

void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;

	ttm_bo_put(bo);
	vma->vm_private_data = NULL;
}
EXPORT_SYMBOL(ttm_bo_vm_close);

static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
				 unsigned long offset,
				 uint8_t *buf, int len, int write)
{
	unsigned long page = offset >> PAGE_SHIFT;
	unsigned long bytes_left = len;
	int ret;

	/* Copy a page at a time, that way no extra virtual address
	 * mapping is needed
	 */
	offset -= page << PAGE_SHIFT;
	do {
		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
		struct ttm_bo_kmap_obj map;
		void *ptr;
		bool is_iomem;

		ret = ttm_bo_kmap(bo, page, 1, &map);
		if (ret)
			return ret;

		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
		WARN_ON_ONCE(is_iomem);
		if (write)
			memcpy(ptr, buf, bytes);
		else
			memcpy(buf, ptr, bytes);
		ttm_bo_kunmap(&map);

		page++;
		buf += bytes;
		bytes_left -= bytes;
		offset = 0;
	} while (bytes_left);

	return len;
}
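
/*
 * Worked example (hypothetical numbers): a read of len == 0x20 bytes at
 * offset == 0x1ff0 with 4 KiB pages straddles a page boundary, so the
 * loop above performs two kmaps: 0x10 bytes from the end of page 1,
 * then 0x10 bytes from the start of page 2.
 */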

int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
		     void *buf, int len, int write)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;
	unsigned long offset = (addr) - vma->vm_start +
		((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
		 << PAGE_SHIFT);
	int ret;

	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
		return -EIO;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	switch (bo->mem.mem_type) {
	case TTM_PL_SYSTEM:
		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
			ret = ttm_tt_swapin(bo->ttm);
			if (unlikely(ret != 0)) {
				/* Don't leak the reservation on error. */
				ttm_bo_unreserve(bo);
				return ret;
			}
		}
		fallthrough;
	case TTM_PL_TT:
		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
		break;
	default:
		if (bo->bdev->driver->access_memory)
			ret = bo->bdev->driver->access_memory(
				bo, offset, buf, len, write);
		else
			ret = -EIO;
	}

	ttm_bo_unreserve(bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_access);

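/*
 * Default vm_ops wired up by ttm_bo_mmap_vma_setup() below. The .access
 * hook backs access_process_vm(), which is what lets debuggers such as
 * gdb (via ptrace) read and write BO-backed mappings.
 */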
static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access,
};

static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object,
				  base.vma_node);
		bo = ttm_bo_get_unless_zero(bo);
	}

	drm_vma_offset_unlock_lookup(bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}

static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_struct *vma)
{
	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;

	/*
	 * We'd like to use VM_PFNMAP on shared mappings, where
	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
	 * bad for performance. Until that has been sorted out, use
	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
	 */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
}

int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
		return -EINVAL;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	ttm_bo_mmap_vma_setup(bo, vma);
	return 0;
out_unref:
	ttm_bo_put(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	ttm_bo_get(bo);
	ttm_bo_mmap_vma_setup(bo, vma);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_mmap_obj);
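
/*
 * Usage sketch (hypothetical, not part of this file): a driver exposing
 * its BOs through GEM objects can forward a GEM mmap request to
 * ttm_bo_mmap_obj() once the BO has been resolved; the "mydrv" names are
 * illustrative only. Note that ttm_bo_mmap_obj() takes its own bo
 * reference, which is transferred to vma->vm_private_data.
 *
 *	static int mydrv_gem_mmap(struct drm_gem_object *obj,
 *				  struct vm_area_struct *vma)
 *	{
 *		struct ttm_buffer_object *bo = mydrv_gem_to_bo(obj);
 *
 *		return ttm_bo_mmap_obj(vma, bo);
 *	}
 */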