/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>

#define TTM_BO_VM_NUM_PREFAULT 16

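/*
 * Wait for a pipelined move (bo->moving) to complete before the fault
 * handler touches the buffer, dropping mmap_sem while waiting when the
 * fault flags allow a retry.
 */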
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                                       struct vm_fault *vmf)
{
        vm_fault_t ret = 0;
        int err = 0;

        if (likely(!bo->moving))
                goto out_unlock;

        /*
         * Quick non-stalling check for idle.
         */
        if (dma_fence_is_signaled(bo->moving))
                goto out_clear;

        /*
         * If possible, avoid waiting for GPU with mmap_sem
         * held.
         */
        if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
                ret = VM_FAULT_RETRY;
                if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
                        goto out_unlock;

                ttm_bo_get(bo);
                up_read(&vmf->vma->vm_mm->mmap_sem);
                (void) dma_fence_wait(bo->moving, true);
                dma_resv_unlock(bo->base.resv);
                ttm_bo_put(bo);
                goto out_unlock;
        }

        /*
         * Ordinary wait.
         */
        err = dma_fence_wait(bo->moving, true);
        if (unlikely(err != 0)) {
                ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
                        VM_FAULT_NOPAGE;
                goto out_unlock;
        }

out_clear:
        dma_fence_put(bo->moving);
        bo->moving = NULL;

out_unlock:
        return ret;
}

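/*
 * Translate a page offset inside an io-memory buffer into a pfn, using
 * the driver's io_mem_pfn() hook when one is provided and falling back
 * to the linear bus address otherwise.
 */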
static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
                                       unsigned long page_offset)
{
        struct ttm_bo_device *bdev = bo->bdev;

        if (bdev->driver->io_mem_pfn)
                return bdev->driver->io_mem_pfn(bo, page_offset);

        return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
                + page_offset;
}

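/*
 * Main fault handler: reserve the buffer with a trylock to avoid
 * inverting the established mmap_sem -> bo::reserve locking order, wait
 * for any pending move, and prefault up to TTM_BO_VM_NUM_PREFAULT pages
 * starting at the faulting address.
 */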
static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
            vma->vm_private_data;
        struct ttm_bo_device *bdev = bo->bdev;
        unsigned long page_offset;
        unsigned long page_last;
        unsigned long pfn;
        struct ttm_tt *ttm = NULL;
        struct page *page;
        int err;
        int i;
        vm_fault_t ret = VM_FAULT_NOPAGE;
        unsigned long address = vmf->address;
        struct ttm_mem_type_manager *man =
                &bdev->man[bo->mem.mem_type];
        struct vm_area_struct cvma;

        /*
         * Work around locking order reversal in fault / nopfn
         * between mmap_sem and bo_reserve: Perform a trylock operation
         * for reserve, and if it fails, retry the fault after waiting
         * for the buffer to become unreserved.
         */
        if (unlikely(!dma_resv_trylock(bo->base.resv))) {
                if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
                        if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
                                ttm_bo_get(bo);
                                up_read(&vmf->vma->vm_mm->mmap_sem);
                                (void) ttm_bo_wait_unreserved(bo);
                                ttm_bo_put(bo);
                        }

                        return VM_FAULT_RETRY;
                }

                /*
                 * If we'd want to change locking order to
                 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
                 * instead of retrying the fault...
                 */
                return VM_FAULT_NOPAGE;
        }

        /*
         * Refuse to fault imported pages. This should be handled
         * (if at all) by redirecting mmap to the exporter.
         */
        if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
                ret = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        if (bdev->driver->fault_reserve_notify) {
                struct dma_fence *moving = dma_fence_get(bo->moving);

                err = bdev->driver->fault_reserve_notify(bo);
                switch (err) {
                case 0:
                        break;
                case -EBUSY:
                case -ERESTARTSYS:
                        ret = VM_FAULT_NOPAGE;
                        goto out_unlock;
                default:
                        ret = VM_FAULT_SIGBUS;
                        goto out_unlock;
                }

                if (bo->moving != moving) {
                        spin_lock(&bdev->glob->lru_lock);
                        ttm_bo_move_to_lru_tail(bo, NULL);
                        spin_unlock(&bdev->glob->lru_lock);
                }
                dma_fence_put(moving);
        }

        /*
         * Wait for buffer data in transit, due to a pipelined
         * move.
         */
        ret = ttm_bo_vm_fault_idle(bo, vmf);
        if (unlikely(ret != 0)) {
                if (ret == VM_FAULT_RETRY &&
                    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
                        /* The BO has already been unreserved. */
                        return ret;
                }

                goto out_unlock;
        }

        err = ttm_mem_io_lock(man, true);
        if (unlikely(err != 0)) {
                ret = VM_FAULT_NOPAGE;
                goto out_unlock;
        }
        err = ttm_mem_io_reserve_vm(bo);
        if (unlikely(err != 0)) {
                ret = VM_FAULT_SIGBUS;
                goto out_io_unlock;
        }

        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
                vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
        page_last = vma_pages(vma) + vma->vm_pgoff -
                drm_vma_node_start(&bo->base.vma_node);

        if (unlikely(page_offset >= bo->num_pages)) {
                ret = VM_FAULT_SIGBUS;
                goto out_io_unlock;
        }

        /*
         * Make a local vma copy to modify the page_prot member
         * and vm_flags if necessary. The vma parameter is protected
         * by mmap_sem in write mode.
         */
        cvma = *vma;
        cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

        if (bo->mem.bus.is_iomem) {
                cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                cvma.vm_page_prot);
        } else {
                struct ttm_operation_ctx ctx = {
                        .interruptible = false,
                        .no_wait_gpu = false,
                        .flags = TTM_OPT_FLAG_FORCE_ALLOC
                };

                ttm = bo->ttm;
                cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                cvma.vm_page_prot);

                /* Allocate all pages at once, the most common usage */
                if (ttm_tt_populate(ttm, &ctx)) {
                        ret = VM_FAULT_OOM;
                        goto out_io_unlock;
                }
        }

        /*
         * Speculatively prefault a number of pages. Only error on the
         * first page.
         */
        for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
                if (bo->mem.bus.is_iomem) {
                        /* Iomem should not be marked encrypted */
                        cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
                        pfn = ttm_bo_io_mem_pfn(bo, page_offset);
                } else {
                        page = ttm->pages[page_offset];
                        if (unlikely(!page && i == 0)) {
                                ret = VM_FAULT_OOM;
                                goto out_io_unlock;
                        } else if (unlikely(!page)) {
                                break;
                        }
                        page->index = drm_vma_node_start(&bo->base.vma_node) +
                                page_offset;
                        pfn = page_to_pfn(page);
                }

                if (vma->vm_flags & VM_MIXEDMAP)
                        ret = vmf_insert_mixed(&cvma, address,
                                        __pfn_to_pfn_t(pfn, PFN_DEV));
                else
                        ret = vmf_insert_pfn(&cvma, address, pfn);

                /*
                 * Somebody either beat us to this PTE, we prefaulted
                 * into an already populated PTE, or the insertion failed.
                 */
                if (unlikely(ret == VM_FAULT_NOPAGE && i > 0))
                        break;
                else if (unlikely(ret & VM_FAULT_ERROR))
                        goto out_io_unlock;

                address += PAGE_SIZE;
                if (unlikely(++page_offset >= page_last))
                        break;
        }
        ret = VM_FAULT_NOPAGE;
out_io_unlock:
        ttm_mem_io_unlock(man);
out_unlock:
        dma_resv_unlock(bo->base.resv);
        return ret;
}

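/*
 * vma open/close callbacks: duplicating a mapping (e.g. across fork())
 * takes an extra reference on the buffer object, which the close
 * callback drops again.
 */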
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo =
            (struct ttm_buffer_object *)vma->vm_private_data;

        WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

        ttm_bo_get(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

        ttm_bo_put(bo);
        vma->vm_private_data = NULL;
}

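/*
 * Helper for ttm_bo_vm_access(): copy between a caller-supplied buffer
 * and a buffer object that is reachable through ttm_bo_kmap().
 */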
static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
                                 unsigned long offset,
                                 uint8_t *buf, int len, int write)
{
        unsigned long page = offset >> PAGE_SHIFT;
        unsigned long bytes_left = len;
        int ret;

        /* Copy a page at a time so that no extra virtual address
         * mapping is needed.
         */
        offset -= page << PAGE_SHIFT;
        do {
                unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
                struct ttm_bo_kmap_obj map;
                void *ptr;
                bool is_iomem;

                ret = ttm_bo_kmap(bo, page, 1, &map);
                if (ret)
                        return ret;

                ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
                WARN_ON_ONCE(is_iomem);
                if (write)
                        memcpy(ptr, buf, bytes);
                else
                        memcpy(buf, ptr, bytes);
                ttm_bo_kunmap(&map);

                page++;
                buf += bytes;
                bytes_left -= bytes;
                offset = 0;
        } while (bytes_left);

        return len;
}

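/*
 * The vm_operations_struct access() callback, used e.g. by ptrace and
 * gdb to read from and write to a mapped buffer object. System and TT
 * placements are accessed through a kernel mapping; other placements are
 * deferred to the driver's access_memory() hook, when present.
 */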
static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
                            void *buf, int len, int write)
{
        unsigned long offset = addr - vma->vm_start;
        struct ttm_buffer_object *bo = vma->vm_private_data;
        int ret;

        if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
                return -EIO;

        ret = ttm_bo_reserve(bo, true, false, NULL);
        if (ret)
                return ret;

        switch (bo->mem.mem_type) {
        case TTM_PL_SYSTEM:
                if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
                        ret = ttm_tt_swapin(bo->ttm);
                        if (unlikely(ret != 0))
                                goto out_unreserve;
                }
                /* fall through */
        case TTM_PL_TT:
                ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
                break;
        default:
                if (bo->bdev->driver->access_memory)
                        ret = bo->bdev->driver->access_memory(
                                bo, offset, buf, len, write);
                else
                        ret = -EIO;
        }

out_unreserve:
        ttm_bo_unreserve(bo);

        return ret;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
        .fault = ttm_bo_vm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close,
        .access = ttm_bo_vm_access
};

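/*
 * Resolve a mmap offset to the buffer object backing it, taking a
 * reference on the object unless it is already on its way to
 * destruction.
 */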
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
                                                  unsigned long offset,
                                                  unsigned long pages)
{
        struct drm_vma_offset_node *node;
        struct ttm_buffer_object *bo = NULL;

        drm_vma_offset_lock_lookup(&bdev->vma_manager);

        node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
        if (likely(node)) {
                bo = container_of(node, struct ttm_buffer_object,
                                  base.vma_node);
                bo = ttm_bo_get_unless_zero(bo);
        }

        drm_vma_offset_unlock_lookup(&bdev->vma_manager);

        if (!bo)
                pr_err("Could not find buffer object to map\n");

        return bo;
}

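/*
 * ttm_bo_mmap - map the buffer object identified by vma->vm_pgoff into
 * the given vma, after checking the mapping against the driver's
 * verify_access() callback. On success, the looked-up buffer object
 * reference is transferred to vma->vm_private_data.
 */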
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
                struct ttm_bo_device *bdev)
{
        struct ttm_bo_driver *driver;
        struct ttm_buffer_object *bo;
        int ret;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
                return -EINVAL;

        bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
        if (unlikely(!bo))
                return -EINVAL;

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }
        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        vma->vm_ops = &ttm_bo_vm_ops;

        /*
         * Note: We're transferring the bo reference to
         * vma->vm_private_data here.
         */
        vma->vm_private_data = bo;

        /*
         * We'd like to use VM_PFNMAP on shared mappings, where
         * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
         * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
         * bad for performance. Until that has been sorted out, use
         * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
         */
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
out_unref:
        ttm_bo_put(bo);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
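
/*
 * A minimal sketch of how a driver typically hooks this up from its
 * file_operations::mmap callback (the foo_* names are hypothetical):
 *
 *	static int foo_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct foo_device *fdev = foo_device_from_file(filp);
 *
 *		return ttm_bo_mmap(filp, vma, &fdev->bdev);
 *	}
 */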
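/*
 * Single-object variant used for fbdev emulation: map the given buffer
 * object at offset zero, without going through the vma offset manager or
 * verify_access().
 */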
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
        if (vma->vm_pgoff != 0)
                return -EACCES;

        ttm_bo_get(bo);

        vma->vm_ops = &ttm_bo_vm_ops;
        vma->vm_private_data = bo;
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_flags |= VM_IO | VM_DONTEXPAND;
        return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);