/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <drm/drmP.h>
#include <drm/ttm/ttm_page_alloc.h>

#include <drm/nouveau_drm.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"
#include "nouveau_fence.h"
#include "nouveau_ramht.h"

#include <linux/log2.h>
#include <linux/slab.h>

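/* TTM destroy callback: warn if the GEM wrapper is still attached, then
 * release the buffer's tile region and free the nouveau_bo itself.
 */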
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

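/* Round the requested size/alignment up to the tiling pitch required by
 * pre-NV50 chipsets, or to the buffer's GPU page size on NV50 and later.
 */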
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

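/* Create a nouveau_bo and hand its embedded ttm_buffer_object to TTM, which
 * takes over lifetime management; nouveau_bo_del_ttm is the destructor,
 * even on init failure.
 */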
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nvbo->page_shift = 12;
	if (dev_priv->bar1_vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

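/* Expand a TTM placement mask into the per-entry flag array TTM expects. */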
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

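/* Pins nest via pin_refcnt; only the first pin changes placement and the
 * aperture accounting, and a buffer already pinned into a different memory
 * type is refused.
 */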
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

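/* Indexed CPU accessors for a kmap'd buffer; ttm_kmap_obj_virtual() tells
 * us whether the mapping is I/O or system memory, so the right accessor
 * (ioreadXX/iowriteXX vs plain dereference) can be used.
 */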
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
		      unsigned long size, uint32_t page_flags,
		      struct page *dummy_read_page)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_tt_create(bdev, dev->agp->bridge,
					 size, page_flags, dummy_read_page);
#endif
	case NOUVEAU_GART_PDMA:
	case NOUVEAU_GART_HW:
		return nouveau_sgdma_create_ttm(bdev, size, page_flags,
						dummy_read_page);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

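/* Describe each memory type (system, VRAM, GART) to TTM: which manager
 * runs it, whether it is mappable, and the caching modes it supports.
 */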
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (dev_priv->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (dev_priv->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
			man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case NOUVEAU_GART_PDMA:
		case NOUVEAU_GART_HW:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}


/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

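/* The per-generation copy routines below each drive one engine/class;
 * nouveau_bo_move_init() probes for the newest one the channel supports.
 */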
static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
	}
	return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* COPY */);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
					 &chan->m2mf_ntfy);
	if (ret == 0) {
		ret = RING_SPACE(chan, 6);
		if (ret == 0) {
			BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
			OUT_RING  (chan, handle);
			BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
			OUT_RING  (chan, NvNotify0);
			OUT_RING  (chan, NvDmaFB);
			OUT_RING  (chan, NvDmaFB);
		} else {
			nouveau_ramht_remove(chan, NvNotify0);
		}
	}

	return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int ret;

	while (length) {
		u32 amount, stride, height;

		amount = min(length, (u64)(4 * 1024 * 1024));
		stride = 16 * 4;
		height = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
					 &chan->m2mf_ntfy);
	if (ret == 0) {
		ret = RING_SPACE(chan, 4);
		if (ret == 0) {
			BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
			OUT_RING  (chan, handle);
			BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
			OUT_RING  (chan, NvNotify0);
		}
	}

	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
			     node->page_shift, NV_MEM_ACCESS_RO, vma);
	if (ret)
		return ret;

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
	else
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);

	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = dev_priv->channel;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (dev_priv->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
		if (ret)
			goto out;

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
		if (ret)
			goto out;
	}

	ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

out:
	mutex_unlock(&chan->mutex);
	return ret;
}

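/* Probe the copy methods from newest to oldest, hooking up the first one
 * whose object class and init routine both succeed; otherwise buffer
 * copies stay on the CPU.
 */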
void
nouveau_bo_move_init(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	static const struct {
		const char *name;
		int engine;
		u32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
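		/* the empty entry above terminates the probe loop, so the
		 * nv98 PCRYPT method below is never selected */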
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		u32 handle = (mthd->engine << 16) | mthd->oclass;
		ret = nouveau_gpuobj_gr_new(chan, handle, mthd->oclass);
		if (ret == 0) {
			ret = mthd->init(chan, handle);
			if (ret == 0) {
				dev_priv->ttm.move = mthd->exec;
				name = mthd->name;
				break;
			}
		}
	} while ((++mthd)->exec);

	NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name);
}

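/* Moves to system memory bounce through a temporary GART placement so the
 * copy engine can still do the transfer; nouveau_bo_move_flips below
 * handles the opposite direction.
 */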
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else
		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
		    nvbo->page_shift == vma->vm->spg_shift) {
			if (((struct nouveau_mem *)new_mem->mm_node)->sg)
				nouveau_vm_map_sg_table(vma, 0,
					new_mem->num_pages << PAGE_SHIFT,
					new_mem->mm_node);
			else
				nouveau_vm_map_sg(vma, 0,
					new_mem->num_pages << PAGE_SHIFT,
					new_mem->mm_node);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}

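/* Top-level TTM move callback: unpopulated buffers move with a simple
 * struct assignment (the "fake" copy), otherwise try the accelerated path
 * and fall back to ttm_bo_move_memcpy() on failure.
 */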
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* CPU copy if we have no accelerated method available */
	if (!dev_priv->ttm.move) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (dev_priv->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

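/* Resolve a memory region to something the CPU can map: system RAM needs
 * nothing, AGP and non-BAR1-VM VRAM are plain aperture offsets, and
 * otherwise VRAM is mapped on demand into BAR1's virtual address space.
 */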
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
	{
		struct nouveau_mem *node = mem->mm_node;
		u8 page_shift;

		if (!dev_priv->bar1_vm) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = pci_resource_start(dev->pdev, 1);
			mem->bus.is_iomem = true;
			break;
		}

		if (dev_priv->card_type >= NV_C0)
			page_shift = node->page_shift;
		else
			page_shift = 12;

		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
				     page_shift, NV_MEM_ACCESS_RW,
				     &node->bar_vma);
		if (ret)
			return ret;

		nouveau_vm_map(&node->bar_vma, node);

		mem->bus.offset = node->bar_vma.offset;
		if (dev_priv->card_type == NV_50) /*XXX*/
			mem->bus.offset -= 0x0020000000ULL;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
	}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;

	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
		return;

	if (!node->bar_vma.node)
		return;

	nouveau_vm_unmap(&node->bar_vma);
	nouveau_vm_put(&node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}

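/* Allocate and DMA-map the backing pages for a ttm_tt: prime-imported
 * (slave) objects reuse their sg table, AGP and swiotlb have their own
 * pools, and the common case maps each pool page with pci_map_page().
 */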
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct drm_nouveau_private *dev_priv;
	struct drm_device *dev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	dev_priv = nouveau_bdev(ttm->bdev);
	dev = dev_priv->dev;

#if __OS_HAS_AGP
	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
			while (i--) {
				pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct drm_nouveau_private *dev_priv;
	struct drm_device *dev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	dev_priv = nouveau_bdev(ttm->bdev);
	dev = dev_priv->dev;

#if __OS_HAS_AGP
	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

1367 | void |
1368 | nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence) | |
1369 | { | |
1370 | struct nouveau_fence *old_fence = NULL; | |
1371 | ||
1372 | if (likely(fence)) | |
1373 | nouveau_fence_ref(fence); | |
1374 | ||
1375 | spin_lock(&nvbo->bo.bdev->fence_lock); | |
1376 | old_fence = nvbo->bo.sync_obj; | |
1377 | nvbo->bo.sync_obj = fence; | |
1378 | spin_unlock(&nvbo->bo.bdev->fence_lock); | |
1379 | ||
1380 | nouveau_fence_unref(&old_fence); | |
1381 | } | |
1382 | ||
1383 | static void | |
1384 | nouveau_bo_fence_unref(void **sync_obj) | |
1385 | { | |
1386 | nouveau_fence_unref((struct nouveau_fence **)sync_obj); | |
1387 | } | |
1388 | ||
1389 | static void * | |
1390 | nouveau_bo_fence_ref(void *sync_obj) | |
1391 | { | |
1392 | return nouveau_fence_ref(sync_obj); | |
1393 | } | |
1394 | ||
1395 | static bool | |
1396 | nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg) | |
1397 | { | |
d375e7d5 | 1398 | return nouveau_fence_done(sync_obj); |
875ac34a BS |
1399 | } |
1400 | ||
1401 | static int | |
1402 | nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) | |
1403 | { | |
1404 | return nouveau_fence_wait(sync_obj, lazy, intr); | |
1405 | } | |
1406 | ||
1407 | static int | |
1408 | nouveau_bo_fence_flush(void *sync_obj, void *sync_arg) | |
1409 | { | |
1410 | return 0; | |
1411 | } | |
1412 | ||
struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_bo_fence_signalled,
	.sync_obj_wait = nouveau_bo_fence_wait,
	.sync_obj_flush = nouveau_bo_fence_flush,
	.sync_obj_unref = nouveau_bo_fence_unref,
	.sync_obj_ref = nouveau_bo_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};

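/* A buffer may be mapped into several GPU address spaces at once; each
 * mapping is tracked by a nouveau_vma on the buffer's vma_list.
 */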
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
		if (node->sg)
			nouveau_vm_map_sg_table(vma, 0, size, node);
		else
			nouveau_vm_map_sg(vma, 0, size, node);
	}

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
			spin_lock(&nvbo->bo.bdev->fence_lock);
			ttm_bo_wait(&nvbo->bo, false, false, false);
			spin_unlock(&nvbo->bo.bdev->fence_lock);
			nouveau_vm_unmap(vma);
		}

		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}