/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
                           u32 addr, u32 size, u32 pitch, u32 flags)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        int i = reg - drm->tile.reg;
        struct nvkm_device *device = nvxx_device(&drm->device);
        struct nvkm_fb *fb = device->fb;
        struct nvkm_fb_tile *tile = &fb->tile.region[i];

        nouveau_fence_unref(&reg->fence);

        if (tile->pitch)
                nvkm_fb_tile_fini(fb, i, tile);

        if (pitch)
                nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);

        nvkm_fb_tile_prog(fb, i, tile);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_drm_tile *tile = &drm->tile.reg[i];

        spin_lock(&drm->tile.lock);

        if (!tile->used &&
            (!tile->fence || nouveau_fence_done(tile->fence)))
                tile->used = true;
        else
                tile = NULL;

        spin_unlock(&drm->tile.lock);
        return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
                        struct dma_fence *fence)
{
        struct nouveau_drm *drm = nouveau_drm(dev);

        if (tile) {
                spin_lock(&drm->tile.lock);
                tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
                tile->used = false;
                spin_unlock(&drm->tile.lock);
        }
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
                   u32 size, u32 pitch, u32 flags)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nvkm_fb *fb = nvxx_fb(&drm->device);
        struct nouveau_drm_tile *tile, *found = NULL;
        int i;

        for (i = 0; i < fb->tile.regions; i++) {
                tile = nv10_bo_get_tile_region(dev, i);

                if (pitch && !found) {
                        found = tile;
                        continue;

                } else if (tile && fb->tile.region[i].pitch) {
                        /* Kill an unused tile region. */
                        nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
                }

                nv10_bo_put_tile_region(dev, tile, NULL);
        }

        if (found)
                nv10_bo_update_tile_region(dev, found, addr, size,
                                           pitch, flags);
        return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct drm_device *dev = drm->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        if (unlikely(nvbo->gem.filp))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);
        WARN_ON(nvbo->pin_refcnt > 0);
        nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
        kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
                       int *align, int *size)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct nvif_device *device = &drm->device;

        if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
                if (nvbo->tile_mode) {
                        if (device->info.chipset >= 0x40) {
                                *align = 65536;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (device->info.chipset >= 0x30) {
                                *align = 32768;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (device->info.chipset >= 0x20) {
                                *align = 16384;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (device->info.chipset >= 0x10) {
                                *align = 16384;
                                *size = roundup(*size, 32 * nvbo->tile_mode);
                        }
                }
        } else {
                *size = roundup(*size, (1 << nvbo->page_shift));
                *align = max((1 << nvbo->page_shift), *align);
        }

        *size = roundup(*size, PAGE_SIZE);
}

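/*
 * Worked example (added for illustration, not part of the original file):
 * for a hypothetical 100 KiB buffer with tile_mode 4 on a chipset >= 0x40,
 * the helper above rounds the size to a multiple of 64 * 4 = 256 bytes,
 * forces a 64 KiB alignment, then rounds the size to whole pages:
 *
 *      size  = roundup(102400, 256);      -> 102400 (already a multiple)
 *      align = 65536;
 *      size  = roundup(size, PAGE_SIZE);  -> 102400 (with 4 KiB pages)
 */
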
int
nouveau_bo_new(struct nouveau_cli *cli, int size, int align,
               uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
               struct sg_table *sg, struct reservation_object *robj,
               struct nouveau_bo **pnvbo)
{
        struct nouveau_drm *drm = nouveau_drm(cli->dev);
        struct nouveau_bo *nvbo;
        size_t acc_size;
        int ret;
        int type = ttm_bo_type_device;
        int lpg_shift = 12;
        int max_size;

        if (drm->client.vm)
                lpg_shift = drm->client.vm->mmu->lpg_shift;
        max_size = INT_MAX & ~((1 << lpg_shift) - 1);

        if (size <= 0 || size > max_size) {
                NV_WARN(drm, "skipped size %x\n", (u32)size);
                return -EINVAL;
        }

        if (sg)
                type = ttm_bo_type_sg;

        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
        if (!nvbo)
                return -ENOMEM;
        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        INIT_LIST_HEAD(&nvbo->vma_list);
        nvbo->tile_mode = tile_mode;
        nvbo->tile_flags = tile_flags;
        nvbo->bo.bdev = &drm->ttm.bdev;
        nvbo->cli = cli;

        if (!nvxx_device(&drm->device)->func->cpu_coherent)
                nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;

        nvbo->page_shift = 12;
        if (drm->client.vm) {
                if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
                        nvbo->page_shift = drm->client.vm->mmu->lpg_shift;
        }

        nouveau_bo_fixup_align(nvbo, flags, &align, &size);
        nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
        nouveau_bo_placement_set(nvbo, flags, 0);

        acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
                                       sizeof(struct nouveau_bo));

        ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
                          type, &nvbo->placement,
                          align >> PAGE_SHIFT, false, NULL, acc_size, sg,
                          robj, nouveau_bo_del_ttm);
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
                return ret;
        }

        *pnvbo = nvbo;
        return 0;
}

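/*
 * Usage sketch (illustrative only; error handling abbreviated): a caller
 * such as the GEM layer would allocate a 64 KiB buffer in VRAM roughly
 * like this:
 *
 *      struct nouveau_bo *nvbo = NULL;
 *      int ret;
 *
 *      ret = nouveau_bo_new(cli, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *                           0, 0, NULL, NULL, &nvbo);
 *      if (ret)
 *              return ret;
 */
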
static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
{
        *n = 0;

        if (type & TTM_PL_FLAG_VRAM)
                pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
        if (type & TTM_PL_FLAG_TT)
                pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
        if (type & TTM_PL_FLAG_SYSTEM)
                pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;
        unsigned i, fpfn, lpfn;

        if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
            nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
            nvbo->bo.mem.num_pages < vram_pages / 4) {
                /*
                 * Make sure that the color and depth buffers are handled
                 * by independent memory controller units. Up to a 9x
                 * speed up when alpha-blending and depth-test are enabled
                 * at the same time.
                 */
                if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
                        fpfn = vram_pages / 2;
                        lpfn = ~0;
                } else {
                        fpfn = 0;
                        lpfn = vram_pages / 2;
                }
                for (i = 0; i < nvbo->placement.num_placement; ++i) {
                        nvbo->placements[i].fpfn = fpfn;
                        nvbo->placements[i].lpfn = lpfn;
                }
                for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
                        nvbo->busy_placements[i].fpfn = fpfn;
                        nvbo->busy_placements[i].lpfn = lpfn;
                }
        }
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
        struct ttm_placement *pl = &nvbo->placement;
        uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
                                                 TTM_PL_MASK_CACHING) |
                         (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

        pl->placement = nvbo->placements;
        set_placement_list(nvbo->placements, &pl->num_placement,
                           type, flags);

        pl->busy_placement = nvbo->busy_placements;
        set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
                           type | busy, flags);

        set_placement_range(nvbo, type);
}

int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        bool force = false, evict = false;
        int ret;

        ret = ttm_bo_reserve(bo, false, false, NULL);
        if (ret)
                return ret;

        if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
            memtype == TTM_PL_FLAG_VRAM && contig) {
                if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
                        if (bo->mem.mem_type == TTM_PL_VRAM) {
                                struct nvkm_mem *mem = bo->mem.mm_node;
                                if (!nvkm_mm_contiguous(mem->mem))
                                        evict = true;
                        }
                        nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
                        force = true;
                }
        }

        if (nvbo->pin_refcnt) {
                if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
                        NV_ERROR(drm, "bo %p pinned elsewhere: "
                                      "0x%08x vs 0x%08x\n", bo,
                                 1 << bo->mem.mem_type, memtype);
                        ret = -EBUSY;
                }
                nvbo->pin_refcnt++;
                goto out;
        }

        if (evict) {
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
                ret = nouveau_bo_validate(nvbo, false, false);
                if (ret)
                        goto out;
        }

        nvbo->pin_refcnt++;
        nouveau_bo_placement_set(nvbo, memtype, 0);

        /* drop pin_refcnt temporarily, so we don't trip the assertion
         * in nouveau_bo_move() that makes sure we're not trying to
         * move a pinned buffer
         */
        nvbo->pin_refcnt--;
        ret = nouveau_bo_validate(nvbo, false, false);
        if (ret)
                goto out;
        nvbo->pin_refcnt++;

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                drm->gem.vram_available -= bo->mem.size;
                break;
        case TTM_PL_TT:
                drm->gem.gart_available -= bo->mem.size;
                break;
        default:
                break;
        }

out:
        if (force && ret)
                nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG;
        ttm_bo_unreserve(bo);
        return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret, ref;

        ret = ttm_bo_reserve(bo, false, false, NULL);
        if (ret)
                return ret;

        ref = --nvbo->pin_refcnt;
        WARN_ON_ONCE(ref < 0);
        if (ref)
                goto out;

        nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

        ret = nouveau_bo_validate(nvbo, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        drm->gem.vram_available += bo->mem.size;
                        break;
                case TTM_PL_TT:
                        drm->gem.gart_available += bo->mem.size;
                        break;
                default:
                        break;
                }
        }

out:
        ttm_bo_unreserve(bo);
        return ret;
}

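/*
 * Note (added for clarity): pinning nests by reference count, so every
 * successful nouveau_bo_pin() must be balanced by exactly one
 * nouveau_bo_unpin(); only the final unpin makes the buffer evictable
 * again. A minimal sketch, assuming a scanout buffer that must stay in
 * VRAM while displayed:
 *
 *      ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
 *      ...scan out from the buffer...
 *      nouveau_bo_unpin(nvbo);
 */
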
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
        int ret;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
        if (ret)
                return ret;

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);

        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
        if (!nvbo)
                return;

        ttm_bo_kunmap(&nvbo->kmap);
}

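/*
 * Usage sketch (illustrative only): after a successful nouveau_bo_map()
 * the kernel mapping lives in nvbo->kmap and is accessed through the
 * nouveau_bo_rd32()/nouveau_bo_wr32() helpers defined later in this file:
 *
 *      if (nouveau_bo_map(nvbo) == 0) {
 *              nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);
 *              nouveau_bo_unmap(nvbo);
 *      }
 */
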
void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct nvkm_device *device = nvxx_device(&drm->device);
        struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
        int i;

        if (!ttm_dma)
                return;

        /* Don't waste time looping if the object is coherent */
        if (nvbo->force_coherent)
                return;

        for (i = 0; i < ttm_dma->ttm.num_pages; i++)
                dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
                                           PAGE_SIZE, DMA_TO_DEVICE);
}

void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct nvkm_device *device = nvxx_device(&drm->device);
        struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
        int i;

        if (!ttm_dma)
                return;

        /* Don't waste time looping if the object is coherent */
        if (nvbo->force_coherent)
                return;

        for (i = 0; i < ttm_dma->ttm.num_pages; i++)
                dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
                                        PAGE_SIZE, DMA_FROM_DEVICE);
}

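/*
 * Illustrative CPU-access bracketing (not from the original file): on a
 * non-coherent platform, a CPU read of buffer contents written by the GPU
 * should be wrapped by the two helpers above, following the usual DMA-API
 * ownership rules:
 *
 *      nouveau_bo_sync_for_cpu(nvbo);
 *      val = nouveau_bo_rd32(nvbo, 0);
 *      nouveau_bo_sync_for_device(nvbo);
 */
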
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
                    bool no_wait_gpu)
{
        int ret;

        ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
                              interruptible, no_wait_gpu);
        if (ret)
                return ret;

        nouveau_bo_sync_for_device(nvbo);

        return 0;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

        mem += index;

        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

        mem += index;

        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

        mem += index;

        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
                      uint32_t page_flags, struct page *dummy_read)
{
#if IS_ENABLED(CONFIG_AGP)
        struct nouveau_drm *drm = nouveau_bdev(bdev);

        if (drm->agp.bridge) {
                return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
                                         page_flags, dummy_read);
        }
#endif

        return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        /* We'll do this from user space. */
        return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
{
        struct nouveau_drm *drm = nouveau_bdev(bdev);

        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;

                if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                        /* Some BARs do not support being ioremapped WC */
                        if (nvxx_bar(&drm->device)->iomap_uncached) {
                                man->available_caching = TTM_PL_FLAG_UNCACHED;
                                man->default_caching = TTM_PL_FLAG_UNCACHED;
                        }

                        man->func = &nouveau_vram_manager;
                        man->io_reserve_fastpath = false;
                        man->use_io_reserve_lru = true;
                } else {
                        man->func = &ttm_bo_manager_func;
                }
                break;
        case TTM_PL_TT:
                if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                        man->func = &nouveau_gart_manager;
                else
                if (!drm->agp.bridge)
                        man->func = &nv04_gart_manager;
                else
                        man->func = &ttm_bo_manager_func;

                if (drm->agp.bridge) {
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_WC;
                        man->default_caching = TTM_PL_FLAG_WC;
                } else {
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_CMA;
                        man->available_caching = TTM_PL_MASK_CACHING;
                        man->default_caching = TTM_PL_FLAG_CACHED;
                }

                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
                                         TTM_PL_FLAG_SYSTEM);
                break;
        default:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
                break;
        }

        *pl = nvbo->placement;
}


static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
        int ret = RING_SPACE(chan, 2);
        if (ret == 0) {
                BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
                OUT_RING  (chan, handle & 0x0000ffff);
                FIRE_RING (chan);
        }
        return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nvkm_mem *node = old_mem->mm_node;
        int ret = RING_SPACE(chan, 10);
        if (ret == 0) {
                BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
                OUT_RING (chan, upper_32_bits(node->vma[0].offset));
                OUT_RING (chan, lower_32_bits(node->vma[0].offset));
                OUT_RING (chan, upper_32_bits(node->vma[1].offset));
                OUT_RING (chan, lower_32_bits(node->vma[1].offset));
                OUT_RING (chan, PAGE_SIZE);
                OUT_RING (chan, PAGE_SIZE);
                OUT_RING (chan, PAGE_SIZE);
                OUT_RING (chan, new_mem->num_pages);
                BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
        }
        return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
        int ret = RING_SPACE(chan, 2);
        if (ret == 0) {
                BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
                OUT_RING  (chan, handle);
        }
        return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nvkm_mem *node = old_mem->mm_node;
        u64 src_offset = node->vma[0].offset;
        u64 dst_offset = node->vma[1].offset;
        u32 page_count = new_mem->num_pages;
        int ret;

        page_count = new_mem->num_pages;
        while (page_count) {
                int line_count = (page_count > 8191) ? 8191 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
                OUT_RING (chan, upper_32_bits(src_offset));
                OUT_RING (chan, lower_32_bits(src_offset));
                OUT_RING (chan, upper_32_bits(dst_offset));
                OUT_RING (chan, lower_32_bits(dst_offset));
                OUT_RING (chan, PAGE_SIZE);
                OUT_RING (chan, PAGE_SIZE);
                OUT_RING (chan, PAGE_SIZE);
                OUT_RING (chan, line_count);
                BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
                OUT_RING (chan, 0x00000110);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nvkm_mem *node = old_mem->mm_node;
        u64 src_offset = node->vma[0].offset;
        u64 dst_offset = node->vma[1].offset;
        u32 page_count = new_mem->num_pages;
        int ret;

        page_count = new_mem->num_pages;
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 12);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
                OUT_RING (chan, upper_32_bits(dst_offset));
                OUT_RING (chan, lower_32_bits(dst_offset));
                BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
                OUT_RING (chan, upper_32_bits(src_offset));
                OUT_RING (chan, lower_32_bits(src_offset));
                OUT_RING (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING (chan, PAGE_SIZE); /* line_length */
                OUT_RING (chan, line_count);
                BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
                OUT_RING (chan, 0x00100110);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nvkm_mem *node = old_mem->mm_node;
        u64 src_offset = node->vma[0].offset;
        u64 dst_offset = node->vma[1].offset;
        u32 page_count = new_mem->num_pages;
        int ret;

        page_count = new_mem->num_pages;
        while (page_count) {
                int line_count = (page_count > 8191) ? 8191 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
                OUT_RING (chan, upper_32_bits(src_offset));
                OUT_RING (chan, lower_32_bits(src_offset));
                OUT_RING (chan, upper_32_bits(dst_offset));
                OUT_RING (chan, lower_32_bits(dst_offset));
                OUT_RING (chan, PAGE_SIZE);
                OUT_RING (chan, PAGE_SIZE);
                OUT_RING (chan, PAGE_SIZE);
                OUT_RING (chan, line_count);
                BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
                OUT_RING (chan, 0x00000110);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nvkm_mem *node = old_mem->mm_node;
        int ret = RING_SPACE(chan, 7);
        if (ret == 0) {
                BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
                OUT_RING (chan, upper_32_bits(node->vma[0].offset));
                OUT_RING (chan, lower_32_bits(node->vma[0].offset));
                OUT_RING (chan, upper_32_bits(node->vma[1].offset));
                OUT_RING (chan, lower_32_bits(node->vma[1].offset));
                OUT_RING (chan, 0x00000000 /* COPY */);
                OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
        }
        return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nvkm_mem *node = old_mem->mm_node;
        int ret = RING_SPACE(chan, 7);
        if (ret == 0) {
                BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
                OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
                OUT_RING (chan, upper_32_bits(node->vma[0].offset));
                OUT_RING (chan, lower_32_bits(node->vma[0].offset));
                OUT_RING (chan, upper_32_bits(node->vma[1].offset));
                OUT_RING (chan, lower_32_bits(node->vma[1].offset));
                OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
        }
        return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
        int ret = RING_SPACE(chan, 6);
        if (ret == 0) {
                BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
                OUT_RING (chan, handle);
                BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
                OUT_RING (chan, chan->drm->ntfy.handle);
                OUT_RING (chan, chan->vram.handle);
                OUT_RING (chan, chan->vram.handle);
        }

        return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nvkm_mem *node = old_mem->mm_node;
        u64 length = (new_mem->num_pages << PAGE_SHIFT);
        u64 src_offset = node->vma[0].offset;
        u64 dst_offset = node->vma[1].offset;
        int src_tiled = !!node->memtype;
        int dst_tiled = !!((struct nvkm_mem *)new_mem->mm_node)->memtype;
        int ret;

        while (length) {
                u32 amount, stride, height;

                ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
                if (ret)
                        return ret;

                amount  = min(length, (u64)(4 * 1024 * 1024));
                stride  = 16 * 4;
                height  = amount / stride;

                if (src_tiled) {
                        BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
                        OUT_RING (chan, 0);
                        OUT_RING (chan, 0);
                        OUT_RING (chan, stride);
                        OUT_RING (chan, height);
                        OUT_RING (chan, 1);
                        OUT_RING (chan, 0);
                        OUT_RING (chan, 0);
                } else {
                        BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
                        OUT_RING (chan, 1);
                }
                if (dst_tiled) {
                        BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
                        OUT_RING (chan, 0);
                        OUT_RING (chan, 0);
                        OUT_RING (chan, stride);
                        OUT_RING (chan, height);
                        OUT_RING (chan, 1);
                        OUT_RING (chan, 0);
                        OUT_RING (chan, 0);
                } else {
                        BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
                        OUT_RING (chan, 1);
                }

                BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
                OUT_RING (chan, upper_32_bits(src_offset));
                OUT_RING (chan, upper_32_bits(dst_offset));
                BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
                OUT_RING (chan, lower_32_bits(src_offset));
                OUT_RING (chan, lower_32_bits(dst_offset));
                OUT_RING (chan, stride);
                OUT_RING (chan, stride);
                OUT_RING (chan, stride);
                OUT_RING (chan, height);
                OUT_RING (chan, 0x00000101);
                OUT_RING (chan, 0x00000000);
                BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING (chan, 0);

                length -= amount;
                src_offset += amount;
                dst_offset += amount;
        }

        return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
        int ret = RING_SPACE(chan, 4);
        if (ret == 0) {
                BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
                OUT_RING (chan, handle);
                BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
                OUT_RING (chan, chan->drm->ntfy.handle);
        }

        return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
                      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
        if (mem->mem_type == TTM_PL_TT)
                return NvDmaTT;
        return chan->vram.handle;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        u32 src_offset = old_mem->start << PAGE_SHIFT;
        u32 dst_offset = new_mem->start << PAGE_SHIFT;
        u32 page_count = new_mem->num_pages;
        int ret;

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;

        BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
        OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
        OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

        page_count = new_mem->num_pages;
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, NvSubCopy,
                           NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING (chan, src_offset);
                OUT_RING (chan, dst_offset);
                OUT_RING (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING (chan, PAGE_SIZE); /* line_length */
                OUT_RING (chan, line_count);
                OUT_RING (chan, 0x00000101);
                OUT_RING (chan, 0x00000000);
                BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING (chan, 0);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
                     struct ttm_mem_reg *mem)
{
        struct nvkm_mem *old_node = bo->mem.mm_node;
        struct nvkm_mem *new_node = mem->mm_node;
        u64 size = (u64)mem->num_pages << PAGE_SHIFT;
        int ret;

        ret = nvkm_vm_get(drm->client.vm, size, old_node->page_shift,
                          NV_MEM_ACCESS_RW, &old_node->vma[0]);
        if (ret)
                return ret;

        ret = nvkm_vm_get(drm->client.vm, size, new_node->page_shift,
                          NV_MEM_ACCESS_RW, &old_node->vma[1]);
        if (ret) {
                nvkm_vm_put(&old_node->vma[0]);
                return ret;
        }

        nvkm_vm_map(&old_node->vma[0], old_node);
        nvkm_vm_map(&old_node->vma[1], new_node);
        return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_channel *chan = drm->ttm.chan;
        struct nouveau_cli *cli = (void *)chan->user.client;
        struct nouveau_fence *fence;
        int ret;

        /* create temporary vmas for the transfer and attach them to the
         * old nvkm_mem node, these will get cleaned up after ttm has
         * destroyed the ttm_mem_reg
         */
        if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                ret = nouveau_bo_move_prep(drm, bo, new_mem);
                if (ret)
                        return ret;
        }

        mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
        ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
        if (ret == 0) {
                ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
                if (ret == 0) {
                        ret = nouveau_fence_new(chan, false, &fence);
                        if (ret == 0) {
                                ret = ttm_bo_move_accel_cleanup(bo,
                                                                &fence->base,
                                                                evict,
                                                                new_mem);
                                nouveau_fence_unref(&fence);
                        }
                }
        }
        mutex_unlock(&cli->mutex);
        return ret;
}

void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
        static const struct {
                const char *name;
                int engine;
                s32 oclass;
                int (*exec)(struct nouveau_channel *,
                            struct ttm_buffer_object *,
                            struct ttm_mem_reg *, struct ttm_mem_reg *);
                int (*init)(struct nouveau_channel *, u32 handle);
        } _methods[] = {
                {  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
                { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
                { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
                { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
                {  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
                {  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
                {  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
                {},
                { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
        }, *mthd = _methods;
        const char *name = "CPU";
        int ret;

        do {
                struct nouveau_channel *chan;

                if (mthd->engine)
                        chan = drm->cechan;
                else
                        chan = drm->channel;
                if (chan == NULL)
                        continue;

                ret = nvif_object_init(&chan->user,
                                       mthd->oclass | (mthd->engine << 16),
                                       mthd->oclass, NULL, 0,
                                       &drm->ttm.copy);
                if (ret == 0) {
                        ret = mthd->init(chan, drm->ttm.copy.handle);
                        if (ret) {
                                nvif_object_fini(&drm->ttm.copy);
                                continue;
                        }

                        drm->ttm.move = mthd->exec;
                        drm->ttm.chan = chan;
                        name = mthd->name;
                        break;
                }
        } while ((++mthd)->exec);

        NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

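/*
 * Note (added for clarity): the _methods[] table above is walked in order,
 * so the newest copy-engine classes are probed first and the NV04 M2MF
 * entry is the last hardware option; the empty terminator entry ends the
 * scan before the trailing 0x88b4 CRYPT entry, which is therefore
 * effectively disabled. If no method initialises, buffer copies fall back
 * to the CPU.
 */
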
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct ttm_place placement_memtype = {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
        };
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_mem);
out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct ttm_place placement_memtype = {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
        };
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
        if (ret)
                goto out;

out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
                     struct ttm_mem_reg *new_mem)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nvkm_vma *vma;

        /* ttm can now (stupidly) pass the driver bos it didn't create... */
        if (bo->destroy != nouveau_bo_del_ttm)
                return;

        list_for_each_entry(vma, &nvbo->vma_list, head) {
                if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
                    (new_mem->mem_type == TTM_PL_VRAM ||
                     nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
                        nvkm_vm_map(vma, new_mem->mm_node);
                } else {
                        WARN_ON(ttm_bo_wait(bo, false, false));
                        nvkm_vm_unmap(vma);
                }
        }
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
                   struct nouveau_drm_tile **new_tile)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct drm_device *dev = drm->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u64 offset = new_mem->start << PAGE_SHIFT;

        *new_tile = NULL;
        if (new_mem->mem_type != TTM_PL_VRAM)
                return 0;

        if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
                *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
                                               nvbo->tile_mode,
                                               nvbo->tile_flags);
        }

        return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
                      struct nouveau_drm_tile *new_tile,
                      struct nouveau_drm_tile **old_tile)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct drm_device *dev = drm->dev;
        struct dma_fence *fence = reservation_object_get_excl(bo->resv);

        nv10_bo_put_tile_region(dev, *old_tile, fence);
        *old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct nouveau_drm_tile *new_tile = NULL;
        int ret = 0;

        ret = ttm_bo_wait(bo, intr, no_wait_gpu);
        if (ret)
                return ret;

        if (nvbo->pin_refcnt)
                NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

        if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
                ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
                if (ret)
                        return ret;
        }

        /* Fake bo copy. */
        if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
                BUG_ON(bo->mem.mm_node != NULL);
                bo->mem = *new_mem;
                new_mem->mm_node = NULL;
                goto out;
        }

        /* Hardware assisted copy. */
        if (drm->ttm.move) {
                if (new_mem->mem_type == TTM_PL_SYSTEM)
                        ret = nouveau_bo_move_flipd(bo, evict, intr,
                                                    no_wait_gpu, new_mem);
                else if (old_mem->mem_type == TTM_PL_SYSTEM)
                        ret = nouveau_bo_move_flips(bo, evict, intr,
                                                    no_wait_gpu, new_mem);
                else
                        ret = nouveau_bo_move_m2mf(bo, evict, intr,
                                                   no_wait_gpu, new_mem);
                if (!ret)
                        goto out;
        }

        /* Fallback to software copy. */
        ret = ttm_bo_wait(bo, intr, no_wait_gpu);
        if (ret == 0)
                ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_mem);

out:
        if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
                if (ret)
                        nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
                else
                        nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
        }

        return ret;
}

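/*
 * Note (added for clarity): the move path above tries, in order, a "fake"
 * move (system memory with no backing ttm), the hardware copy hooked up by
 * nouveau_bo_move_init(), and finally ttm_bo_move_memcpy() as the software
 * fallback. Moves into or out of system memory are staged through a GART
 * placement by the flipd/flips helpers so the copy engine can reach the
 * pages.
 */
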
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        return drm_vma_node_verify_access(&nvbo->gem.vma_node,
                                          filp->private_data);
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct nouveau_drm *drm = nouveau_bdev(bdev);
        struct nvkm_device *device = nvxx_device(&drm->device);
        struct nvkm_mem *node = mem->mm_node;
        int ret;

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                return 0;
        case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
                if (drm->agp.bridge) {
                        mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = drm->agp.base;
                        mem->bus.is_iomem = !drm->agp.cma;
                }
#endif
                if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
                        /* untiled */
                        break;
                /* fallthrough, tiled memory */
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = device->func->resource_addr(device, 1);
                mem->bus.is_iomem = true;
                if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                        struct nvkm_bar *bar = nvxx_bar(&drm->device);
                        int page_shift = 12;
                        if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
                                page_shift = node->page_shift;

                        ret = nvkm_bar_umap(bar, node->size << 12, page_shift,
                                            &node->bar_vma);
                        if (ret)
                                return ret;

                        nvkm_vm_map(&node->bar_vma, node);
                        mem->bus.offset = node->bar_vma.offset;
                }
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct nvkm_mem *node = mem->mm_node;

        if (!node->bar_vma.node)
                return;

        nvkm_vm_unmap(&node->bar_vma);
        nvkm_vm_put(&node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nvkm_device *device = nvxx_device(&drm->device);
        u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
        int i, ret;

        /* as long as the bo isn't in vram, and isn't tiled, we've got
         * nothing to do here.
         */
        if (bo->mem.mem_type != TTM_PL_VRAM) {
                if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
                    !nouveau_bo_tile_layout(nvbo))
                        return 0;

                if (bo->mem.mem_type == TTM_PL_SYSTEM) {
                        nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);

                        ret = nouveau_bo_validate(nvbo, false, false);
                        if (ret)
                                return ret;
                }
                return 0;
        }

        /* make sure bo is in mappable vram */
        if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
            bo->mem.start + bo->mem.num_pages < mappable)
                return 0;

        for (i = 0; i < nvbo->placement.num_placement; ++i) {
                nvbo->placements[i].fpfn = 0;
                nvbo->placements[i].lpfn = mappable;
        }

        for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
                nvbo->busy_placements[i].fpfn = 0;
                nvbo->busy_placements[i].lpfn = mappable;
        }

        nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
        return nouveau_bo_validate(nvbo, false, false);
}

static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
        struct ttm_dma_tt *ttm_dma = (void *)ttm;
        struct nouveau_drm *drm;
        struct nvkm_device *device;
        struct drm_device *dev;
        struct device *pdev;
        unsigned i;
        int r;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

        if (ttm->state != tt_unpopulated)
                return 0;

        if (slave && ttm->sg) {
                /* make userspace faulting work */
                drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
                                                 ttm_dma->dma_address, ttm->num_pages);
                ttm->state = tt_unbound;
                return 0;
        }

        drm = nouveau_bdev(ttm->bdev);
        device = nvxx_device(&drm->device);
        dev = drm->dev;
        pdev = device->dev;

#if IS_ENABLED(CONFIG_AGP)
        if (drm->agp.bridge) {
                return ttm_agp_tt_populate(ttm);
        }
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
        if (swiotlb_nr_tbl()) {
                return ttm_dma_populate((void *)ttm, dev->dev);
        }
#endif

        r = ttm_pool_populate(ttm);
        if (r) {
                return r;
        }

        for (i = 0; i < ttm->num_pages; i++) {
                dma_addr_t addr;

                addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
                                    DMA_BIDIRECTIONAL);

                if (dma_mapping_error(pdev, addr)) {
                        while (i--) {
                                dma_unmap_page(pdev, ttm_dma->dma_address[i],
                                               PAGE_SIZE, DMA_BIDIRECTIONAL);
                                ttm_dma->dma_address[i] = 0;
                        }
                        ttm_pool_unpopulate(ttm);
                        return -EFAULT;
                }

                ttm_dma->dma_address[i] = addr;
        }
        return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        struct ttm_dma_tt *ttm_dma = (void *)ttm;
        struct nouveau_drm *drm;
        struct nvkm_device *device;
        struct drm_device *dev;
        struct device *pdev;
        unsigned i;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

        if (slave)
                return;

        drm = nouveau_bdev(ttm->bdev);
        device = nvxx_device(&drm->device);
        dev = drm->dev;
        pdev = device->dev;

#if IS_ENABLED(CONFIG_AGP)
        if (drm->agp.bridge) {
                ttm_agp_tt_unpopulate(ttm);
                return;
        }
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
        if (swiotlb_nr_tbl()) {
                ttm_dma_unpopulate((void *)ttm, dev->dev);
                return;
        }
#endif

        for (i = 0; i < ttm->num_pages; i++) {
                if (ttm_dma->dma_address[i]) {
                        dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
                                       DMA_BIDIRECTIONAL);
                }
        }

        ttm_pool_unpopulate(ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
        struct reservation_object *resv = nvbo->bo.resv;

        if (exclusive)
                reservation_object_add_excl_fence(resv, &fence->base);
        else if (fence)
                reservation_object_add_shared_fence(resv, &fence->base);
}

struct ttm_bo_driver nouveau_bo_driver = {
        .ttm_tt_create = &nouveau_ttm_tt_create,
        .ttm_tt_populate = &nouveau_ttm_tt_populate,
        .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = nouveau_bo_evict_flags,
        .move_notify = nouveau_bo_move_ntfy,
        .move = nouveau_bo_move,
        .verify_access = nouveau_bo_verify_access,
        .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
        .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
        .io_mem_free = &nouveau_ttm_io_mem_free,
};

struct nvkm_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm)
{
        struct nvkm_vma *vma;
        list_for_each_entry(vma, &nvbo->vma_list, head) {
                if (vma->vm == vm)
                        return vma;
        }

        return NULL;
}

int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
                   struct nvkm_vma *vma)
{
        const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
        int ret;

        ret = nvkm_vm_get(vm, size, nvbo->page_shift,
                          NV_MEM_ACCESS_RW, vma);
        if (ret)
                return ret;

        if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
            (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
             nvbo->page_shift != vma->vm->mmu->lpg_shift))
                nvkm_vm_map(vma, nvbo->bo.mem.mm_node);

        list_add_tail(&vma->head, &nvbo->vma_list);
        vma->refcount = 1;
        return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
{
        if (vma->node) {
                if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
                        nvkm_vm_unmap(vma);
                nvkm_vm_put(vma);
                list_del(&vma->head);
        }
}
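
/*
 * Lifecycle sketch (illustrative only): mapping a bo into a client's
 * virtual address space pairs nouveau_bo_vma_add() with
 * nouveau_bo_vma_del(), with nouveau_bo_vma_find() used to look up an
 * existing mapping first:
 *
 *      struct nvkm_vma *vma = nouveau_bo_vma_find(nvbo, cli_vm);
 *      if (!vma) {
 *              vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 *              if (vma)
 *                      ret = nouveau_bo_vma_add(nvbo, cli_vm, vma);
 *      }
 *      ...
 *      nouveau_bo_vma_del(nvbo, vma);
 *      kfree(vma);
 *
 * Here "cli_vm" stands in for the client's nvkm_vm; in practice the GEM
 * open/close paths drive these calls and manage vma->refcount.
 */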