/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
			       struct ttm_resource *reg);

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nvkm_fb_tile *tile = &fb->tile.region[i];

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		nvkm_fb_tile_fini(fb, i, tile);

	if (pitch)
		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);

	nvkm_fb_tile_prog(fb, i, tile);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct dma_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 zeta)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < fb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && fb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
	return found;
}

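/*
 * Note on the helpers above: a tile region is only recycled once the fence
 * left behind by its previous user has signalled, and nv10_bo_set_tiling()
 * claims the first free region when a pitch is requested while tearing down
 * any other region it finds unused.
 */
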
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	WARN_ON(nvbo->pin_refcnt > 0);
	nouveau_bo_del_io_reserve_lru(bo);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);

	/*
	 * If nouveau_bo_new() allocated this buffer, the GEM object was never
	 * initialized, so don't attempt to release it.
	 */
	if (bo->base.dev)
		drm_gem_object_release(&bo->base);

	kfree(nvbo);
}

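/*
 * roundup_64() mirrors roundup(), but goes through do_div() so that the
 * 64-bit division is safe on 32-bit architectures as well.
 */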
static inline u64
roundup_64(u64 x, u32 y)
{
	x += y - 1;
	do_div(x, y);
	return x * y;
}

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->client.device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup_64(*size, 32 * nvbo->mode);
			}
		}
	} else {
		*size = roundup_64(*size, (1 << nvbo->page));
		*align = max((1 << nvbo->page), *align);
	}

	*size = roundup_64(*size, PAGE_SIZE);
}

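/*
 * Worked example for nouveau_bo_fixup_align() (illustrative numbers only):
 * on a pre-Tesla chipset >= 0x40 with tile mode 4, a 100000-byte request is
 * first rounded to a multiple of 64 * 4 = 256 bytes (100096), *align becomes
 * 65536, and the final roundup to a 4 KiB PAGE_SIZE yields 102400 bytes.
 */
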
struct nouveau_bo *
nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
		 u32 tile_mode, u32 tile_flags)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	struct nvif_mmu *mmu = &cli->mmu;
	struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
	int i, pi = -1;

	if (!*size) {
		NV_WARN(drm, "skipped size %016llx\n", *size);
		return ERR_PTR(-EINVAL);
	}

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->bo.bdev = &drm->ttm.bdev;

	/* This is confusing, and doesn't actually mean we want an uncached
	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
	 * into in nouveau_gem_new().
	 */
	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) {
		/* Determine if we can get a cache-coherent map, forcing
		 * uncached mapping if we can't.
		 */
		if (!nouveau_drm_use_coherent_gpu_mapping(drm))
			nvbo->force_coherent = true;
	}

	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return ERR_PTR(-EINVAL);
		}

		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
	} else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
		nvbo->comp = (tile_flags & 0x00030000) >> 16;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return ERR_PTR(-EINVAL);
		}
	} else {
		nvbo->zeta = (tile_flags & 0x00000007);
	}
	nvbo->mode = tile_mode;
	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);

	/* Determine the desirable target GPU page size for the buffer. */
	for (i = 0; i < vmm->page_nr; i++) {
		/* Because we cannot currently allow VMM maps to fail
		 * during buffer migration, we need to determine page
		 * size for the buffer up-front, and pre-allocate its
		 * page tables.
		 *
		 * Skip page sizes that can't support needed domains.
		 */
		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
		    (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
			continue;
		if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
		    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
			continue;

		/* Select this page size if it's the first that supports
		 * the potential memory domains, or when it's compatible
		 * with the requested compression settings.
		 */
		if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
			pi = i;

		/* Stop once the buffer is larger than the current page size. */
		if (*size >= 1ULL << vmm->page[i].shift)
			break;
	}

	if (WARN_ON(pi < 0)) {
		kfree(nvbo);
		return ERR_PTR(-EINVAL);
	}

	/* Disable compression if suitable settings couldn't be found. */
	if (nvbo->comp && !vmm->page[pi].comp) {
		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
			nvbo->kind = mmu->kind[nvbo->kind];
		nvbo->comp = 0;
	}
	nvbo->page = vmm->page[pi].shift;

	nouveau_bo_fixup_align(nvbo, align, size);

	return nvbo;
}

int
nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
		struct sg_table *sg, struct dma_resv *robj)
{
	int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
	size_t acc_size;
	int ret;

	acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));

	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, domain, 0);
	INIT_LIST_HEAD(&nvbo->io_reserve_lru);

	ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
			  &nvbo->placement, align >> PAGE_SHIFT, false,
			  acc_size, sg, robj, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	return 0;
}

int
nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
	       uint32_t domain, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg, struct dma_resv *robj,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
				tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
	if (ret)
		return ret;

	*pnvbo = nvbo;
	return 0;
}

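/*
 * Typical use (illustrative sketch, not taken from a real caller):
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret = nouveau_bo_new(cli, 0x10000, 0, NOUVEAU_GEM_DOMAIN_VRAM,
 *				 0, 0, NULL, NULL, &nvbo);
 *
 * On success *pnvbo points at the new buffer object; size and alignment
 * have already been fixed up by nouveau_bo_alloc().
 */
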
static void
set_placement_list(struct nouveau_drm *drm, struct ttm_place *pl, unsigned *n,
		   uint32_t domain, uint32_t flags)
{
	*n = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
		struct nvif_mmu *mmu = &drm->client.mmu;
		const u8 type = mmu->type[drm->ttm.type_vram].type;

		pl[*n].mem_type = TTM_PL_VRAM;
		pl[*n].flags = flags & ~TTM_PL_FLAG_CACHED;

		/* Some BARs do not support being ioremapped WC */
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
		    type & NVIF_MEM_UNCACHED)
			pl[*n].flags &= ~TTM_PL_FLAG_WC;

		(*n)++;
	}
	if (domain & NOUVEAU_GEM_DOMAIN_GART) {
		pl[*n].mem_type = TTM_PL_TT;
		pl[*n].flags = flags;

		if (drm->agp.bridge)
			pl[*n].flags &= ~TTM_PL_FLAG_CACHED;

		(*n)++;
	}
	if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
		pl[*n].mem_type = TTM_PL_SYSTEM;
		pl[(*n)++].flags = flags;
	}
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
	unsigned i, fpfn, lpfn;

	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->zeta) {
			fpfn = vram_pages / 2;
			lpfn = ~0;
		} else {
			fpfn = 0;
			lpfn = vram_pages / 2;
		}
		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = fpfn;
			nvbo->placements[i].lpfn = lpfn;
		}
		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = fpfn;
			nvbo->busy_placements[i].lpfn = lpfn;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
			 uint32_t busy)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
						 TTM_PL_MASK_CACHING) |
			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(drm, nvbo->placements, &pl->num_placement,
			   domain, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(drm, nvbo->busy_placements, &pl->num_busy_placement,
			   domain | busy, flags);

	set_placement_range(nvbo, domain);
}

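/*
 * TTM tries "placement" first and only falls back to "busy_placement" when
 * under memory pressure, so the extra domains passed in via "busy" above
 * only widen the fallback set.
 */
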
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	bool force = false, evict = false;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
	    domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
		if (!nvbo->contig) {
			nvbo->contig = true;
			force = true;
			evict = true;
		}
	}

	if (nvbo->pin_refcnt) {
		bool error = evict;

		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
			break;
		case TTM_PL_TT:
			error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
			break;
		default:
			break;
		}

		if (error) {
			NV_ERROR(drm, "bo %p pinned elsewhere: "
				      "0x%08x vs 0x%08x\n", bo,
				 bo->mem.mem_type, domain);
			ret = -EBUSY;
		}
		nvbo->pin_refcnt++;
		goto out;
	}

	if (evict) {
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
		ret = nouveau_bo_validate(nvbo, false, false);
		if (ret)
			goto out;
	}

	nvbo->pin_refcnt++;
	nouveau_bo_placement_set(nvbo, domain, 0);

	/* drop pin_refcnt temporarily, so we don't trip the assertion
	 * in nouveau_bo_move() that makes sure we're not trying to
	 * move a pinned buffer
	 */
	nvbo->pin_refcnt--;
	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret)
		goto out;
	nvbo->pin_refcnt++;

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		drm->gem.vram_available -= bo->mem.size;
		break;
	case TTM_PL_TT:
		drm->gem.gart_available -= bo->mem.size;
		break;
	default:
		break;
	}

out:
	if (force && ret)
		nvbo->contig = false;
	ttm_bo_unreserve(bo);
	return ret;
}

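/*
 * Pin/unpin sketch (illustrative only): a scanout buffer would typically be
 * pinned with nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true) so that it
 * is contiguous and immovable while displayed, and released again with
 * nouveau_bo_unpin(nvbo) once the display engine no longer references it.
 */
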
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, ref;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);
	if (ref)
		goto out;

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
		break;
	case TTM_PL_TT:
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
		break;
	default:
		break;
	}

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

out:
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);

	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (!nvbo)
		return;

	ttm_bo_kunmap(&nvbo->kmap);
}

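/*
 * Illustrative kmap usage (error handling elided):
 *
 *	ret = nouveau_bo_map(nvbo);
 *	if (ret == 0) {
 *		nouveau_bo_wr32(nvbo, 0, 0xcafe0001);
 *		nouveau_bo_unmap(nvbo);
 *	}
 */
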
void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
	int i;

	if (!ttm_dma)
		return;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_device(drm->dev->dev,
					   ttm_dma->dma_address[i],
					   PAGE_SIZE, DMA_TO_DEVICE);
}

void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
	int i;

	if (!ttm_dma)
		return;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
					PAGE_SIZE, DMA_FROM_DEVICE);
}

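/*
 * The two helpers above pair in the usual streaming-DMA way: call
 * nouveau_bo_sync_for_cpu() before the CPU reads data the GPU wrote, and
 * nouveau_bo_sync_for_device() after CPU writes, before the GPU uses the
 * buffer again (nouveau_bo_validate() does the latter automatically).
 */
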
void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	list_del_init(&nvbo->io_reserve_lru);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
	if (ret)
		return ret;

	nouveau_bo_sync_for_device(nvbo);

	return 0;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

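/*
 * Note that "index" in the accessors above counts elements of the access
 * size rather than bytes: nouveau_bo_wr16() addresses 16-bit words, while
 * nouveau_bo_rd32()/nouveau_bo_wr32() address 32-bit words.
 */
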
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);

	if (drm->agp.bridge) {
		return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
	}
#endif

	return nouveau_sgdma_create_ttm(bo, page_flags);
}

static int
nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
		    struct ttm_resource *reg)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
#endif
	if (!reg)
		return -EINVAL;
#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge)
		return ttm_agp_bind(ttm, reg);
#endif
	return nouveau_sgdma_bind(bdev, ttm, reg);
}

static void
nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	if (drm->agp.bridge) {
		ttm_agp_unbind(ttm);
		return;
	}
#endif
	nouveau_sgdma_unbind(bdev, ttm);
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
					 NOUVEAU_GEM_DOMAIN_CPU);
		break;
	default:
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
		break;
	}

	*pl = nvbo->placement;
}

static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_resource *reg)
{
	struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
	struct nouveau_mem *new_mem = nouveau_mem(reg);
	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
	int ret;

	ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
			   old_mem->mem.size, &old_mem->vma[0]);
	if (ret)
		return ret;

	ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
			   new_mem->mem.size, &old_mem->vma[1]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
done:
	if (ret) {
		nvif_vmm_put(vmm, &old_mem->vma[1]);
		nvif_vmm_put(vmm, &old_mem->vma[0]);
	}
	/* Propagate failure so the caller doesn't copy through unmapped VMAs. */
	return ret;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_resource *new_reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)chan->user.client;
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nvkm_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_resource
	 */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_reg);
		if (ret)
			return ret;
	}

	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo,
								&fence->base,
								evict, false,
								new_reg);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&cli->mutex);
	return ret;
}

void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct _method_table {
		const char *name;
		int engine;
		s32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_resource *, struct ttm_resource *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
	};
	const struct _method_table *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_channel *chan;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nvif_object_ctor(&chan->user, "ttmBoMove",
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
				       &drm->ttm.copy);
		if (ret == 0) {
			ret = mthd->init(chan, drm->ttm.copy.handle);
			if (ret) {
				nvif_object_dtor(&drm->ttm.copy);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

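/*
 * The method table above is ordered from newest to oldest copy class; the
 * probe loop binds the first class the hardware exposes, preferring a
 * dedicated copy engine (mthd->engine != 0, run on drm->cechan) over the
 * graphics channel. If nothing binds, drm->ttm.move stays NULL and buffer
 * moves fall back to the CPU memcpy path in nouveau_bo_move().
 */
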
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_resource *new_reg)
{
	struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_TT,
		.flags = TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_resource tmp_reg;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_reg = *new_reg;
	tmp_reg.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
	if (ret)
		return ret;

	ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
	if (ret)
		goto out;

	ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, &ctx, new_reg);
out:
	ttm_resource_free(bo, &tmp_reg);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_resource *new_reg)
{
	struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_TT,
		.flags = TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_resource tmp_reg;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_reg = *new_reg;
	tmp_reg.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, &ctx, &tmp_reg);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);
	if (ret)
		goto out;

out:
	ttm_resource_free(bo, &tmp_reg);
	return ret;
}

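/*
 * The two "flip" paths above bounce copies through a temporary GART
 * placement: _flipd handles VRAM -> SYSTEM by blitting into GART and
 * letting TTM finish the move, while _flips handles SYSTEM -> VRAM by
 * first moving the pages into GART so the copy engine can read them.
 */
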
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
		     struct ttm_resource *new_reg)
{
	struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	nouveau_bo_del_io_reserve_lru(bo);

	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
	    mem->mem.page == nvbo->page) {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			nouveau_vma_map(vma, mem);
		}
	} else {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			WARN_ON(ttm_bo_wait(bo, false, false));
			nouveau_vma_unmap(vma);
		}
	}

	if (new_reg) {
		if (new_reg->mm_node)
			nvbo->offset = (new_reg->start << PAGE_SHIFT);
		else
			nvbo->offset = 0;
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_reg->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_reg->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
					       nvbo->mode, nvbo->zeta);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);

	nv10_bo_put_tile_region(dev, *old_tile, fence);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
		struct ttm_operation_ctx *ctx,
		struct ttm_resource *new_reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_resource *old_reg = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;

	if (nvbo->pin_refcnt)
		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		ttm_bo_move_null(bo, new_reg);
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if (new_reg->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flipd(bo, evict,
						    ctx->interruptible,
						    ctx->no_wait_gpu, new_reg);
		else if (old_reg->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flips(bo, evict,
						    ctx->interruptible,
						    ctx->no_wait_gpu, new_reg);
		else
			ret = nouveau_bo_move_m2mf(bo, evict,
						   ctx->interruptible,
						   ctx->no_wait_gpu, new_reg);
		if (!ret)
			goto out;
	}

	/* Fallback to software copy. */
	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret == 0)
		ret = ttm_bo_move_memcpy(bo, ctx, new_reg);

out:
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->bo.base.vma_node,
					  filp->private_data);
}

static void
nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
			       struct ttm_resource *reg)
{
	struct nouveau_mem *mem = nouveau_mem(reg);

	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
		switch (reg->mem_type) {
		case TTM_PL_TT:
			if (mem->kind)
				nvif_object_unmap_handle(&mem->mem.object);
			break;
		case TTM_PL_VRAM:
			nvif_object_unmap_handle(&mem->mem.object);
			break;
		default:
			break;
		}
	}
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nouveau_mem *mem = nouveau_mem(reg);
	int ret;

	mutex_lock(&drm->ttm.io_reserve_mutex);
retry:
	switch (reg->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		ret = 0;
		goto out;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (drm->agp.bridge) {
			reg->bus.offset = (reg->start << PAGE_SHIFT) +
				drm->agp.base;
			reg->bus.is_iomem = !drm->agp.cma;
		}
#endif
		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
		    !mem->kind) {
			/* untiled */
			ret = 0;
			break;
		}
		fallthrough;	/* tiled memory */
	case TTM_PL_VRAM:
		reg->bus.offset = (reg->start << PAGE_SHIFT) +
			device->func->resource_addr(device, 1);
		reg->bus.is_iomem = true;
		if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
			union {
				struct nv50_mem_map_v0 nv50;
				struct gf100_mem_map_v0 gf100;
			} args;
			u64 handle, length;
			u32 argc = 0;

			switch (mem->mem.object.oclass) {
			case NVIF_CLASS_MEM_NV50:
				args.nv50.version = 0;
				args.nv50.ro = 0;
				args.nv50.kind = mem->kind;
				args.nv50.comp = mem->comp;
				argc = sizeof(args.nv50);
				break;
			case NVIF_CLASS_MEM_GF100:
				args.gf100.version = 0;
				args.gf100.ro = 0;
				args.gf100.kind = mem->kind;
				argc = sizeof(args.gf100);
				break;
			default:
				WARN_ON(1);
				break;
			}

			ret = nvif_object_map_handle(&mem->mem.object,
						     &args, argc,
						     &handle, &length);
			if (ret != 1) {
				if (WARN_ON(ret == 0))
					ret = -EINVAL;
				goto out;
			}

			reg->bus.offset = handle;
			ret = 0;
		}
		break;
	default:
		ret = -EINVAL;
	}

out:
	if (ret == -ENOSPC) {
		struct nouveau_bo *nvbo;

		/* No aperture space left for the mapping; unmap the
		 * least-recently reserved bo on the io LRU and retry.
		 */
		nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
						typeof(*nvbo),
						io_reserve_lru);
		if (nvbo) {
			list_del_init(&nvbo->io_reserve_lru);
			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
					   bdev->dev_mapping);
			nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
			goto retry;
		}
	}
	mutex_unlock(&drm->ttm.io_reserve_mutex);
	return ret;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	nouveau_ttm_io_mem_free_locked(drm, reg);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
	int i, ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nvbo->kind)
			return 0;

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
						 0);

			ret = nouveau_bo_validate(nvbo, false, false);
			if (ret)
				return ret;
		}
		return 0;
	}

	/* make sure bo is in mappable vram */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
	    bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	for (i = 0; i < nvbo->placement.num_placement; ++i) {
		nvbo->placements[i].fpfn = 0;
		nvbo->placements[i].lpfn = mappable;
	}

	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
		nvbo->busy_placements[i].fpfn = 0;
		nvbo->busy_placements[i].lpfn = mappable;
	}

	nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}

static int
nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
			struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct device *dev;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm_tt_set_populated(ttm);
		return 0;
	}

	drm = nouveau_bdev(bdev);
	dev = drm->dev->dev;

#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge) {
		return ttm_pool_populate(ttm, ctx);
	}
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev, ctx);
	}
#endif
	return ttm_populate_and_map_pages(dev, ttm_dma, ctx);
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
			  struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct device *dev;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(bdev);
	dev = drm->dev->dev;

#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge) {
		ttm_pool_unpopulate(ttm);
		return;
	}
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev);
		return;
	}
#endif

	ttm_unmap_and_unpopulate_pages(dev, ttm_dma);
}

static void
nouveau_ttm_tt_destroy(struct ttm_bo_device *bdev,
		       struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	if (drm->agp.bridge) {
		ttm_agp_unbind(ttm);
		ttm_tt_destroy_common(bdev, ttm);
		ttm_agp_destroy(ttm);
		return;
	}
#endif
	nouveau_sgdma_destroy(bdev, ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
	struct dma_resv *resv = nvbo->bo.base.resv;

	if (exclusive)
		dma_resv_add_excl_fence(resv, &fence->base);
	else if (fence)
		dma_resv_add_shared_fence(resv, &fence->base);
}

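/*
 * dma_resv semantics: an exclusive fence behaves like a writer and must be
 * waited on by everyone, while shared fences behave like readers; callers
 * pass exclusive=true when the queued GPU work modifies the buffer.
 */
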
struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.ttm_tt_bind = &nouveau_ttm_tt_bind,
	.ttm_tt_unbind = &nouveau_ttm_tt_unbind,
	.ttm_tt_destroy = &nouveau_ttm_tt_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};