drm/nouveau: protect evo_wait/evo_kick sections with a channel mutex
[linux-2.6-block.git] drivers/gpu/drm/nouveau/nouveau_bo.c
/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <core/engine.h>

#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/bar.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

/*
 * NV10-NV40 tiling helpers
 */

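/* Tile regions are global framebuffer state shared by all channels:
 * reprogramming one tears down the previous addr/pitch setup, installs the
 * new one, and re-submits it to every engine (GR, MPEG) that caches tiling
 * state.  A region whose fence has not yet signalled may still be in use by
 * the GPU, which is what the get/put helpers below check for.
 */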
static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
	struct nouveau_engine *engine;

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		pfb->tile.fini(pfb, i, tile);

	if (pitch)
		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);

	pfb->tile.prog(pfb, i, tile);

	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
		engine->tile_prog(engine, i);
	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
		engine->tile_prog(engine, i);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct nouveau_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		if (fence) {
			/* Mark it as pending. */
			tile->fence = fence;
			nouveau_fence_ref(fence);
		}

		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && pfb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					   pitch, flags);
	return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

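/* Buffer size/alignment constraints: on pre-NV50 chips a tiled buffer must
 * be padded to a whole number of tile rows (64 * tile_mode bytes from NV20
 * onwards, 32 * tile_mode on NV10) and its base aligned to a chipset-
 * dependent boundary (64KiB on NV40+, 32KiB on NV30, 16KiB earlier).  On
 * NV50+ the only requirement is the page size chosen for the object.
 */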
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_device *device = nv_device(drm->device);

	if (device->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (device->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	nvbo->page_shift = 12;
	if (drm->client.base.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}
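
/* A minimal usage sketch (error handling elided, values illustrative):
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret = nouveau_bo_new(dev, 64 * 1024, 0,
 *				 TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TT,
 *				 0, 0, NULL, &nvbo);
 *
 * would create an untiled 64KiB object that the allocator may place in
 * either VRAM or GART.
 */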

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	u32 vram_pages = pfb->ram.size >> PAGE_SHIFT;

	if (nv_device(drm->device)->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

6ee73861 278void
78ad0f7b 279nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
6ee73861 280{
78ad0f7b
FJ
281 struct ttm_placement *pl = &nvbo->placement;
282 uint32_t flags = TTM_PL_MASK_CACHING |
283 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
284
285 pl->placement = nvbo->placements;
286 set_placement_list(nvbo->placements, &pl->num_placement,
287 type, flags);
288
289 pl->busy_placement = nvbo->busy_placements;
290 set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
291 type | busy, flags);
699ddfd9
FJ
292
293 set_placement_range(nvbo, type);
6ee73861
BS
294}
295
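/* Pinning makes an object immovable: the first pin validates it into the
 * requested memory type and, via pin_refcnt, causes placement_set() above
 * to add TTM_PL_FLAG_NO_EVICT; nested pins just bump the refcount.  Pinning
 * into a different memory type than the current one is refused.
 */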
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available -= bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			      interruptible, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

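/* CPU accessors for a mapped object (nouveau_bo_map() must have succeeded).
 * ttm_kmap_obj_virtual() reports whether the kmap points at iomem (VRAM
 * seen through the BAR) or ordinary kernel memory, so these helpers switch
 * between io*_native() and plain dereferences accordingly.
 */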
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
#if __OS_HAS_AGP
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);
	}
#endif

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (nv_device(drm->device)->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (nv_device(drm->device)->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
		if (drm->agp.stat != ENABLED)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.stat == ENABLED) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}

		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

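/* On NV50 and up the per-chipset copy methods address memory through
 * temporary virtual mappings of the source and destination (set up by
 * nouveau_vma_getmap()); earlier chips address both ends directly through
 * DMA objects.  Each move is fenced so TTM can defer destruction of the old
 * backing store until the GPU has finished the transfer.
 */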
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
					no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
		FIRE_RING (chan);
	}
	return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
	}
	return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, 0x00000000 /* COPY */);
		OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING (chan, NvNotify0);
		OUT_RING (chan, NvDmaFB);
		OUT_RING (chan, NvDmaFB);
	}

	return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int ret;

	while (length) {
		u32 amount, stride, height;

		amount = min(length, (u64)(4 * 1024 * 1024));
		stride = 16 * 4;
		height = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, height);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING (chan, NvNotify0);
	}

	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return NvDmaFB;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING (chan, src_offset);
		OUT_RING (chan, dst_offset);
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
			     PAGE_SHIFT, node->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
	else
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);

	return 0;
}

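/* All accelerated moves are funnelled through the DRM's shared transfer
 * channel; chan->cli->mutex serialises command submission and fence
 * emission on that channel.
 */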
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->channel;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	mutex_lock(&chan->cli->mutex);

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (nv_device(drm->device)->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
		if (ret)
			goto out;

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
		if (ret)
			goto out;
	}

	ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_gpu, new_mem);
	}

out:
	mutex_unlock(&chan->cli->mutex);
	return ret;
}

void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		u32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_object *object;
		struct nouveau_channel *chan;
		u32 handle = (mthd->engine << 16) | mthd->oclass;

		if (mthd->init == nve0_bo_move_init)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
					 mthd->oclass, NULL, 0, &object);
		if (ret == 0) {
			ret = mthd->init(chan, handle);
			if (ret) {
				nouveau_object_del(nv_object(drm),
						   chan->handle, handle);
				continue;
			}

			drm->ttm.move = mthd->exec;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

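/* VRAM <-> system moves take two hops through a GART-visible TT buffer,
 * since the copy engines cannot reach plain system pages: "flipd" goes
 * vram -> tt -> system, "flips" goes system -> tt -> vram.
 */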
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else
		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
		    nvbo->page_shift == vma->vm->vmm->spg_shift) {
			if (((struct nouveau_mem *)new_mem->mm_node)->sg)
				nouveau_vm_map_sg_table(vma, 0, new_mem->
						  num_pages << PAGE_SHIFT,
						  new_mem->mm_node);
			else
				nouveau_vm_map_sg(vma, 0, new_mem->
						  num_pages << PAGE_SHIFT,
						  new_mem->mm_node);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (nv_device(drm->device)->card_type >= NV_10) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
					       nvbo->tile_mode,
					       nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;

	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}

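/* Top-level TTM move hook.  In order of preference: a "fake" move (just
 * retarget the mm node) for unpopulated system-memory objects, the
 * per-chipset copy engine selected by nouveau_bo_move_init(), and a CPU
 * memcpy as the final fallback.  Pre-NV50 chips additionally keep their
 * tile-region state in sync across the move.
 */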
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (nv_device(drm->device)->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* CPU copy if we have no accelerated method available */
	if (!drm->ttm.move) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr,
					    no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr,
					    no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr,
					   no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);

out:
	if (nv_device(drm->device)->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (drm->agp.stat == ENABLED) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = !dev->agp->cant_use_aperture;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
		if (nv_device(drm->device)->card_type >= NV_50) {
			struct nouveau_bar *bar = nouveau_bar(drm->device);
			struct nouveau_mem *node = mem->mm_node;

			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
					&node->bar_vma);
			if (ret)
				return ret;

			mem->bus.offset = node->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_bar *bar = nouveau_bar(drm->device);
	struct nouveau_mem *node = mem->mm_node;

	if (!node->bar_vma.node)
		return;

	bar->unmap(bar, &node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_device *device = nv_device(drm->device);
	u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (nv_device(drm->device)->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable;
	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}

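/* Provide pages and DMA mappings for a ttm_tt.  Imported (SG/prime)
 * objects already carry their pages; AGP and swiotlb configurations use
 * their own population paths; otherwise pages come from the TTM pool and
 * are mapped one by one with pci_map_page().
 */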
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct drm_device *dev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	dev = drm->dev;

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
						   0, PAGE_SIZE,
						   PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
			/* unwind mappings 0..i-1; (i--) rather than (--i) so
			 * page 0 is unmapped too and i == 0 can't underflow
			 */
			while (i--) {
				pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct drm_device *dev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	dev = drm->dev;

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence = NULL;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

static void
nouveau_bo_fence_unref(void **sync_obj)
{
	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
}

static void *
nouveau_bo_fence_ref(void *sync_obj)
{
	return nouveau_fence_ref(sync_obj);
}

static bool
nouveau_bo_fence_signalled(void *sync_obj)
{
	return nouveau_fence_done(sync_obj);
}

static int
nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
{
	return nouveau_fence_wait(sync_obj, lazy, intr);
}

static int
nouveau_bo_fence_flush(void *sync_obj)
{
	return 0;
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_bo_fence_signalled,
	.sync_obj_wait = nouveau_bo_fence_wait,
	.sync_obj_flush = nouveau_bo_fence_flush,
	.sync_obj_unref = nouveau_bo_fence_unref,
	.sync_obj_ref = nouveau_bo_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};

struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

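/* Map an object into an additional address space; each client VM gets its
 * own nouveau_vma on the object's vma_list.  The mapping is created now if
 * the buffer has backing store, and is refreshed on subsequent moves by
 * nouveau_bo_move_ntfy().
 */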
int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
		if (node->sg)
			nouveau_vm_map_sg_table(vma, 0, size, node);
		else
			nouveau_vm_map_sg(vma, 0, size, node);
	}

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
			spin_lock(&nvbo->bo.bdev->fence_lock);
			ttm_bo_wait(&nvbo->bo, false, false, false);
			spin_unlock(&nvbo->bo.bdev->fence_lock);
			nouveau_vm_unmap(vma);
		}

		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}