/* drivers/gpu/drm/nouveau/nouveau_bo.c */

/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	if (nvbo->vma.node) {
		nouveau_vm_unmap(&nvbo->vma);
		nouveau_vm_put(&nvbo->vma);
	}
	kfree(nvbo);
}

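/*
 * Buffer size/alignment fixup.  Tiled buffers on pre-NV50 chipsets carry
 * chipset-specific alignment and size constraints; on NV50+ the constraint
 * is simply the page size the object will be mapped with.  Worked example
 * (values from the code below): chipset >= 0x40 with tile_mode 4 forces
 * *align to 64KiB and rounds *size up to a multiple of 64 * 4 = 256 bytes.
 */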
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

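/*
 * Allocate and initialise a new buffer object.  A minimal usage sketch
 * (illustrative only, not taken from this driver):
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret = nouveau_bo_new(dev, NULL, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *				 0, 0, &nvbo);
 *
 * Passing a NULL channel simply means later moves fall back to the
 * kernel's channel; size and align are fixed up above before the
 * underlying ttm_bo_init() call.
 */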
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nvbo->page_shift = 12;
	if (dev_priv->bar1_vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	align >>= PAGE_SHIFT;

	if (dev_priv->chan_vm) {
		ret = nouveau_vm_get(dev_priv->chan_vm, size, nvbo->page_shift,
				     NV_MEM_ACCESS_RW, &nvbo->vma);
		if (ret) {
			kfree(nvbo);
			return ret;
		}
	}

	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}
	nvbo->channel = NULL;

	*pnvbo = nvbo;
	return 0;
}

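/*
 * Expand a mask of TTM memory types into the flat array of placement
 * entries TTM expects, each OR'd with the common caching/pinning flags.
 */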
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 2) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

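/*
 * Pinning is refcounted: only the first pin actually (re)validates the
 * buffer into the requested memory type (nouveau_bo_placement_set() adds
 * TTM_PL_FLAG_NO_EVICT while pin_refcnt is non-zero), and pinning a buffer
 * that is already pinned into a different memory type fails with -EINVAL.
 */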
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

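/*
 * Establish (or drop) a kernel virtual mapping of the buffer.  The mapping
 * is cached in nvbo->kmap and must be in place before any of the
 * nouveau_bo_rd/wr helpers further down are used.
 */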
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

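/*
 * Accessors built on the kmap above.  ttm_kmap_obj_virtual() reports
 * whether the mapping is I/O memory (e.g. write-combined VRAM through a
 * BAR), in which case the io*_native helpers are required instead of
 * plain loads and stores.
 */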
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_PDMA:
	case NOUVEAU_GART_HW:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (dev_priv->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (dev_priv->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
			man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case NOUVEAU_GART_PDMA:
		case NOUVEAU_GART_HW:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

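/*
 * Eviction policy: VRAM buffers are preferably evicted to GART, with
 * system memory as the busy fallback; everything else goes straight to
 * system memory.
 */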
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}


/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

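/*
 * The M2MF copies below treat the buffer as a linear run of pages and copy
 * one PAGE_SIZE-wide line per page, at most 2047 lines per submission
 * (presumably the limit of the engine's line-count field).
 */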
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

6ee73861 531static int
f1ab0cc9
BS
532nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
533 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
6ee73861 534{
d2f96666 535 struct nouveau_mem *node = old_mem->mm_node;
f1ab0cc9
BS
536 struct nouveau_bo *nvbo = nouveau_bo(bo);
537 u64 length = (new_mem->num_pages << PAGE_SHIFT);
d2f96666
BS
538 u64 src_offset = node->vma[0].offset;
539 u64 dst_offset = node->vma[1].offset;
6ee73861
BS
540 int ret;
541
f1ab0cc9
BS
542 while (length) {
543 u32 amount, stride, height;
544
5220b3c1
BS
545 amount = min(length, (u64)(4 * 1024 * 1024));
546 stride = 16 * 4;
f1ab0cc9
BS
547 height = amount / stride;
548
f13b3263
FJ
549 if (new_mem->mem_type == TTM_PL_VRAM &&
550 nouveau_bo_tile_layout(nvbo)) {
f1ab0cc9
BS
551 ret = RING_SPACE(chan, 8);
552 if (ret)
553 return ret;
554
555 BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
556 OUT_RING (chan, 0);
5220b3c1 557 OUT_RING (chan, 0);
f1ab0cc9
BS
558 OUT_RING (chan, stride);
559 OUT_RING (chan, height);
560 OUT_RING (chan, 1);
561 OUT_RING (chan, 0);
562 OUT_RING (chan, 0);
563 } else {
564 ret = RING_SPACE(chan, 2);
565 if (ret)
566 return ret;
567
568 BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
569 OUT_RING (chan, 1);
570 }
f13b3263
FJ
571 if (old_mem->mem_type == TTM_PL_VRAM &&
572 nouveau_bo_tile_layout(nvbo)) {
f1ab0cc9
BS
573 ret = RING_SPACE(chan, 8);
574 if (ret)
575 return ret;
576
577 BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
578 OUT_RING (chan, 0);
5220b3c1 579 OUT_RING (chan, 0);
f1ab0cc9
BS
580 OUT_RING (chan, stride);
581 OUT_RING (chan, height);
582 OUT_RING (chan, 1);
583 OUT_RING (chan, 0);
584 OUT_RING (chan, 0);
585 } else {
586 ret = RING_SPACE(chan, 2);
587 if (ret)
588 return ret;
589
590 BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
591 OUT_RING (chan, 1);
592 }
593
594 ret = RING_SPACE(chan, 14);
6ee73861
BS
595 if (ret)
596 return ret;
f1ab0cc9
BS
597
598 BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
599 OUT_RING (chan, upper_32_bits(src_offset));
600 OUT_RING (chan, upper_32_bits(dst_offset));
601 BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
602 OUT_RING (chan, lower_32_bits(src_offset));
603 OUT_RING (chan, lower_32_bits(dst_offset));
604 OUT_RING (chan, stride);
605 OUT_RING (chan, stride);
606 OUT_RING (chan, stride);
607 OUT_RING (chan, height);
608 OUT_RING (chan, 0x00000101);
609 OUT_RING (chan, 0x00000000);
610 BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
611 OUT_RING (chan, 0);
612
613 length -= amount;
614 src_offset += amount;
615 dst_offset += amount;
6ee73861
BS
616 }
617
f1ab0cc9
BS
618 return 0;
619}
620
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF,
				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
			     node->page_shift, NV_MEM_ACCESS_RO, vma);
	if (ret)
		return ret;

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
	else
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,
				  node, node->pages);

	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan) {
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (dev_priv->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
		if (ret)
			goto out;

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
		if (ret)
			goto out;
	}

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
	if (dev_priv->card_type < NV_C0)
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

out:
	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);
	return ret;
}

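/*
 * As noted above, M2MF can only access TTM_PL_{VRAM,TT}.  Moves involving
 * system memory therefore take two hops: "flipd" stages the GPU copy
 * through a GART placement and then flips into the final system-memory
 * placement with ttm_bo_move_ttm(); "flips" does the same in the opposite
 * direction.
 */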
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = new_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma = &nvbo->vma;

	if (!vma->vm)
		return;

	if (new_mem->mem_type == TTM_PL_VRAM) {
		nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
	} else
	if (new_mem->mem_type == TTM_PL_TT &&
	    nvbo->page_shift == nvbo->vma.vm->spg_shift) {
		nouveau_vm_map_sg(&nvbo->vma, 0,
				  new_mem->num_pages << PAGE_SHIFT,
				  node, node->pages);
	} else {
		nouveau_vm_unmap(&nvbo->vma);
	}
}

6ee73861 838static int
a0af9add
FJ
839nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
840 struct nouveau_tile_reg **new_tile)
6ee73861
BS
841{
842 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
6ee73861 843 struct drm_device *dev = dev_priv->dev;
a0af9add 844 struct nouveau_bo *nvbo = nouveau_bo(bo);
a4154bbf 845 u64 offset = new_mem->start << PAGE_SHIFT;
6ee73861 846
a4154bbf
BS
847 *new_tile = NULL;
848 if (new_mem->mem_type != TTM_PL_VRAM)
a0af9add 849 return 0;
a0af9add 850
a4154bbf 851 if (dev_priv->card_type >= NV_10) {
a0af9add 852 *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
a5cf68b0
FJ
853 nvbo->tile_mode,
854 nvbo->tile_flags);
6ee73861
BS
855 }
856
a0af9add
FJ
857 return 0;
858}
859
860static void
861nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
862 struct nouveau_tile_reg *new_tile,
863 struct nouveau_tile_reg **old_tile)
864{
865 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
866 struct drm_device *dev = dev_priv->dev;
867
a4154bbf
BS
868 nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
869 *old_tile = new_tile;
a0af9add
FJ
870}
871
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (dev_priv->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

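/*
 * Describe how a memory region is exposed to the CPU.  AGP GART is mapped
 * through the AGP aperture; VRAM is either mapped directly through BAR1
 * or, on cards with a BAR1 VM, through a temporary VM mapping created here
 * and torn down again in nouveau_ttm_io_mem_free().
 */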
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
	{
		struct nouveau_mem *node = mem->mm_node;
		u8 page_shift;

		if (!dev_priv->bar1_vm) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = pci_resource_start(dev->pdev, 1);
			mem->bus.is_iomem = true;
			break;
		}

		if (dev_priv->card_type == NV_C0)
			page_shift = node->page_shift;
		else
			page_shift = 12;

		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
				     page_shift, NV_MEM_ACCESS_RW,
				     &node->bar_vma);
		if (ret)
			return ret;

		nouveau_vm_map(&node->bar_vma, node);
		if (ret) {
			nouveau_vm_put(&node->bar_vma);
			return ret;
		}

		mem->bus.offset = node->bar_vma.offset;
		if (dev_priv->card_type == NV_50) /*XXX*/
			mem->bus.offset -= 0x0020000000ULL;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
	}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;

	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
		return;

	if (!node->bar_vma.node)
		return;

	nouveau_vm_unmap(&node->bar_vma);
	nouveau_vm_put(&node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}

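/*
 * Attach a fence as the buffer's sync object: a reference is taken on the
 * new fence first, the pointer swap happens under bdev->fence_lock, and
 * the old fence's reference is dropped only after the lock is released.
 */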
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
	.sync_obj_wait = __nouveau_fence_wait,
	.sync_obj_flush = __nouveau_fence_flush,
	.sync_obj_unref = __nouveau_fence_unref,
	.sync_obj_ref = __nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};