/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>

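/* TTM destroy callback, invoked once the last reference to the buffer
 * object is dropped.  By this point the GEM wrapper should already have
 * been destroyed; the DRM_ERROR below flags a refcounting bug if not.
 */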
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	nouveau_vm_unmap(&nvbo->vma);
	nouveau_vm_put(&nvbo->vma);
	kfree(nvbo);
}

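/* Grow the requested size and alignment to satisfy the hardware's
 * tiling and page-size constraints.  The per-chipset alignment
 * constants below reflect each generation's tiling granularity.
 */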
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
		       int *page_shift)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		if (likely(dev_priv->chan_vm)) {
			if (*size > 256 * 1024)
				*page_shift = dev_priv->chan_vm->lpg_shift;
			else
				*page_shift = dev_priv->chan_vm->spg_shift;
		} else {
			*page_shift = 12;
		}

		*size = roundup(*size, (1 << *page_shift));
		*align = max((1 << *page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

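/* Allocate and initialise a nouveau buffer object.  Illustrative usage
 * (not taken from this file; the channel, size, tiling and placement
 * values are caller-specific):
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret = nouveau_bo_new(dev, NULL, 65536, 0, TTM_PL_FLAG_VRAM,
 *				 0, 0x0000, false, true, &nvbo);
 *	if (ret == 0)
 *		ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 */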
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, bool no_vm, bool mappable,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret = 0, page_shift = 0;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
	align >>= PAGE_SHIFT;

	if (!nvbo->no_vm && dev_priv->chan_vm) {
		ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
				     NV_MEM_ACCESS_RW, &nvbo->vma);
		if (ret) {
			kfree(nvbo);
			return ret;
		}
	}

	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}
	nvbo->channel = NULL;

	if (nvbo->vma.node) {
		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
			nvbo->bo.offset = nvbo->vma.offset;
	}

	*pnvbo = nvbo;
	return 0;
}

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 2) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

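/* Pinning marks a buffer unevictable in its current memory type; the
 * refcount below makes nested pin/unpin pairs cheap.  Pinning the same
 * bo into two different memory types at once is rejected.
 */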
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (ret)
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	if (nvbo->vma.node) {
		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
			nvbo->bo.offset = nvbo->vma.offset;
	}

	return 0;
}

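/* Sized accessors for a kmap'd bo.  ttm_kmap_obj_virtual() reports
 * whether the mapping is I/O memory, in which case the MMIO accessors
 * must be used instead of plain loads and stores.
 */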
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (dev_priv->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			man->gpu_offset = dev_priv->gart_info.aper_base;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (nvbo->no_vm) {
		if (mem->mem_type == TTM_PL_TT)
			return NvDmaGART;
		return NvDmaVRAM;
	}

	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

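/* Fermi (NVC0) M2MF copy.  The transfer is issued in chunks of at most
 * 2047 lines of one page each, the same line-count limit used by the
 * pre-NV50 path further below.
 */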
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 src_offset = old_mem->start << PAGE_SHIFT;
	u64 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	if (!nvbo->no_vm) {
		if (old_mem->mem_type == TTM_PL_VRAM)
			src_offset = nvbo->vma.offset;
		else
			src_offset += dev_priv->gart_info.aper_base;

		if (new_mem->mem_type == TTM_PL_VRAM)
			dst_offset = nvbo->vma.offset;
		else
			dst_offset += dev_priv->gart_info.aper_base;
	}

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset, dst_offset;
	int ret;

	src_offset = old_mem->start << PAGE_SHIFT;
	dst_offset = new_mem->start << PAGE_SHIFT;
	if (!nvbo->no_vm) {
		if (old_mem->mem_type == TTM_PL_VRAM)
			src_offset = nvbo->vma.offset;
		else
			src_offset += dev_priv->gart_info.aper_base;

		if (new_mem->mem_type == TTM_PL_VRAM)
			dst_offset = nvbo->vma.offset;
		else
			dst_offset += dev_priv->gart_info.aper_base;
	}

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (length) {
		u32 amount, stride, height;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF,
				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

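/* Dispatch an accelerated copy on whichever channel is available.  If
 * the bo has no channel of its own (or bypasses the shared VM), the
 * kernel channel is borrowed under its dedicated mutex class.
 */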
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan || nvbo->no_vm) {
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
	if (dev_priv->card_type < NV_C0)
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);
	return ret;
}

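/* VRAM <-> SYSTEM moves are done in two hops through a GART-backed
 * temporary: flipd blits VRAM->TT and then flips the ttm backing to
 * system memory; flips is the mirror image of that sequence.
 */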
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset;

	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
		/* Nothing to do. */
		*new_tile = NULL;
		return 0;
	}

	offset = new_mem->start << PAGE_SHIFT;

	if (dev_priv->chan_vm) {
		nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
	} else if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	if (dev_priv->card_type >= NV_10 &&
	    dev_priv->card_type < NV_50) {
		nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
		*old_tile = new_tile;
	}
}

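/* Top-level TTM move hook: try the cheap paths first (fake copy for
 * unpopulated system bos, memcpy before the channel is up), then the
 * hardware blit, and fall back to memcpy if the blit fails.
 */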
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
	if (ret)
		return ret;

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (ret)
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
	else
		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
	{
		struct nouveau_vram *vram = mem->mm_node;
		u8 page_shift;

		if (!dev_priv->bar1_vm) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = pci_resource_start(dev->pdev, 1);
			mem->bus.is_iomem = true;
			break;
		}

		if (dev_priv->card_type == NV_C0)
			page_shift = vram->page_shift;
		else
			page_shift = 12;

		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
				     page_shift, NV_MEM_ACCESS_RW,
				     &vram->bar_vma);
		if (ret)
			return ret;

		nouveau_vm_map(&vram->bar_vma, vram);
		if (ret) {
			nouveau_vm_put(&vram->bar_vma);
			return ret;
		}

		mem->bus.offset = vram->bar_vma.offset;
		if (dev_priv->card_type == NV_50) /*XXX*/
			mem->bus.offset -= 0x0020000000ULL;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
	}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct nouveau_vram *vram = mem->mm_node;

	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
		return;

	if (!vram->bar_vma.node)
		return;

	nouveau_vm_unmap(&vram->bar_vma);
	nouveau_vm_put(&vram->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}

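/* Attach a new fence as the bo's sync object, taking a reference on it
 * first so the old fence can be dropped safely under fence_lock.
 */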
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
	.sync_obj_wait = __nouveau_fence_wait,
	.sync_obj_flush = __nouveau_fence_flush,
	.sync_obj_unref = __nouveau_fence_unref,
	.sync_obj_ref = __nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};