/*
 * Copyright 2007 Dave Airlied
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs <darktama@iinet.net.au>
 *          Jeremy Kolb <jkolb@brandeis.edu>
 */

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>

nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);

nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*size = roundup(*size, 32 * nvbo->tile_mode);

		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);

	*size = roundup(*size, PAGE_SIZE);
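
/*
 * Worked example of the rounding above (illustrative numbers, not from the
 * original source): roundup(x, y) rounds x up to the next multiple of y, so
 * a 10000-byte request with tile_mode = 4 on a chipset >= 0x40 becomes
 * roundup(10000, 64 * 4) = 10240 bytes, and the final
 * roundup(*size, PAGE_SIZE) then pads that to a whole number of CPU pages
 * (12288 with 4 KiB pages).
 */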

nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct nouveau_bo **pnvbo)
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nvbo->page_shift = 12;
	if (dev_priv->bar1_vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement,
			  align >> PAGE_SHIFT, 0, false, NULL, size,
	/* ttm will call nouveau_bo_del_ttm if it fails.. */
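
/*
 * Illustrative usage (a sketch, not taken from the original file): a caller
 * creating a page-aligned, 64 KiB, linear (untiled) VRAM buffer would look
 * roughly like this; the size and flag values are hypothetical.
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret;
 *
 *	ret = nouveau_bo_new(dev, 64 * 1024, PAGE_SIZE, TTM_PL_FLAG_VRAM,
 *			     0, 0, &nvbo);
 *	if (ret)
 *		return ret;
 */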

set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;

set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 2) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;

			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
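
/*
 * Worked example (illustrative numbers, not from the original source): with
 * 256 MiB of VRAM and 4 KiB pages, vram_pages = 65536, so a ZETA (depth)
 * buffer is constrained to pages [32768, ~0) in the upper half while a
 * colour buffer is constrained to pages [0, 32768), which is how the code
 * above keeps the two surfaces on independent memory controller units.
 */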

nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,

	set_placement_range(nvbo, type);

nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);

	if (nvbo->pin_refcnt++)

	ret = ttm_bo_reserve(bo, false, false, false, 0);

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);

	switch (bo->mem.mem_type) {
		dev_priv->fb_aper_free -= bo->mem.size;

		dev_priv->gart_info.aper_free -= bo->mem.size;

	ttm_bo_unreserve(bo);

nouveau_bo_unpin(struct nouveau_bo *nvbo)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (--nvbo->pin_refcnt)

	ret = ttm_bo_reserve(bo, false, false, false, 0);

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);

	switch (bo->mem.mem_type) {
		dev_priv->fb_aper_free += bo->mem.size;

		dev_priv->gart_info.aper_free += bo->mem.size;

	ttm_bo_unreserve(bo);
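
/*
 * Illustrative pin/unpin pairing (a sketch, not from the original file):
 * scanout and other long-lived buffers are typically pinned into VRAM for
 * their lifetime and unpinned again before being freed.
 *
 *	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *	if (ret)
 *		return ret;
 *	... use the buffer ...
 *	nouveau_bo_unpin(nvbo);
 */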

nouveau_bo_map(struct nouveau_bo *nvbo)
	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);

nouveau_bo_unmap(struct nouveau_bo *nvbo)
	ttm_bo_kunmap(&nvbo->kmap);

nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);

nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	return ioread16_native((void __force __iomem *)mem);

nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	iowrite16_native(val, (void __force __iomem *)mem);

nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	return ioread32_native((void __force __iomem *)mem);

nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	iowrite32_native(val, (void __force __iomem *)mem);
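
/*
 * Illustrative CPU access pattern (a sketch, not from the original file):
 * the rd/wr helpers assume the object is kmapped, so callers pair them with
 * nouveau_bo_map()/nouveau_bo_unmap(); index 0 below is just an example.
 *
 *	ret = nouveau_bo_map(nvbo);
 *	if (ret == 0) {
 *		nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);
 *		val = nouveau_bo_rd32(nvbo, 0);
 *		nouveau_bo_unmap(nvbo);
 *	}
 */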

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
		      unsigned long size, uint32_t page_flags,
		      struct page *dummy_read_page)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
	case NOUVEAU_GART_AGP:
		return ttm_agp_tt_create(bdev, dev->agp->bridge,
					 size, page_flags, dummy_read_page);
	case NOUVEAU_GART_PDMA:
	case NOUVEAU_GART_HW:
		return nouveau_sgdma_create_ttm(bdev, size, page_flags,

		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);

nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
	/* We'll do this from user space. */

nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;

		if (dev_priv->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;

			man->func = &ttm_bo_manager_func;

		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
		man->default_caching = TTM_PL_FLAG_WC;

		if (dev_priv->card_type >= NV_50)
			man->func = &nouveau_gart_manager;

			man->func = &ttm_bo_manager_func;

		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
			man->default_caching = TTM_PL_FLAG_WC;

		case NOUVEAU_GART_PDMA:
		case NOUVEAU_GART_HW:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;

			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);

		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);

nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,

		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);

	*pl = nvbo->placement;

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
	struct nouveau_fence *fence = NULL;

	ret = nouveau_fence_new(chan, &fence, true);

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);

nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;

	page_count = new_mem->num_pages;
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
		OUT_RING (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
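
/*
 * Worked example of the chunking above (illustrative numbers, not from the
 * original source): the copy is issued as page-sized lines with at most
 * 2047 lines per M2MF submission, so moving an 8 MiB buffer with 4 KiB
 * pages (2048 pages) takes two passes, one of 2047 lines and one of 1 line,
 * with src_offset/dst_offset advanced by line_count pages after each pass.
 */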

nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;

		u32 amount, stride, height;

		amount = min(length, (u64)(4 * 1024 * 1024));
		height = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);

			ret = RING_SPACE(chan, 2);

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);

		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);

			ret = RING_SPACE(chan, 2);

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);

		ret = RING_SPACE(chan, 14);

		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, height);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);

		src_offset += amount;
		dst_offset += amount;

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;

nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;

	ret = RING_SPACE(chan, 3);

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);

		BEGIN_RING(chan, NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING (chan, src_offset);
		OUT_RING (chan, dst_offset);
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);

nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
	struct nouveau_mem *node = mem->mm_node;

	ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
			     node->page_shift, NV_MEM_ACCESS_RO, vma);

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);

		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,

nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;

	chan = nvbo->channel;

		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (dev_priv->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (dev_priv->card_type < NV_C0)
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);

		ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);

	ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
					    no_wait_gpu, new_mem);

	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);

nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);

	ttm_bo_mem_put(bo, &tmp_mem);

nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);

	ttm_bo_mem_put(bo, &tmp_mem);
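
/*
 * Data flow of the two helpers above (a sketch inferred from the code, not
 * text carried over from the original file): because the M2MF engine only
 * reaches VRAM and TT (see the comment further up), a VRAM -> SYSTEM move
 * bounces through a temporary TT buffer as
 * VRAM --m2mf--> TT --ttm_bo_move_ttm--> SYSTEM ("flipd"), and a
 * SYSTEM -> VRAM move does the reverse,
 * SYSTEM --ttm_bo_move_ttm--> TT --m2mf--> VRAM ("flips").
 */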

nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = new_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem->mem_type == TTM_PL_VRAM) {
			nouveau_vm_map(vma, new_mem->mm_node);
		if (new_mem->mem_type == TTM_PL_TT &&
		    nvbo->page_shift == vma->vm->spg_shift) {
			nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,

			nouveau_vm_unmap(vma);

nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	if (new_mem->mem_type != TTM_PL_VRAM)

	if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,

nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;

nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);

	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		new_mem->mm_node = NULL;

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

	if (dev_priv->card_type < NV_50) {
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);

		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
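
/*
 * Dispatch summary for nouveau_bo_move() (a sketch inferred from the code
 * above, not a comment carried over from the original file):
 *
 *	old is SYSTEM and no ttm backing -> nothing to copy, take over the node
 *	no dev_priv->channel yet         -> ttm_bo_move_memcpy() (CPU copy)
 *	new placement is SYSTEM          -> nouveau_bo_move_flipd() (bounce via TT)
 *	old placement is SYSTEM          -> nouveau_bo_move_flips() (bounce via TT)
 *	anything else                    -> nouveau_bo_move_m2mf() (direct GPU copy)
 *	hardware path fails              -> fall back to ttm_bo_move_memcpy()
 */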

nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)

nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	mem->bus.addr = NULL;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))

	switch (mem->mem_type) {
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;

		struct nouveau_mem *node = mem->mm_node;

		if (!dev_priv->bar1_vm) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = pci_resource_start(dev->pdev, 1);
			mem->bus.is_iomem = true;

		if (dev_priv->card_type >= NV_C0)
			page_shift = node->page_shift;

		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
				     page_shift, NV_MEM_ACCESS_RW,

		nouveau_vm_map(&node->bar_vma, node);

			nouveau_vm_put(&node->bar_vma);

		mem->bus.offset = node->bar_vma.offset;
		if (dev_priv->card_type == NV_50) /*XXX*/
			mem->bus.offset -= 0x0020000000ULL;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;

nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;

	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)

	if (!node->bar_vma.node)

	nouveau_vm_unmap(&node->bar_vma);
	nouveau_vm_put(&node->bar_vma);

nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
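
/*
 * Worked example for the mappable-VRAM check above (illustrative numbers,
 * not from the original source): with a 256 MiB BAR, fb_mappable_pages is
 * 65536 (4 KiB pages). A buffer ending below page 65536 is already CPU
 * mappable and is left alone; one placed beyond that limit is given
 * lpfn = 65536 and revalidated so it migrates into the mappable window
 * before the fault is served.
 */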

nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
	struct nouveau_fence *old_fence;

	nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
	.sync_obj_wait = __nouveau_fence_wait,
	.sync_obj_flush = __nouveau_fence_flush,
	.sync_obj_unref = __nouveau_fence_unref,
	.sync_obj_ref = __nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,

struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {

nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	struct nouveau_mem *node = nvbo->bo.mem.mm_node;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);

	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		nouveau_vm_map_sg(vma, 0, size, node, node->pages);

	list_add_tail(&vma->head, &nvbo->vma_list);

nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
		spin_lock(&nvbo->bo.bdev->fence_lock);
		ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		nouveau_vm_unmap(vma);

	nouveau_vm_put(vma);
	list_del(&vma->head);
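
/*
 * Illustrative per-VM mapping lifecycle (a sketch, not from the original
 * file; error handling is omitted and the kzalloc of the vma is
 * hypothetical):
 *
 *	struct nouveau_vma *vma = nouveau_bo_vma_find(nvbo, vm);
 *
 *	if (!vma) {
 *		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 *		ret = nouveau_bo_vma_add(nvbo, vm, vma);
 *	}
 *	... use vma->offset as the buffer's GPU virtual address ...
 *	nouveau_bo_vma_del(nvbo, vma);
 */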