2 * Copyright 2007 Dave Airlied
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
25 * Authors: Dave Airlied <airlied@linux.ie>
26 * Ben Skeggs <darktama@iinet.net.au>
27 * Jeremy Kolb <jkolb@brandeis.edu>
30 #include <linux/dma-mapping.h>
31 #include <drm/ttm/ttm_tt.h>
33 #include "nouveau_drv.h"
34 #include "nouveau_chan.h"
35 #include "nouveau_fence.h"
37 #include "nouveau_bo.h"
38 #include "nouveau_ttm.h"
39 #include "nouveau_gem.h"
40 #include "nouveau_mem.h"
41 #include "nouveau_vmm.h"
43 #include <nvif/class.h>
44 #include <nvif/if500b.h>
45 #include <nvif/if900b.h>
47 static int nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
48 struct ttm_resource *reg);
49 static void nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);
52 * NV10-NV40 tiling helpers
56 nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
57 u32 addr, u32 size, u32 pitch, u32 flags)
59 struct nouveau_drm *drm = nouveau_drm(dev);
60 int i = reg - drm->tile.reg;
61 struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
62 struct nvkm_fb_tile *tile = &fb->tile.region[i];
64 nouveau_fence_unref(&reg->fence);
67 nvkm_fb_tile_fini(fb, i, tile);
70 nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);
72 nvkm_fb_tile_prog(fb, i, tile);
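/*
 * Tile regions are handed out by nv10_bo_get_tile_region() and returned
 * through nv10_bo_put_tile_region(); both paths are serialised by
 * drm->tile.lock.  A region is only reused once any fence left behind by
 * its previous user has signalled, and put() records the caller's fence so
 * the region is not recycled while the GPU could still be accessing it.
 */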
75 static struct nouveau_drm_tile *
76 nv10_bo_get_tile_region(struct drm_device *dev, int i)
78 struct nouveau_drm *drm = nouveau_drm(dev);
79 struct nouveau_drm_tile *tile = &drm->tile.reg[i];
81 spin_lock(&drm->tile.lock);
84 (!tile->fence || nouveau_fence_done(tile->fence)))
89 spin_unlock(&drm->tile.lock);
94 nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
95 struct dma_fence *fence)
97 struct nouveau_drm *drm = nouveau_drm(dev);
100 spin_lock(&drm->tile.lock);
101 tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
103 spin_unlock(&drm->tile.lock);
107 static struct nouveau_drm_tile *
108 nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
109 u32 size, u32 pitch, u32 zeta)
111 struct nouveau_drm *drm = nouveau_drm(dev);
112 struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
113 struct nouveau_drm_tile *tile, *found = NULL;
116 for (i = 0; i < fb->tile.regions; i++) {
117 tile = nv10_bo_get_tile_region(dev, i);
119 if (pitch && !found) {
123 } else if (tile && fb->tile.region[i].pitch) {
124 /* Kill an unused tile region. */
125 nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
128 nv10_bo_put_tile_region(dev, tile, NULL);
132 nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
137 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
139 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
140 struct drm_device *dev = drm->dev;
141 struct nouveau_bo *nvbo = nouveau_bo(bo);
143 WARN_ON(nvbo->bo.pin_count > 0);
144 nouveau_bo_del_io_reserve_lru(bo);
145 nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
148 * If nouveau_bo_new() allocated this buffer, the GEM object was never
149 * initialized, so don't attempt to release it.
152 /* GEM objects not shared with other VMs get their
153 * dma_resv from a root GEM object.
156 drm_gem_object_put(nvbo->r_obj);
158 drm_gem_object_release(&bo->base);
160 dma_resv_fini(&bo->base._resv);
167 roundup_64(u64 x, u32 y)
175 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
177 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
178 struct nvif_device *device = &drm->client.device;
180 if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
182 if (device->info.chipset >= 0x40) {
184 *size = roundup_64(*size, 64 * nvbo->mode);
186 } else if (device->info.chipset >= 0x30) {
188 *size = roundup_64(*size, 64 * nvbo->mode);
190 } else if (device->info.chipset >= 0x20) {
192 *size = roundup_64(*size, 64 * nvbo->mode);
194 } else if (device->info.chipset >= 0x10) {
196 *size = roundup_64(*size, 32 * nvbo->mode);
200 *size = roundup_64(*size, (1 << nvbo->page));
201 *align = max((1 << nvbo->page), *align);
204 *size = roundup_64(*size, PAGE_SIZE);
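/*
 * Example (illustrative): on a pre-Tesla chipset in the 0x10 range with a
 * tiled buffer where nvbo->mode == 4, the size is rounded up to a multiple
 * of 32 * 4 = 128 bytes; on Tesla and newer it is rounded to the GPU page
 * size selected in nouveau_bo_alloc(), and every buffer is finally rounded
 * up to PAGE_SIZE.
 */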
208 nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
209 u32 tile_mode, u32 tile_flags, bool internal)
211 struct nouveau_drm *drm = cli->drm;
212 struct nouveau_bo *nvbo;
213 struct nvif_mmu *mmu = &cli->mmu;
214 struct nvif_vmm *vmm = &nouveau_cli_vmm(cli)->vmm;
218 NV_WARN(drm, "skipped size %016llx\n", *size);
219 return ERR_PTR(-EINVAL);
222 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
224 return ERR_PTR(-ENOMEM);
226 INIT_LIST_HEAD(&nvbo->head);
227 INIT_LIST_HEAD(&nvbo->entry);
228 INIT_LIST_HEAD(&nvbo->vma_list);
229 nvbo->bo.bdev = &drm->ttm.bdev;
231 /* This is confusing, and doesn't actually mean we want an uncached
232 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
233 * into in nouveau_gem_new().
235 if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) {
236 /* Determine if we can get a cache-coherent map, forcing
237 * uncached mapping if we can't.
239 if (!nouveau_drm_use_coherent_gpu_mapping(drm))
240 nvbo->force_coherent = true;
243 nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
244 if (!nouveau_cli_uvmm(cli) || internal) {
245 /* for BO noVM allocs, don't assign kinds */
246 if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
247 nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
248 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
250 return ERR_PTR(-EINVAL);
253 nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
254 } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
255 nvbo->kind = (tile_flags & 0x00007f00) >> 8;
256 nvbo->comp = (tile_flags & 0x00030000) >> 16;
257 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
259 return ERR_PTR(-EINVAL);
262 nvbo->zeta = (tile_flags & 0x00000007);
264 nvbo->mode = tile_mode;
266 /* Determine the desirable target GPU page size for the buffer. */
267 for (i = 0; i < vmm->page_nr; i++) {
268 /* Because we cannot currently allow VMM maps to fail
269 * during buffer migration, we need to determine page
270 * size for the buffer up-front, and pre-allocate its
273 * Skip page sizes that can't support needed domains.
275 if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
276 (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
278 if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
279 (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
282 /* Select this page size if it's the first that supports
283 * the potential memory domains, or when it's compatible
284 * with the requested compression settings.
286 if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
289 /* Stop once the buffer is at least as large as the current page size. */
290 if (*size >= 1ULL << vmm->page[i].shift)
294 if (WARN_ON(pi < 0)) {
296 return ERR_PTR(-EINVAL);
299 /* Disable compression if suitable settings couldn't be found. */
300 if (nvbo->comp && !vmm->page[pi].comp) {
301 if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
302 nvbo->kind = mmu->kind[nvbo->kind];
305 nvbo->page = vmm->page[pi].shift;
307 /* reject other tile flags when in VM mode. */
309 return ERR_PTR(-EINVAL);
310 if (tile_flags & ~NOUVEAU_GEM_TILE_NONCONTIG)
311 return ERR_PTR(-EINVAL);
313 /* Determine the desirable target GPU page size for the buffer. */
314 for (i = 0; i < vmm->page_nr; i++) {
315 /* Because we cannot currently allow VMM maps to fail
316 * during buffer migration, we need to determine page
317 * size for the buffer up-front, and pre-allocate its
320 * Skip page sizes that can't support needed domains.
322 if ((domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
324 if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
325 (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
328 /* pick the last one as it will be smallest. */
331 /* Stop once the buffer is at least as large as the current page size. */
332 if (*size >= 1ULL << vmm->page[i].shift)
335 if (WARN_ON(pi < 0)) {
337 return ERR_PTR(-EINVAL);
339 nvbo->page = vmm->page[pi].shift;
342 nouveau_bo_fixup_align(nvbo, align, size);
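/*
 * Illustrative outcome of the page-size loops above: vmm->page[] is walked
 * from the largest page size down (see "pick the last one as it will be
 * smallest"), sizes that cannot back the requested domains are skipped,
 * and the walk stops at the first remaining size the buffer can fill.  A
 * 1 MiB VRAM-only buffer on a VMM exposing 64 KiB and 4 KiB pages would
 * therefore normally end up with nvbo->page = 16 (64 KiB).
 */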
348 nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
349 struct sg_table *sg, struct dma_resv *robj)
351 int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
353 struct ttm_operation_ctx ctx = {
354 .interruptible = false,
355 .no_wait_gpu = false,
359 nouveau_bo_placement_set(nvbo, domain, 0);
360 INIT_LIST_HEAD(&nvbo->io_reserve_lru);
362 ret = ttm_bo_init_reserved(nvbo->bo.bdev, &nvbo->bo, type,
363 &nvbo->placement, align >> PAGE_SHIFT, &ctx,
364 sg, robj, nouveau_bo_del_ttm);
366 /* ttm will call nouveau_bo_del_ttm if it fails. */
371 ttm_bo_unreserve(&nvbo->bo);
377 nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
378 uint32_t domain, uint32_t tile_mode, uint32_t tile_flags,
379 struct sg_table *sg, struct dma_resv *robj,
380 struct nouveau_bo **pnvbo)
382 struct nouveau_bo *nvbo;
385 nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
388 return PTR_ERR(nvbo);
390 nvbo->bo.base.size = size;
391 dma_resv_init(&nvbo->bo.base._resv);
392 drm_vma_node_reset(&nvbo->bo.base.vma_node);
394 /* This must be called before ttm_bo_init_reserved(). Subsequent
395 * bo_move() callbacks might already iterate the GEM's GPUVA list.
397 drm_gem_gpuva_init(&nvbo->bo.base);
399 ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
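/*
 * Minimal usage sketch for the helpers above (illustrative only; error
 * unwinding and the surrounding client setup are omitted, and "cli" stands
 * for an already-initialised struct nouveau_cli):
 *
 *	struct nouveau_bo *nvbo;
 *	int ret;
 *
 *	ret = nouveau_bo_new(cli, 0x1000, 0, NOUVEAU_GEM_DOMAIN_GART,
 *			     0, 0, NULL, NULL, &nvbo);
 *	if (ret)
 *		return ret;
 *
 *	ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_GART, false);
 *	if (ret == 0) {
 *		if (nouveau_bo_map(nvbo) == 0) {
 *			nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);
 *			nouveau_bo_unmap(nvbo);
 *		}
 *		nouveau_bo_unpin(nvbo);
 *	}
 */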
408 set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
410 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
411 u64 vram_size = drm->client.device.info.ram_size;
412 unsigned i, fpfn, lpfn;
414 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
415 nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
416 nvbo->bo.base.size < vram_size / 4) {
418 * Make sure that the color and depth buffers are handled
419 * by independent memory controller units. Up to a 9x
420 * speed up when alpha-blending and depth-test are enabled
424 fpfn = (vram_size / 2) >> PAGE_SHIFT;
428 lpfn = (vram_size / 2) >> PAGE_SHIFT;
430 for (i = 0; i < nvbo->placement.num_placement; ++i) {
431 nvbo->placements[i].fpfn = fpfn;
432 nvbo->placements[i].lpfn = lpfn;
438 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
441 unsigned int *n = &nvbo->placement.num_placement;
442 struct ttm_place *pl = nvbo->placements;
447 if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
448 pl[*n].mem_type = TTM_PL_VRAM;
449 pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_VRAM ?
450 TTM_PL_FLAG_FALLBACK : 0;
453 if (domain & NOUVEAU_GEM_DOMAIN_GART) {
454 pl[*n].mem_type = TTM_PL_TT;
455 pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_GART ?
456 TTM_PL_FLAG_FALLBACK : 0;
459 if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
460 pl[*n].mem_type = TTM_PL_SYSTEM;
461 pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_CPU ?
462 TTM_PL_FLAG_FALLBACK : 0;
466 nvbo->placement.placement = nvbo->placements;
467 set_placement_range(nvbo, domain);
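/*
 * The busy mask only adds fallback placements: a caller passing
 * domain = VRAM | GART with busy = GART ends up with a normal VRAM
 * placement plus a GART placement flagged TTM_PL_FLAG_FALLBACK, which TTM
 * only considers once the preferred placements cannot be satisfied.
 */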
470 int nouveau_bo_pin_locked(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
472 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
473 struct ttm_buffer_object *bo = &nvbo->bo;
474 bool force = false, evict = false;
477 dma_resv_assert_held(bo->base.resv);
479 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
480 domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
488 if (nvbo->bo.pin_count) {
491 switch (bo->resource->mem_type) {
493 error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
496 error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
503 NV_ERROR(drm, "bo %p pinned elsewhere: "
504 "0x%08x vs 0x%08x\n", bo,
505 bo->resource->mem_type, domain);
508 ttm_bo_pin(&nvbo->bo);
513 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
514 ret = nouveau_bo_validate(nvbo, false, false);
519 nouveau_bo_placement_set(nvbo, domain, 0);
520 ret = nouveau_bo_validate(nvbo, false, false);
524 ttm_bo_pin(&nvbo->bo);
526 switch (bo->resource->mem_type) {
528 drm->gem.vram_available -= bo->base.size;
531 drm->gem.gart_available -= bo->base.size;
539 nvbo->contig = false;
543 void nouveau_bo_unpin_locked(struct nouveau_bo *nvbo)
545 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
546 struct ttm_buffer_object *bo = &nvbo->bo;
548 dma_resv_assert_held(bo->base.resv);
550 ttm_bo_unpin(&nvbo->bo);
551 if (!nvbo->bo.pin_count) {
552 switch (bo->resource->mem_type) {
554 drm->gem.vram_available += bo->base.size;
557 drm->gem.gart_available += bo->base.size;
565 int nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
567 struct ttm_buffer_object *bo = &nvbo->bo;
570 ret = ttm_bo_reserve(bo, false, false, NULL);
573 ret = nouveau_bo_pin_locked(nvbo, domain, contig);
574 ttm_bo_unreserve(bo);
579 int nouveau_bo_unpin(struct nouveau_bo *nvbo)
581 struct ttm_buffer_object *bo = &nvbo->bo;
584 ret = ttm_bo_reserve(bo, false, false, NULL);
587 nouveau_bo_unpin_locked(nvbo);
588 ttm_bo_unreserve(bo);
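/*
 * nouveau_bo_pin()/nouveau_bo_unpin() are thin wrappers that reserve the
 * buffer around the _locked variants above; callers already holding the
 * object's dma_resv (see the dma_resv_assert_held() checks) should call
 * the _locked versions directly.  Pinning and unpinning also keep the
 * drm->gem.vram_available/gart_available accounting up to date.
 */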
594 nouveau_bo_map(struct nouveau_bo *nvbo)
598 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
602 ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size), &nvbo->kmap);
604 ttm_bo_unreserve(&nvbo->bo);
609 nouveau_bo_unmap(struct nouveau_bo *nvbo)
614 ttm_bo_kunmap(&nvbo->kmap);
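/*
 * nouveau_bo_sync_for_device() and nouveau_bo_sync_for_cpu() below walk the
 * ttm_tt page array, merge runs of contiguous pages, and issue one
 * dma_sync_single_for_{device,cpu}() call per run, keeping the number of
 * cache-maintenance operations small on non-coherent systems.  Buffers with
 * a forced coherent mapping skip the walk entirely.
 */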
618 nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
620 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
621 struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
624 if (!ttm_dma || !ttm_dma->dma_address)
626 if (!ttm_dma->pages) {
627 NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
631 /* Don't waste time looping if the object is coherent */
632 if (nvbo->force_coherent)
636 while (i < ttm_dma->num_pages) {
637 struct page *p = ttm_dma->pages[i];
638 size_t num_pages = 1;
640 for (j = i + 1; j < ttm_dma->num_pages; ++j) {
641 if (++p != ttm_dma->pages[j])
646 dma_sync_single_for_device(drm->dev->dev,
647 ttm_dma->dma_address[i],
648 num_pages * PAGE_SIZE, DMA_TO_DEVICE);
654 nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
656 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
657 struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
660 if (!ttm_dma || !ttm_dma->dma_address)
662 if (!ttm_dma->pages) {
663 NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
667 /* Don't waste time looping if the object is coherent */
668 if (nvbo->force_coherent)
672 while (i < ttm_dma->num_pages) {
673 struct page *p = ttm_dma->pages[i];
674 size_t num_pages = 1;
676 for (j = i + 1; j < ttm_dma->num_pages; ++j) {
677 if (++p != ttm_dma->pages[j])
683 dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
684 num_pages * PAGE_SIZE, DMA_FROM_DEVICE);
689 void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
691 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
692 struct nouveau_bo *nvbo = nouveau_bo(bo);
694 mutex_lock(&drm->ttm.io_reserve_mutex);
695 list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
696 mutex_unlock(&drm->ttm.io_reserve_mutex);
699 void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
701 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
702 struct nouveau_bo *nvbo = nouveau_bo(bo);
704 mutex_lock(&drm->ttm.io_reserve_mutex);
705 list_del_init(&nvbo->io_reserve_lru);
706 mutex_unlock(&drm->ttm.io_reserve_mutex);
710 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
713 struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
716 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
720 nouveau_bo_sync_for_device(nvbo);
726 nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
729 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
734 iowrite16_native(val, (void __force __iomem *)mem);
740 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
743 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
748 return ioread32_native((void __force __iomem *)mem);
754 nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
757 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
762 iowrite32_native(val, (void __force __iomem *)mem);
767 static struct ttm_tt *
768 nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
770 #if IS_ENABLED(CONFIG_AGP)
771 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
773 if (drm->agp.bridge) {
774 return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
778 return nouveau_sgdma_create_ttm(bo, page_flags);
782 nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
783 struct ttm_resource *reg)
785 #if IS_ENABLED(CONFIG_AGP)
786 struct nouveau_drm *drm = nouveau_bdev(bdev);
790 #if IS_ENABLED(CONFIG_AGP)
792 return ttm_agp_bind(ttm, reg);
794 return nouveau_sgdma_bind(bdev, ttm, reg);
798 nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
800 #if IS_ENABLED(CONFIG_AGP)
801 struct nouveau_drm *drm = nouveau_bdev(bdev);
803 if (drm->agp.bridge) {
808 nouveau_sgdma_unbind(bdev, ttm);
812 nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
814 struct nouveau_bo *nvbo = nouveau_bo(bo);
816 switch (bo->resource->mem_type) {
818 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
819 NOUVEAU_GEM_DOMAIN_CPU);
822 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
826 *pl = nvbo->placement;
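/*
 * nouveau_bo_move_prep() grabs two temporary VMA windows in the DRM
 * client's VMM, one covering the old backing store and one covering the
 * new, and maps both so the copy engine can address them during the
 * transfer.  The windows are stashed in the old nouveau_mem and are torn
 * down once TTM destroys that resource (see the comment in
 * nouveau_bo_move_m2mf()).
 */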
830 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
831 struct ttm_resource *reg)
833 struct nouveau_mem *old_mem = nouveau_mem(bo->resource);
834 struct nouveau_mem *new_mem = nouveau_mem(reg);
835 struct nvif_vmm *vmm = &drm->client.vmm.vmm;
838 ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
839 old_mem->mem.size, &old_mem->vma[0]);
843 ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
844 new_mem->mem.size, &old_mem->vma[1]);
848 ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
852 ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
855 nvif_vmm_put(vmm, &old_mem->vma[1]);
856 nvif_vmm_put(vmm, &old_mem->vma[0]);
862 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
863 struct ttm_operation_ctx *ctx,
864 struct ttm_resource *new_reg)
866 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
867 struct nouveau_channel *chan = drm->ttm.chan;
868 struct nouveau_cli *cli = (void *)chan->user.client;
869 struct nouveau_fence *fence;
872 /* create temporary vmas for the transfer and attach them to the
873 * old nvkm_mem node; these will get cleaned up after ttm has
874 * destroyed the ttm_resource
876 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
877 ret = nouveau_bo_move_prep(drm, bo, new_reg);
882 if (drm_drv_uses_atomic_modeset(drm->dev))
883 mutex_lock(&cli->mutex);
885 mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
887 ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
891 ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
895 ret = nouveau_fence_new(&fence, chan);
899 /* TODO: figure out a better solution here
901 * wait on the fence here explicitly as going through
902 * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
904 * Without this the operation can time out and we'll fall back to a
905 * software copy, which might take several minutes to finish.
907 nouveau_fence_wait(fence, false, false);
908 ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false,
910 nouveau_fence_unref(&fence);
913 mutex_unlock(&cli->mutex);
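/*
 * nouveau_bo_move_init() walks the _methods[] table below from the newest
 * copy-engine class down to the oldest M2MF class; the first class that can
 * be constructed and initialised on the ttm channel becomes drm->ttm.move.
 * If none of them works, buffer moves fall back to the CPU, as reported by
 * the "MM: using %s for buffer copies" message.
 */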
918 nouveau_bo_move_init(struct nouveau_drm *drm)
920 static const struct _method_table {
924 int (*exec)(struct nouveau_channel *,
925 struct ttm_buffer_object *,
926 struct ttm_resource *, struct ttm_resource *);
927 int (*init)(struct nouveau_channel *, u32 handle);
929 { "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init },
930 { "GRCE", 0, 0xc7b5, nve0_bo_move_copy, nvc0_bo_move_init },
931 { "COPY", 4, 0xc6b5, nve0_bo_move_copy, nve0_bo_move_init },
932 { "GRCE", 0, 0xc6b5, nve0_bo_move_copy, nvc0_bo_move_init },
933 { "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
934 { "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
935 { "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
936 { "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
937 { "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
938 { "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
939 { "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
940 { "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
941 { "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
942 { "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
943 { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
944 { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
945 { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
946 { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
947 { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
948 { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
949 { "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
950 { "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
951 { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
954 const struct _method_table *mthd = _methods;
955 const char *name = "CPU";
959 struct nouveau_channel *chan;
968 ret = nvif_object_ctor(&chan->user, "ttmBoMove",
969 mthd->oclass | (mthd->engine << 16),
970 mthd->oclass, NULL, 0,
973 ret = mthd->init(chan, drm->ttm.copy.handle);
975 nvif_object_dtor(&drm->ttm.copy);
979 drm->ttm.move = mthd->exec;
980 drm->ttm.chan = chan;
984 } while ((++mthd)->exec);
986 NV_INFO(drm, "MM: using %s for buffer copies\n", name);
989 static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
990 struct ttm_resource *new_reg)
992 struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
993 struct nouveau_bo *nvbo = nouveau_bo(bo);
994 struct nouveau_vma *vma;
997 /* ttm can now (stupidly) pass the driver bos it didn't create... */
998 if (bo->destroy != nouveau_bo_del_ttm)
1001 nouveau_bo_del_io_reserve_lru(bo);
1003 if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
1004 mem->mem.page == nvbo->page) {
1005 list_for_each_entry(vma, &nvbo->vma_list, head) {
1006 nouveau_vma_map(vma, mem);
1008 nouveau_uvmm_bo_map_all(nvbo, mem);
1010 list_for_each_entry(vma, &nvbo->vma_list, head) {
1011 ret = dma_resv_wait_timeout(bo->base.resv,
1012 DMA_RESV_USAGE_BOOKKEEP,
1015 nouveau_vma_unmap(vma);
1017 nouveau_uvmm_bo_unmap_all(nvbo);
1021 nvbo->offset = (new_reg->start << PAGE_SHIFT);
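/*
 * On pre-Tesla hardware the two helpers below keep tile regions in step
 * with a buffer's location: nouveau_bo_vm_bind() programs a tile region
 * when the buffer lands in VRAM, and nouveau_bo_vm_cleanup() retires the
 * previous region, handing it the last write fence so it is not reused
 * before the move has finished.
 */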
1026 nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
1027 struct nouveau_drm_tile **new_tile)
1029 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1030 struct drm_device *dev = drm->dev;
1031 struct nouveau_bo *nvbo = nouveau_bo(bo);
1032 u64 offset = new_reg->start << PAGE_SHIFT;
1035 if (new_reg->mem_type != TTM_PL_VRAM)
1038 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
1039 *new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size,
1040 nvbo->mode, nvbo->zeta);
1047 nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1048 struct nouveau_drm_tile *new_tile,
1049 struct nouveau_drm_tile **old_tile)
1051 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1052 struct drm_device *dev = drm->dev;
1053 struct dma_fence *fence;
1056 ret = dma_resv_get_singleton(bo->base.resv, DMA_RESV_USAGE_WRITE,
1059 dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_WRITE,
1060 false, MAX_SCHEDULE_TIMEOUT);
1062 nv10_bo_put_tile_region(dev, *old_tile, fence);
1063 *old_tile = new_tile;
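/*
 * nouveau_bo_move() drives every migration: trivial cases (no old resource,
 * SYSTEM -> TT) are completed with ttm_bo_move_null(), TT -> SYSTEM only
 * unbinds the ttm_tt, and everything else prefers the hardware copy set up
 * by nouveau_bo_move_init().  A direct VRAM <-> SYSTEM move cannot be done
 * in one step, so the hop placement is pointed at TTM_PL_TT and TTM
 * re-routes the move through GART; without a copy engine the driver falls
 * back to ttm_bo_move_memcpy().
 */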
1067 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
1068 struct ttm_operation_ctx *ctx,
1069 struct ttm_resource *new_reg,
1070 struct ttm_place *hop)
1072 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1073 struct nouveau_bo *nvbo = nouveau_bo(bo);
1074 struct drm_gem_object *obj = &bo->base;
1075 struct ttm_resource *old_reg = bo->resource;
1076 struct nouveau_drm_tile *new_tile = NULL;
1079 if (new_reg->mem_type == TTM_PL_TT) {
1080 ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
1085 drm_gpuvm_bo_gem_evict(obj, evict);
1086 nouveau_bo_move_ntfy(bo, new_reg);
1087 ret = ttm_bo_wait_ctx(bo, ctx);
1091 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1092 ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
1098 if (!old_reg || (old_reg->mem_type == TTM_PL_SYSTEM &&
1100 ttm_bo_move_null(bo, new_reg);
1104 if (old_reg->mem_type == TTM_PL_SYSTEM &&
1105 new_reg->mem_type == TTM_PL_TT) {
1106 ttm_bo_move_null(bo, new_reg);
1110 if (old_reg->mem_type == TTM_PL_TT &&
1111 new_reg->mem_type == TTM_PL_SYSTEM) {
1112 nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
1113 ttm_resource_free(bo, &bo->resource);
1114 ttm_bo_assign_mem(bo, new_reg);
1118 /* Hardware-assisted copy. */
1119 if (drm->ttm.move) {
1120 if ((old_reg->mem_type == TTM_PL_SYSTEM &&
1121 new_reg->mem_type == TTM_PL_VRAM) ||
1122 (old_reg->mem_type == TTM_PL_VRAM &&
1123 new_reg->mem_type == TTM_PL_SYSTEM)) {
1126 hop->mem_type = TTM_PL_TT;
1130 ret = nouveau_bo_move_m2mf(bo, evict, ctx,
1136 /* Fall back to software copy. */
1137 ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
1141 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1143 nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
1145 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
1149 nouveau_bo_move_ntfy(bo, bo->resource);
1150 drm_gpuvm_bo_gem_evict(obj, !evict);
1156 nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
1157 struct ttm_resource *reg)
1159 struct nouveau_mem *mem = nouveau_mem(reg);
1161 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
1162 switch (reg->mem_type) {
1165 nvif_object_unmap_handle(&mem->mem.object);
1168 nvif_object_unmap_handle(&mem->mem.object);
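/*
 * nouveau_ttm_io_mem_reserve() turns a ttm_resource into something the CPU
 * can reach: AGP resources map through the aperture, VRAM maps through
 * BAR1 (using nvif_object_map_handle() for NV50-class and newer memory
 * objects), and the caching mode is picked to match what the BAR supports.
 * When BAR1 space runs out (-ENOSPC), the least-recently-used entry on
 * drm->ttm.io_reserve_lru is unmapped and the reservation is retried.
 */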
1177 nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
1179 struct nouveau_drm *drm = nouveau_bdev(bdev);
1180 struct nvkm_device *device = nvxx_device(&drm->client.device);
1181 struct nouveau_mem *mem = nouveau_mem(reg);
1182 struct nvif_mmu *mmu = &drm->client.mmu;
1185 mutex_lock(&drm->ttm.io_reserve_mutex);
1187 switch (reg->mem_type) {
1193 #if IS_ENABLED(CONFIG_AGP)
1194 if (drm->agp.bridge) {
1195 reg->bus.offset = (reg->start << PAGE_SHIFT) +
1197 reg->bus.is_iomem = !drm->agp.cma;
1198 reg->bus.caching = ttm_write_combined;
1201 if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
1207 fallthrough; /* tiled memory */
1209 reg->bus.offset = (reg->start << PAGE_SHIFT) +
1210 device->func->resource_addr(device, 1);
1211 reg->bus.is_iomem = true;
1213 /* Some BARs do not support being ioremapped WC */
1214 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
1215 mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED)
1216 reg->bus.caching = ttm_uncached;
1218 reg->bus.caching = ttm_write_combined;
1220 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
1222 struct nv50_mem_map_v0 nv50;
1223 struct gf100_mem_map_v0 gf100;
1228 switch (mem->mem.object.oclass) {
1229 case NVIF_CLASS_MEM_NV50:
1230 args.nv50.version = 0;
1232 args.nv50.kind = mem->kind;
1233 args.nv50.comp = mem->comp;
1234 argc = sizeof(args.nv50);
1236 case NVIF_CLASS_MEM_GF100:
1237 args.gf100.version = 0;
1239 args.gf100.kind = mem->kind;
1240 argc = sizeof(args.gf100);
1247 ret = nvif_object_map_handle(&mem->mem.object,
1251 if (WARN_ON(ret == 0))
1256 reg->bus.offset = handle;
1265 if (ret == -ENOSPC) {
1266 struct nouveau_bo *nvbo;
1268 nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
1272 list_del_init(&nvbo->io_reserve_lru);
1273 drm_vma_node_unmap(&nvbo->bo.base.vma_node,
1275 nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
1276 nvbo->bo.resource->bus.offset = 0;
1277 nvbo->bo.resource->bus.addr = NULL;
1282 mutex_unlock(&drm->ttm.io_reserve_mutex);
1287 nouveau_ttm_io_mem_free(struct ttm_device *bdev, struct ttm_resource *reg)
1289 struct nouveau_drm *drm = nouveau_bdev(bdev);
1291 mutex_lock(&drm->ttm.io_reserve_mutex);
1292 nouveau_ttm_io_mem_free_locked(drm, reg);
1293 mutex_unlock(&drm->ttm.io_reserve_mutex);
1296 vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1298 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1299 struct nouveau_bo *nvbo = nouveau_bo(bo);
1300 struct nvkm_device *device = nvxx_device(&drm->client.device);
1301 u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
1304 /* as long as the bo isn't in vram and isn't tiled, we've got
1305 * nothing to do here.
1307 if (bo->resource->mem_type != TTM_PL_VRAM) {
1308 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
1312 if (bo->resource->mem_type != TTM_PL_SYSTEM)
1315 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
1318 /* make sure bo is in mappable vram */
1319 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
1320 bo->resource->start + PFN_UP(bo->resource->size) < mappable)
1323 for (i = 0; i < nvbo->placement.num_placement; ++i) {
1324 nvbo->placements[i].fpfn = 0;
1325 nvbo->placements[i].lpfn = mappable;
1328 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
1331 ret = nouveau_bo_validate(nvbo, false, false);
1332 if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS))
1333 return VM_FAULT_NOPAGE;
1334 else if (unlikely(ret))
1335 return VM_FAULT_SIGBUS;
1337 ttm_bo_move_to_lru_tail_unlocked(bo);
1342 nouveau_ttm_tt_populate(struct ttm_device *bdev,
1343 struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
1345 struct ttm_tt *ttm_dma = (void *)ttm;
1346 struct nouveau_drm *drm;
1347 bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
1349 if (ttm_tt_is_populated(ttm))
1352 if (slave && ttm->sg) {
1353 drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address,
1358 drm = nouveau_bdev(bdev);
1360 return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx);
1364 nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
1367 struct nouveau_drm *drm;
1368 bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
1373 nouveau_ttm_tt_unbind(bdev, ttm);
1375 drm = nouveau_bdev(bdev);
1377 return ttm_pool_free(&drm->ttm.bdev.pool, ttm);
1381 nouveau_ttm_tt_destroy(struct ttm_device *bdev,
1384 #if IS_ENABLED(CONFIG_AGP)
1385 struct nouveau_drm *drm = nouveau_bdev(bdev);
1386 if (drm->agp.bridge) {
1387 ttm_agp_destroy(ttm);
1391 nouveau_sgdma_destroy(bdev, ttm);
1395 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
1397 struct dma_resv *resv = nvbo->bo.base.resv;
1402 dma_resv_add_fence(resv, &fence->base, exclusive ?
1403 DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
1407 nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo)
1409 nouveau_bo_move_ntfy(bo, NULL);
1412 struct ttm_device_funcs nouveau_bo_driver = {
1413 .ttm_tt_create = &nouveau_ttm_tt_create,
1414 .ttm_tt_populate = &nouveau_ttm_tt_populate,
1415 .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
1416 .ttm_tt_destroy = &nouveau_ttm_tt_destroy,
1417 .eviction_valuable = ttm_bo_eviction_valuable,
1418 .evict_flags = nouveau_bo_evict_flags,
1419 .delete_mem_notify = nouveau_bo_delete_mem_notify,
1420 .move = nouveau_bo_move,
1421 .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
1422 .io_mem_free = &nouveau_ttm_io_mem_free,