/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
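
/*
 * TTM memory-type manager for VRAM.  Allocations are forwarded to the
 * fb subdev (pfb), which owns the underlying VRAM mm; init only stashes
 * the pfb pointer in man->priv for later use by the debug callback.
 */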
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
        struct nouveau_fb *pfb = nvkm_fb(&drm->device);
        man->priv = pfb;
        return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
        man->priv = NULL;
        return 0;
}
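
/*
 * Unmap and release any GPU virtual-address ranges still held by a
 * memory node before its backing storage is returned to the allocator.
 */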
static inline void
nouveau_mem_node_cleanup(struct nouveau_mem *node)
{
        if (node->vma[0].node) {
                nouveau_vm_unmap(&node->vma[0]);
                nouveau_vm_put(&node->vma[0]);
        }

        if (node->vma[1].node) {
                nouveau_vm_unmap(&node->vma[1]);
                nouveau_vm_put(&node->vma[1]);
        }
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
        struct nouveau_fb *pfb = nvkm_fb(&drm->device);
        nouveau_mem_node_cleanup(mem->mm_node);
        pfb->ram->put(pfb, (struct nouveau_mem **)&mem->mm_node);
}
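
/*
 * Allocate VRAM for a buffer object.  NONCONTIG buffers may be
 * satisfied in page_shift-sized chunks.  -ENOSPC is translated into a
 * zero return with mem->mm_node left NULL, which tells TTM to attempt
 * eviction instead of failing the allocation outright.
 */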
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         const struct ttm_place *place,
                         struct ttm_mem_reg *mem)
{
        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
        struct nouveau_fb *pfb = nvkm_fb(&drm->device);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_mem *node;
        u32 size_nc = 0;
        int ret;

        if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
                size_nc = 1 << nvbo->page_shift;

        ret = pfb->ram->get(pfb, mem->num_pages << PAGE_SHIFT,
                            mem->page_alignment << PAGE_SHIFT, size_nc,
                            (nvbo->tile_flags >> 8) & 0x3ff, &node);
        if (ret) {
                mem->mm_node = NULL;
                return (ret == -ENOSPC) ? 0 : ret;
        }

        node->page_shift = nvbo->page_shift;

        mem->mm_node = node;
        mem->start = node->offset >> PAGE_SHIFT;
        return 0;
}
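
/* Dump every node in the VRAM mm; offsets and lengths are in 4KiB units. */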
static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
        struct nouveau_fb *pfb = man->priv;
        struct nouveau_mm *mm = &pfb->vram;
        struct nouveau_mm_node *r;
        u32 total = 0, free = 0;

        mutex_lock(&nv_subdev(pfb)->mutex);
        list_for_each_entry(r, &mm->nodes, nl_entry) {
                printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
                       prefix, r->type, ((u64)r->offset << 12),
                       (((u64)r->offset + r->length) << 12));

                total += r->length;
                if (!r->type)
                        free += r->length;
        }
        mutex_unlock(&nv_subdev(pfb)->mutex);

        printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
               prefix, (u64)total << 12, (u64)free << 12);
        printk(KERN_DEBUG "%s  block: 0x%08x\n",
               prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
        nouveau_vram_manager_init,
        nouveau_vram_manager_fini,
        nouveau_vram_manager_new,
        nouveau_vram_manager_del,
        nouveau_vram_manager_debug
};
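
/*
 * TTM memory-type manager for the GART aperture on NV50 and newer.  No
 * GPU address space is reserved at allocation time; the VMA is expected
 * to be set up later, when the buffer is mapped.  Only the per-family
 * memory type is derived from the object's tile_flags here.
 */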
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
        return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
        nouveau_mem_node_cleanup(mem->mm_node);
        kfree(mem->mm_node);
        mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         const struct ttm_place *place,
                         struct ttm_mem_reg *mem)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_mem *node;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        node->page_shift = 12;

        switch (drm->device.info.family) {
        case NV_DEVICE_INFO_V0_TESLA:
                if (drm->device.info.chipset != 0x50)
                        node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
                break;
        case NV_DEVICE_INFO_V0_FERMI:
        case NV_DEVICE_INFO_V0_KEPLER:
                node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
                break;
        default:
                break;
        }

        mem->mm_node = node;
        mem->start = 0;
        return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
        nouveau_gart_manager_init,
        nouveau_gart_manager_fini,
        nouveau_gart_manager_new,
        nouveau_gart_manager_del,
        nouveau_gart_manager_debug
};
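
/*
 * GART manager for pre-NV50 chips, which expose a single global VM.
 * init takes a reference on that VM, and each allocation carves a
 * virtual range out of it with nouveau_vm_get().
 */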
#include <core/subdev/vm/nv04.h>
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
        struct nouveau_vmmgr *vmm = nvkm_vmmgr(&drm->device);
        struct nv04_vmmgr_priv *priv = (void *)vmm;
        struct nouveau_vm *vm = NULL;
        nouveau_vm_ref(priv->vm, &vm, NULL);
        man->priv = vm;
        return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
        struct nouveau_vm *vm = man->priv;
        nouveau_vm_ref(NULL, &vm, NULL);
        man->priv = NULL;
        return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
        struct nouveau_mem *node = mem->mm_node;
        if (node->vma[0].node)
                nouveau_vm_put(&node->vma[0]);
        kfree(mem->mm_node);
        mem->mm_node = NULL;
}

static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
                      struct ttm_buffer_object *bo,
                      const struct ttm_place *place,
                      struct ttm_mem_reg *mem)
{
        struct nouveau_mem *node;
        int ret;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        node->page_shift = 12;

        ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
                             NV_MEM_ACCESS_RW, &node->vma[0]);
        if (ret) {
                kfree(node);
                return ret;
        }

        mem->mm_node = node;
        mem->start = node->vma[0].offset >> PAGE_SHIFT;
        return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
        nv04_gart_manager_init,
        nv04_gart_manager_fini,
        nv04_gart_manager_new,
        nv04_gart_manager_del,
        nv04_gart_manager_debug
};
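
/*
 * mmap() entry point: offsets below DRM_FILE_PAGE_OFFSET belong to the
 * legacy DRM mapping path (assumed here to be drm_legacy_mmap() for
 * this kernel vintage); everything above it is a TTM buffer object.
 */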
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv = filp->private_data;
        struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
                return drm_legacy_mmap(filp, vma);

        return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}
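
/*
 * TTM's memory accounting and BO state live in refcounted singletons
 * shared by all TTM-based drivers; drm_global_item_ref() creates each
 * item on first use and takes a reference on subsequent calls.
 */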
static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
        struct drm_global_reference *global_ref;
        int ret;

        global_ref = &drm->ttm.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &nouveau_ttm_mem_global_init;
        global_ref->release = &nouveau_ttm_mem_global_release;

        ret = drm_global_item_ref(global_ref);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed setting up TTM memory accounting\n");
                drm->ttm.mem_global_ref.release = NULL;
                return ret;
        }

        drm->ttm.bo_global_ref.mem_glob = global_ref->object;
        global_ref = &drm->ttm.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;

        ret = drm_global_item_ref(global_ref);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed setting up TTM BO subsystem\n");
                drm_global_item_unref(&drm->ttm.mem_global_ref);
                drm->ttm.mem_global_ref.release = NULL;
                return ret;
        }

        return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
        if (drm->ttm.mem_global_ref.release == NULL)
                return;

        drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
        drm_global_item_unref(&drm->ttm.mem_global_ref);
        drm->ttm.mem_global_ref.release = NULL;
}
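
/*
 * Bring up TTM for the device: pick and set the DMA masks, initialise
 * the BO device, register the VRAM and GART memory types, and mark the
 * VRAM BAR write-combined.
 */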
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
        struct drm_device *dev = drm->dev;
        u32 bits;
        int ret;

        bits = nvkm_vmmgr(&drm->device)->dma_bits;
        if (nv_device_is_pci(nvkm_device(&drm->device))) {
                if (drm->agp.stat == ENABLED ||
                    !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
                        bits = 32;

                ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
                if (ret)
                        return ret;

                ret = pci_set_consistent_dma_mask(dev->pdev,
                                                  DMA_BIT_MASK(bits));
                if (ret)
                        pci_set_consistent_dma_mask(dev->pdev,
                                                    DMA_BIT_MASK(32));
        }

        ret = nouveau_ttm_global_init(drm);
        if (ret)
                return ret;

        ret = ttm_bo_device_init(&drm->ttm.bdev,
                                 drm->ttm.bo_global_ref.ref.object,
                                 &nouveau_bo_driver,
                                 dev->anon_inode->i_mapping,
                                 DRM_FILE_PAGE_OFFSET,
                                 bits <= 32 ? true : false);
        if (ret) {
                NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
                return ret;
        }

        /* VRAM init */
        drm->gem.vram_available = drm->device.info.ram_user;

        ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
                             drm->gem.vram_available >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
                return ret;
        }

        drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvkm_device(&drm->device), 1),
                                         nv_device_resource_len(nvkm_device(&drm->device), 1));

        /* GART init */
        if (drm->agp.stat != ENABLED) {
                drm->gem.gart_available = nvkm_vmmgr(&drm->device)->limit;
        } else {
                drm->gem.gart_available = drm->agp.size;
        }

        ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
                             drm->gem.gart_available >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(drm, "GART mm init failed, %d\n", ret);
                return ret;
        }

        NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
        NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
        return 0;
}
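
/* Tear down the memory types, BO device and TTM globals in reverse order. */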
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
        mutex_lock(&drm->dev->struct_mutex);
        ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
        mutex_unlock(&drm->dev->struct_mutex);

        ttm_bo_device_release(&drm->ttm.bdev);

        nouveau_ttm_global_release(drm);

        arch_phys_wc_del(drm->ttm.mtrr);
}