/*
 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
 * Copyright 2005 Stephane Marchesin
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keith@tungstengraphics.com>
 */

#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "nouveau_drv.h"

static struct mem_block *
split_block(struct mem_block *p, uint64_t start, uint64_t size,
	    struct drm_file *file_priv)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock =
			kmalloc(sizeof(*newblock), GFP_KERNEL);
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock =
			kmalloc(sizeof(*newblock), GFP_KERNEL);
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

out:
	/* Our block is in the middle */
	p->file_priv = file_priv;
	return p;
}

struct mem_block *
nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
			int align2, struct drm_file *file_priv, int tail)
{
	struct mem_block *p;
	uint64_t mask = (1 << align2) - 1;

	if (!heap)
		return NULL;

	if (tail) {
		list_for_each_prev(p, heap) {
			uint64_t start = ((p->start + p->size) - size) & ~mask;

			if (p->file_priv == NULL && start >= p->start &&
			    start + size <= p->start + p->size)
				return split_block(p, start, size, file_priv);
		}
	} else {
		list_for_each(p, heap) {
			uint64_t start = (p->start + mask) & ~mask;

			if (p->file_priv == NULL &&
			    start + size <= p->start + p->size)
				return split_block(p, start, size, file_priv);
		}
	}

	return NULL;
}
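
/*
 * Note added for clarity (not in the original source): 'align2' above is the
 * log2 of the requested alignment, so align2 == 12 gives mask 0xfff and a
 * 4KiB-aligned start, while 'tail' makes the search walk the heap backwards
 * and carve the allocation out of the end of the first free block that fits.
 */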

void nouveau_mem_free_block(struct mem_block *p)
{
	p->file_priv = NULL;

	/* Assumes a single contiguous range.  Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	if (p->next->file_priv == NULL) {
		struct mem_block *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		kfree(q);
	}

	if (p->prev->file_priv == NULL) {
		struct mem_block *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		kfree(p);
	}
}

/* Initialize.  How to check for an uninitialized heap?
 */
int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
			  uint64_t size)
{
	struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);

	if (!blocks)
		return -ENOMEM;

	*heap = kmalloc(sizeof(**heap), GFP_KERNEL);
	if (!*heap) {
		kfree(blocks);
		return -ENOMEM;
	}

	blocks->start = start;
	blocks->size = size;
	blocks->file_priv = NULL;
	blocks->next = blocks->prev = *heap;

	memset(*heap, 0, sizeof(**heap));
	(*heap)->file_priv = (struct drm_file *) -1;
	(*heap)->next = (*heap)->prev = blocks;
	return 0;
}
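
/*
 * Illustrative usage only (not part of the driver): a minimal sketch of how
 * the heap helpers above fit together, assuming a made-up 16MiB range and a
 * valid file_priv; error handling is elided.
 *
 *	struct mem_block *heap, *block;
 *
 *	nouveau_mem_init_heap(&heap, 0, 16 * 1024 * 1024);
 *	block = nouveau_mem_alloc_block(heap, 4096, 12, file_priv, 0);
 *	if (block)
 *		nouveau_mem_free_block(block);
 *	nouveau_mem_takedown(&heap);
 */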

/*
 * Free all blocks associated with the releasing file_priv
 */
void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
{
	struct mem_block *p;

	if (!heap || !heap->next)
		return;

	list_for_each(p, heap) {
		if (p->file_priv == file_priv)
			p->file_priv = NULL;
	}

	/* Assumes a single contiguous range.  Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	list_for_each(p, heap) {
		while ((p->file_priv == NULL) &&
		       (p->next->file_priv == NULL) &&
		       (p->next != heap)) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			kfree(q);
		}
	}
}

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
			   uint32_t size, uint32_t pitch)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	tile->addr = addr;
	tile->size = size;
	tile->used = !!pitch;
	nouveau_fence_unref((void **)&tile->fence);

	if (!pfifo->cache_flush(dev))
		return;

	pfifo->reassign(dev, false);
	pfifo->cache_flush(dev);
	pfifo->cache_pull(dev, false);

	nouveau_wait_for_idle(dev);

	pgraph->set_region_tiling(dev, i, addr, size, pitch);
	pfb->set_region_tiling(dev, i, addr, size, pitch);

	pfifo->cache_pull(dev, true);
	pfifo->reassign(dev, true);
}

struct nouveau_tile_reg *
nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
		    uint32_t pitch)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL;
	int i;

	spin_lock(&dev_priv->tile.lock);

	for (i = 0; i < pfb->num_tiles; i++) {
		if (tile[i].used)
			/* Tile region in use. */
			continue;

		if (tile[i].fence &&
		    !nouveau_fence_signalled(tile[i].fence, NULL))
			/* Pending tile region. */
			continue;

		if (max(tile[i].addr, addr) <
		    min(tile[i].addr + tile[i].size, addr + size))
			/* Kill an intersecting tile region. */
			nv10_mem_set_region_tiling(dev, i, 0, 0, 0);

		if (pitch && !found) {
			/* Free tile region. */
			nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
			found = &tile[i];
		}
	}

	spin_unlock(&dev_priv->tile.lock);
	return found;
}

void
nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
		       struct nouveau_fence *fence)
{
	if (fence) {
		/* Mark it as pending. */
		tile->fence = fence;
		nouveau_fence_ref(fence);
	}

	tile->used = false;
}
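
/*
 * Illustrative pairing only (not a verbatim excerpt): a caller would
 * typically grab a region with nv10_mem_set_tiling() and hand it back
 * through nv10_mem_expire_tiling() once a fence covering its last use of
 * the tiled surface is known, e.g.:
 *
 *	struct nouveau_tile_reg *tile;
 *
 *	tile = nv10_mem_set_tiling(dev, addr, size, pitch);
 *	...
 *	if (tile)
 *		nv10_mem_expire_tiling(dev, tile, fence);
 */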

/*
 * NV50 VM helpers
 */
int
nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
			uint32_t flags, uint64_t phys)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj **pgt;
	unsigned psz, pfl, pages;

	if (virt >= dev_priv->vm_gart_base &&
	    (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
		psz = 12;
		pgt = &dev_priv->gart_info.sg_ctxdma;
		pfl = 0x00000021;
		virt -= dev_priv->vm_gart_base;
	} else
	if (virt >= dev_priv->vm_vram_base &&
	    (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
		psz = 16;
		pgt = dev_priv->vm_vram_pt;
		pfl = 0x00000001;
		virt -= dev_priv->vm_vram_base;
	} else {
		NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n",
			 virt, virt + size - 1);
		return -EINVAL;
	}

	pages = size >> psz;

	dev_priv->engine.instmem.prepare_access(dev, true);
	if (flags & 0x80000000) {
		while (pages--) {
			struct nouveau_gpuobj *pt = pgt[virt >> 29];
			unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;

			nv_wo32(dev, pt, pte++, 0x00000000);
			nv_wo32(dev, pt, pte++, 0x00000000);
			virt += (1 << psz);
		}
	} else {
		while (pages--) {
			struct nouveau_gpuobj *pt = pgt[virt >> 29];
			unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
			unsigned offset_h = upper_32_bits(phys) & 0xff;
			unsigned offset_l = lower_32_bits(phys);

			nv_wo32(dev, pt, pte++, offset_l | pfl);
			nv_wo32(dev, pt, pte++, offset_h | flags);
			phys += (1 << psz);
			virt += (1 << psz);
		}
	}
	dev_priv->engine.instmem.finish_access(dev);

	nv_wr32(dev, 0x100c80, 0x00050001);
	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
		return -EBUSY;
	}

	nv_wr32(dev, 0x100c80, 0x00000001);
	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
		return -EBUSY;
	}

	return 0;
}

void nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
{
	nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
}
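
/*
 * Worked example (values assumed for illustration only): for a VRAM mapping,
 * psz == 16 (64KiB pages) in the loop above, so a virtual offset of
 * 0x20010000 into the VRAM area selects page table pgt[0x20010000 >> 29],
 * i.e. pgt[1], and the first PTE word sits at index
 * ((0x20010000 & 0x1fffffff) >> 16) << 1 == 2; the index is doubled because
 * each PTE is written as two 32-bit words (the paired nv_wo32() calls).
 * nv50_mem_vm_unbind() reuses the same loop with bit 31 of 'flags' set,
 * which makes it write zero PTEs instead of a mapping.
 */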

void nouveau_mem_takedown(struct mem_block **heap)
{
	struct mem_block *p;

	if (!*heap)
		return;

	for (p = (*heap)->next; p != *heap;) {
		struct mem_block *q = p;
		p = p->next;
		kfree(q);
	}
	kfree(*heap);
	*heap = NULL;
}

void nouveau_mem_close(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->ttm.bdev.man[TTM_PL_PRIV0].has_type)
		ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0);
	ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);

	ttm_bo_device_release(&dev_priv->ttm.bdev);

	nouveau_ttm_global_release(dev_priv);

	if (drm_core_has_AGP(dev) && dev->agp &&
	    drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct drm_agp_mem *entry, *tempe;

		/* Remove AGP resources, but leave dev->agp
		   intact until drv_cleanup is called. */
		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
			if (entry->bound)
				drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);
			kfree(entry);
		}
		INIT_LIST_HEAD(&dev->agp->memory);

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
	}

	if (dev_priv->fb_mtrr) {
		drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),
			     drm_get_resource_len(dev, 1), DRM_MTRR_WC);
		dev_priv->fb_mtrr = 0;
	}
}

/*XXX won't work on BSD because of pci_read_config_dword */
static uint32_t
nouveau_mem_fb_amount_igp(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pci_dev *bridge;
	uint32_t mem;

	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
	if (!bridge) {
		NV_ERROR(dev, "no bridge device\n");
		return 0;
	}

	if (dev_priv->flags&NV_NFORCE) {
		pci_read_config_dword(bridge, 0x7C, &mem);
		return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
	} else
	if (dev_priv->flags&NV_NFORCE2) {
		pci_read_config_dword(bridge, 0x84, &mem);
		return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
	}

	NV_ERROR(dev, "impossible!\n");
	return 0;
}

/* returns the amount of FB ram in bytes */
uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t boot0;

	switch (dev_priv->card_type) {
	case NV_04:
		boot0 = nv_rd32(dev, NV03_BOOT_0);
		if (boot0 & 0x00000100)
			return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;

		switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) {
		case NV04_BOOT_0_RAM_AMOUNT_32MB:
			return 32 * 1024 * 1024;
		case NV04_BOOT_0_RAM_AMOUNT_16MB:
			return 16 * 1024 * 1024;
		case NV04_BOOT_0_RAM_AMOUNT_8MB:
			return 8 * 1024 * 1024;
		case NV04_BOOT_0_RAM_AMOUNT_4MB:
			return 4 * 1024 * 1024;
		}
		break;
	case NV_10:
	case NV_20:
	case NV_30:
	case NV_40:
	case NV_50:
	default:
		if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
			return nouveau_mem_fb_amount_igp(dev);
		} else {
			uint64_t mem;
			mem = (nv_rd32(dev, NV04_FIFO_DATA) &
			       NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
			      NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
			return mem * 1024 * 1024;
		}
		break;
	}

	NV_ERROR(dev,
		"Unable to detect video ram size. Please report your setup to "
							DRIVER_EMAIL "\n");
	return 0;
}
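
/*
 * Worked example (register value assumed for illustration only): if
 * NV03_BOOT_0 reads back 0x00002100 on an NV04 board, bit 8 is set, so the
 * SDRAM path above applies and the reported size is
 * ((0x2 * 2) + 2) MiB = 6 MiB.
 */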

static void nouveau_mem_reset_agp(struct drm_device *dev)
{
	uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;

	saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);
	saved_pci_nv_19 = nv_rd32(dev, NV04_PBUS_PCI_NV_19);

	/* clear busmaster bit */
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
	/* clear SBA and AGP bits */
	nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);

	/* power cycle pgraph, if enabled */
	pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
	if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
		nv_wr32(dev, NV03_PMC_ENABLE,
			pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
		nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			NV_PMC_ENABLE_PGRAPH);
	}

	/* and restore (gives effect of resetting AGP) */
	nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
}

int
nouveau_mem_init_agp(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_agp_info info;
	struct drm_agp_mode mode;
	int ret;

	nouveau_mem_reset_agp(dev);

	if (!dev->agp->acquired) {
		ret = drm_agp_acquire(dev);
		if (ret) {
			NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
			return ret;
		}
	}

	ret = drm_agp_info(dev, &info);
	if (ret) {
		NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
		return ret;
	}

	/* see agp.h for the AGPSTAT_* modes available */
	mode.mode = info.mode;
	ret = drm_agp_enable(dev, mode);
	if (ret) {
		NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
		return ret;
	}

	dev_priv->gart_info.type = NOUVEAU_GART_AGP;
	dev_priv->gart_info.aper_base = info.aperture_base;
	dev_priv->gart_info.aper_size = info.aperture_size;
	return 0;
}

int
nouveau_mem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	int ret, dma_bits = 32;

	dev_priv->fb_phys = drm_get_resource_start(dev, 1);
	dev_priv->gart_info.type = NOUVEAU_GART_NONE;

	if (dev_priv->card_type >= NV_50 &&
	    pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
		dma_bits = 40;

	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret) {
		NV_ERROR(dev, "Error setting DMA mask: %d\n", ret);
		return ret;
	}

	ret = nouveau_ttm_global_init(dev_priv);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
				 dev_priv->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
				 dma_bits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
		return ret;
	}

	INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
	spin_lock_init(&dev_priv->ttm.bo_list_lock);
	spin_lock_init(&dev_priv->tile.lock);

	dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);

	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
	if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1))
		dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1);
	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;

	NV_INFO(dev, "%d MiB VRAM\n", (int)(dev_priv->fb_available_size >> 20));

	/* remove reserved space at end of vram from available amount */
	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
	dev_priv->fb_aper_free = dev_priv->fb_available_size;

	/* mappable vram */
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     dev_priv->fb_available_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
		return ret;
	}

	/* GART */
#if !defined(__powerpc__) && !defined(__ia64__)
	if (drm_device_is_agp(dev) && dev->agp) {
		ret = nouveau_mem_init_agp(dev);
		if (ret)
			NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
	}
#endif

	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
		ret = nouveau_sgdma_init(dev);
		if (ret) {
			NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
			return ret;
		}
	}

	NV_INFO(dev, "%d MiB GART (aperture)\n",
		(int)(dev_priv->gart_info.aper_size >> 20));
	dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;

	ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
			     dev_priv->gart_info.aper_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
		return ret;
	}

	dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
					 drm_get_resource_len(dev, 1),
					 DRM_MTRR_WC);

	return 0;
}