/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"
struct nouveau_gpuobj_method {
	struct list_head head;
	u32 mthd;
	int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
};
struct nouveau_gpuobj_class {
	struct list_head head;
	struct list_head methods;
	u32 id;
	u32 engine;
};
int
nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_class *oc;

	oc = kzalloc(sizeof(*oc), GFP_KERNEL);
	if (!oc)
		return -ENOMEM;

	INIT_LIST_HEAD(&oc->methods);
	oc->id = class;
	oc->engine = engine;
	list_add(&oc->head, &dev_priv->classes);
	return 0;
}
int
nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
			int (*exec)(struct nouveau_channel *, u32, u32, u32))
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id == class)
			goto found;
	}

	return -EINVAL;

found:
	om = kzalloc(sizeof(*om), GFP_KERNEL);
	if (!om)
		return -ENOMEM;

	om->mthd = mthd;
	om->exec = exec;
	list_add(&om->head, &oc->methods);
	return 0;
}
int
nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
			 u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id != class)
			continue;

		list_for_each_entry(om, &oc->methods, head) {
			if (om->mthd == mthd)
				return om->exec(chan, class, mthd, data);
		}
	}

	return -ENOENT;
}
int
nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
			  u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (chid > 0 && chid < dev_priv->engine.fifo.channels)
		chan = dev_priv->channels.ptr[chid];
	if (chan)
		ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	return ret;
}
/* NVidia uses context objects to drive drawing operations.

   Context objects can be selected into 8 subchannels in the FIFO,
   and then used via DMA command buffers.

   A context object is referenced by a user-defined handle (CARD32). The HW
   looks up graphics objects in a hash table in the instance RAM.

   An entry in the hash table consists of 2 CARD32. The first CARD32 contains
   the handle, the second one a bitfield that contains the address of the
   object in instance RAM.

   The format of the second CARD32 seems to be:

   NV4 to NV30:

   15: 0 instance_addr >> 4
   17:16 engine (here uses 1 = graphics)
   28:24 channel id (here uses 0)
   31    valid (use 1)

   NV40:

   15: 0 instance_addr >> 4 (maybe 19-0)
   21:20 engine (here uses 1 = graphics)
   I'm unsure about the other bits, but using 0 seems to work.

   The key into the hash table depends on the object handle and channel id and
   is given as:
*/
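/* Illustrative sketch only, not part of the original file: the hash key
 * referred to above is computed in nouveau_ramht.c (see
 * nouveau_ramht_hash_handle()).  Roughly, the handle is folded down to the
 * table's index width by repeated XOR, the channel id is mixed in on
 * pre-NV50 chips, and the result is scaled to a byte offset.  The function
 * name and the "ramht_bits"/"pre_nv50" parameters here are made up for the
 * example.
 */
static inline u32
example_ramht_hash(u32 handle, int chid, u32 ramht_bits, bool pre_nv50)
{
	u32 hash = 0;
	int i;

	/* fold the 32-bit handle down to ramht_bits bits */
	for (i = 32; i > 0; i -= ramht_bits) {
		hash ^= (handle & ((1 << ramht_bits) - 1));
		handle >>= ramht_bits;
	}

	/* pre-NV50 chips share one RAMHT, so the channel id is mixed in */
	if (pre_nv50)
		hash ^= chid << (ramht_bits - 4);

	/* each hash table entry is 2 CARD32, i.e. 8 bytes */
	return hash << 3;
}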
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm_node *ramin = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size = size;

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	if (chan) {
		/* suballocate from the channel's private PRAMIN heap */
		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);
		if (!ramin) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}

		gpuobj->pinst = chan->ramin->pinst;
		if (gpuobj->pinst != ~0)
			gpuobj->pinst += ramin->start;

		gpuobj->cinst = ramin->start;
		gpuobj->vinst = ramin->start + chan->ramin->vinst;
		gpuobj->node  = ramin;
	} else {
		/* global object: allocate through the instmem backend */
		ret = instmem->get(gpuobj, size, align);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}

		ret = -ENOSYS;
		if (dev_priv->ramin_available)
			ret = instmem->map(gpuobj);
		if (ret)
			gpuobj->pinst = ~0;

		gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}
int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
	INIT_LIST_HEAD(&dev_priv->classes);
	spin_lock_init(&dev_priv->ramin_lock);
	dev_priv->ramin_base = ~0;

	return 0;
}
void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om, *tm;
	struct nouveau_gpuobj_class *oc, *tc;

	list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
		list_for_each_entry_safe(om, tm, &oc->methods, head) {
			list_del(&om->head);
			kfree(om);
		}
		list_del(&oc->head);
		kfree(oc);
	}

	BUG_ON(!list_empty(&dev_priv->gpuobj_list));
}
static void
nouveau_gpuobj_del(struct kref *ref)
{
	struct nouveau_gpuobj *gpuobj =
		container_of(ref, struct nouveau_gpuobj, refcount);
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

	if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
		if (gpuobj->node) {
			instmem->unmap(gpuobj);
			instmem->put(gpuobj);
		}
	} else {
		if (gpuobj->node) {
			spin_lock(&dev_priv->ramin_lock);
			drm_mm_put_block(gpuobj->node);
			spin_unlock(&dev_priv->ramin_lock);
		}
	}

	spin_lock(&dev_priv->ramin_lock);
	list_del(&gpuobj->list);
	spin_unlock(&dev_priv->ramin_lock);

	kfree(gpuobj);
}
void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
	if (ref)
		kref_get(&ref->refcount);

	if (*ptr)
		kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

	*ptr = ref;
}
int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
			u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
		 pinst, vinst, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size  = size;
	gpuobj->pinst = pinst;
	gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	gpuobj->vinst = vinst;

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	if (pgpuobj)
		*pgpuobj = gpuobj;
	return 0;
}
static uint32_t
nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*XXX: dodgy hack for now */
	if (dev_priv->card_type >= NV_50)
		return 24;
	if (dev_priv->card_type >= NV_40)
		return 32;
	return 16;
}
/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big
   and looks as follows:

   entry[0]
   11:0  class (seems like I can always use 0 here)
   12    page table present?
   13    page entry linear?
   15:14 access: 0 rw, 1 ro, 2 wo
   17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
   31:20 dma adjust (bits 0-11 of the address)
   entry[1]
   dma limit (size of transfer)
   entry[X]
   1     0 readonly, 1 readwrite
   31:12 dma frame address of the page (bits 12-31 of the address)
   entry[N]
   page table terminator; nvidia uses the same value as the first pte,
   rivatv uses 0xffffffff

   Non-linear page tables need a list of frame addresses afterwards;
   the rivatv project has some info on this.

   The method below creates a DMA object in instance RAM and returns a handle
   to it that can be used to set up context objects.
*/
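/* Illustrative sketch only, not part of the original file: packing the
 * first three words of a pre-NV50 linear DMA object exactly as the layout
 * above describes it.  The "target" and "access" arguments use the raw
 * entry[0] encodings from the comment (0 NV memory, 2 PCI, 3 AGP; 0 rw,
 * 1 ro, 2 wo), not the driver's NV_MEM_* enums, and the function name is
 * made up for the example.  nouveau_gpuobj_dma_new() further down builds
 * the same words for real.
 */
static inline void
example_nv04_dma_pack(u32 *w, u32 class, u32 target, u32 access,
		      u64 base, u64 size)
{
	w[0]  = class & 0xfff;
	w[0] |= 0x00003000;			/* page table present, linear */
	w[0] |= (access & 3) << 14;		/* 15:14 access */
	w[0] |= (target & 3) << 16;		/* 17:16 target */
	w[0] |= ((u32)base & 0x00000fff) << 20;	/* 31:20 dma adjust */
	w[1]  = (u32)(size - 1);		/* dma limit */
	w[2]  = (u32)(base & 0xfffff000) | 2;	/* first pte: frame addr, rw */
}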
void
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
		     u64 base, u64 size, int target, int access,
		     u32 type, u32 comp)
{
	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	u32 flags0;

	flags0  = (comp << 29) | (type << 22) | class;
	flags0 |= 0x00100000;

	switch (access) {
	case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
	case NV_MEM_ACCESS_RW:
	case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
	default:
		break;
	}

	switch (target) {
	case NV_MEM_TARGET_VRAM:
		flags0 |= 0x00010000;
		break;
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	case NV_MEM_TARGET_GART:
		base += dev_priv->vm_gart_base;
	default:
		flags0 &= ~0x00100000;
		break;
	}

	/* convert to base + limit */
	size = (base + size) - 1;

	nv_wo32(obj, offset + 0x00, flags0);
	nv_wo32(obj, offset + 0x04, lower_32_bits(size));
	nv_wo32(obj, offset + 0x08, lower_32_bits(base));
	nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
				    upper_32_bits(base));
	nv_wo32(obj, offset + 0x10, 0x00000000);
	nv_wo32(obj, offset + 0x14, 0x00000000);

	pinstmem->flush(obj->dev);
}
int
nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
		    int target, int access, u32 type, u32 comp,
		    struct nouveau_gpuobj **pobj)
{
	struct drm_device *dev = chan->dev;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
	if (ret)
		return ret;

	nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
			     access, type, comp);
	return 0;
}
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
		       u64 size, int access, int target,
		       struct nouveau_gpuobj **pobj)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *obj;
	u32 page_addr, flags0, flags2;
	int ret;

	if (dev_priv->card_type >= NV_50) {
		u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
		u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;

		return nv50_gpuobj_dma_new(chan, class, base, size,
					   target, access, type, comp, pobj);
	}

	if (target == NV_MEM_TARGET_GART) {
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			target = NV_MEM_TARGET_PCI_NOSNOOP;
			base  += dev_priv->gart_info.aper_base;
		} else
		if (base != 0) {
			ret = nouveau_sgdma_get_page(dev, base, &page_addr);
			if (ret)
				return ret;

			target = NV_MEM_TARGET_PCI;
			base   = page_addr;
		} else {
			nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj);
			return 0;
		}
	}

	flags0  = class;
	flags0 |= 0x00003000; /* PT present, PT linear */
	flags2  = 0;

	switch (target) {
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	default:
		break;
	}

	switch (access) {
	case NV_MEM_ACCESS_RO:
		flags0 |= 0x00004000;
		break;
	case NV_MEM_ACCESS_WO:
		flags0 |= 0x00008000;
	default:
		flags2 |= 0x00000002;
		break;
	}

	flags0 |= (base & 0x00000fff) << 20;
	flags2 |= (base & 0xfffff000);

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, flags0);
	nv_wo32(obj, 0x04, size - 1);
	nv_wo32(obj, 0x08, flags2);
	nv_wo32(obj, 0x0c, flags2);

	obj->engine = NVOBJ_ENGINE_SW;
	obj->class  = class;
	*pobj = obj;
	return 0;
}
/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 bytes long, on NV30 and earlier 16 bytes.

   NV4 - NV30:

   entry[0]
   11:0  class
   17:15 patch config:
         scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
   18    synchronize enable
   19    endian: 1 big, 0 little
   23    single step enable
   24    patch status: 0 invalid, 1 valid
   25    context surface 0: 1 valid
   26    context surface 1: 1 valid
   27    context pattern: 1 valid
   28    context rop: 1 valid
   29,30 context beta, beta4
   entry[1]
   31:16 notify instance address
   entry[2]
   15:0  dma 0 instance address
   31:16 dma 1 instance address

   NV40:
   No idea what the exact format is. Here's what can be deduced:

   entry[0]:
   11:0  class (maybe uses more bits here?)
   25    patch status valid?
   entry[1]:
   15:0  DMA notifier (maybe 20:0)
   entry[2]:
   15:0  DMA 0 instance (maybe 20:0)
   24    big endian
   entry[3]:
   15:0  DMA 1 instance (maybe 20:0)
*/
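/* Illustrative sketch only, not part of the original file: building the
 * pre-NV40 entry[0] word described above.  Only the class and endian
 * fields are filled in, which is all nouveau_gpuobj_gr_new() below does
 * either (the 0x00080000 written there is exactly bit 19, the big-endian
 * flag); the function name is made up for the example.
 */
static inline u32
example_nv04_grobj_word0(u32 class, bool big_endian)
{
	u32 w0 = class & 0xfff;		/* 11:0 object class */

	if (big_endian)
		w0 |= 1 << 19;		/* 19: endian, 1 = big */

	return w0;
}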
static int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj *gpuobj;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	gpuobj->dev = chan->dev;
	gpuobj->engine = NVOBJ_ENGINE_SW;
	gpuobj->class = class;
	kref_init(&gpuobj->refcount);
	gpuobj->cinst = 0x40;

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);
	*gpuobj_ret = gpuobj;
	return 0;
}
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj_class *oc;
	struct nouveau_gpuobj *gpuobj;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id == class)
			goto found;
	}

	NV_ERROR(dev, "illegal object class: 0x%x\n", class);
	return -EINVAL;

found:
	switch (oc->engine) {
	case NVOBJ_ENGINE_SW:
		ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj);
		if (ret)
			return ret;
		goto insert;
	case NVOBJ_ENGINE_GR:
		if (dev_priv->card_type >= NV_50 && !chan->ramin_grctx) {
			struct nouveau_pgraph_engine *pgraph =
				&dev_priv->engine.graph;

			ret = pgraph->create_context(chan);
			if (ret)
				return ret;
		}
		break;
	case NVOBJ_ENGINE_CRYPT:
		if (!chan->crypt_ctx) {
			struct nouveau_crypt_engine *pcrypt =
				&dev_priv->engine.crypt;

			ret = pcrypt->create_context(chan);
			if (ret)
				return ret;
		}
		break;
	}

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16,
				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
				 &gpuobj);
	if (ret) {
		NV_ERROR(dev, "error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type >= NV_50) {
		nv_wo32(gpuobj,  0, class);
		nv_wo32(gpuobj, 20, 0x00010000);
	} else {
		switch (class) {
		case NV_CLASS_NULL:
			nv_wo32(gpuobj, 0, 0x00001030);
			nv_wo32(gpuobj, 4, 0xFFFFFFFF);
			break;
		default:
			if (dev_priv->card_type >= NV_40) {
				nv_wo32(gpuobj, 0, class);
#ifdef __BIG_ENDIAN
				nv_wo32(gpuobj, 8, 0x01000000);
#endif
			} else {
#ifdef __BIG_ENDIAN
				nv_wo32(gpuobj, 0, class | 0x00080000);
#else
				nv_wo32(gpuobj, 0, class);
#endif
			}
		}
	}
	dev_priv->engine.instmem.flush(dev);

	gpuobj->engine = oc->engine;
	gpuobj->class  = oc->id;

insert:
	ret = nouveau_ramht_insert(chan, handle, gpuobj);
	if (ret)
		NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret);

	nouveau_gpuobj_ref(NULL, &gpuobj);
	return ret;
}
static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t size;
	uint32_t base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Base amount for object storage (4KiB enough?) */
	size = 0x1000;
	base = 0;

	/* PGRAPH context */
	size += dev_priv->engine.graph.grctx_size;

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base  = 0x6000;
		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
	}

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, base, size);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref(NULL, &chan->ramin);
		return ret;
	}

	return 0;
}
int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

	/* Allocate a chunk of memory for per-channel object storage */
	ret = nouveau_gpuobj_channel_init_pramin(chan);
	if (ret) {
		NV_ERROR(dev, "init pramin\n");
		return ret;
	}

	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Map GART and VRAM into the channel's address space at the
	 *    locations determined during init.
	 */
	if (dev_priv->card_type >= NV_50) {
		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
		u32 vm_pinst = chan->ramin->pinst;
		u32 pde;

		if (vm_pinst != ~0)
			vm_pinst += pgd_offs;

		ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
					      0, &chan->vm_pd);
		if (ret)
			return ret;
		for (i = 0; i < 0x4000; i += 8) {
			nv_wo32(chan->vm_pd, i + 0, 0x00000000);
			nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
		}

		nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
				   &chan->vm_gart_pt);
		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
		nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
		nv_wo32(chan->vm_pd, pde + 4, 0x00000000);

		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
			nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
					   &chan->vm_vram_pt[i]);

			nv_wo32(chan->vm_pd, pde + 0,
				chan->vm_vram_pt[i]->vinst | 0x61);
			nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
			pde += 8;
		}

		instmem->flush(dev);
	}

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
	} else {
		struct nouveau_gpuobj *ramht = NULL;

		ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
					 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
		if (ret)
			return ret;

		ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
		nouveau_gpuobj_ref(NULL, &ramht);
		if (ret)
			return ret;
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_ramht_insert(chan, vram_h, vram);
	nouveau_gpuobj_ref(NULL, &vram);
	if (ret) {
		NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &tt);
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->gart_info.aper_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_GART, &tt);
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, tt_h, tt);
	nouveau_gpuobj_ref(NULL, &tt);
	if (ret) {
		NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	return 0;
}
void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	int i;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	nouveau_ramht_ref(NULL, &chan->ramht, chan);

	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
	nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
		nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);

	if (chan->ramin_heap.free_stack.next)
		drm_mm_takedown(&chan->ramin_heap);
	nouveau_gpuobj_ref(NULL, &chan->ramin);
}
int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
			continue;

		gpuobj->suspend = vmalloc(gpuobj->size);
		if (!gpuobj->suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		for (i = 0; i < gpuobj->size; i += 4)
			gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
	}

	return 0;
}
void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->suspend)
			continue;

		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);

		vfree(gpuobj->suspend);
		gpuobj->suspend = NULL;
	}

	dev_priv->engine.instmem.flush(dev);
}
int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	if (init->handle == ~0)
		return -EINVAL;

	chan = nouveau_channel_get(dev, file_priv, init->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	if (nouveau_ramht_find(chan, init->handle)) {
		ret = -EEXIST;
		goto out;
	}

	ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
	if (ret) {
		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
	}

out:
	nouveau_channel_put(&chan);
	return ret;
}
int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_gpuobj_free *objfree = data;
	struct nouveau_channel *chan;
	int ret;

	chan = nouveau_channel_get(dev, file_priv, objfree->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Synchronize with the user channel before removing the object */
	nouveau_channel_idle(chan);

	ret = nouveau_ramht_remove(chan, objfree->handle);
	nouveau_channel_put(&chan);
	return ret;
}
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64  ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;
		u32  val;

		spin_lock(&dev_priv->ramin_lock);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
		spin_unlock(&dev_priv->ramin_lock);
		return val;
	}

	return nv_ri32(dev, gpuobj->pinst + offset);
}
void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64  ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;

		spin_lock(&dev_priv->ramin_lock);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
		spin_unlock(&dev_priv->ramin_lock);
		return;
	}

	nv_wi32(dev, gpuobj->pinst + offset, val);
}