/*
 * Copyright (C) 2007 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_vm.h"

#define BAR1_VM_BASE 0x0020000000ULL
#define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1)
#define BAR3_VM_BASE 0x0000000000ULL
#define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3)
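
/* On NV50, BAR1 windows VRAM for the CPU and BAR3 windows PRAMIN
 * (instance memory); each gets a small VM of its own below.  That
 * reading of the BAR roles is ours, not something the code states.
 * The DMA objects built for them live in nv50_instmem_priv.
 */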
struct nv50_instmem_priv {
	uint32_t save1700[5]; /* 0x1700->0x1710 */

	struct nouveau_gpuobj *bar1_dmaobj;
	struct nouveau_gpuobj *bar3_dmaobj;
};
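
/* Destroy a channel built by nv50_channel_new() below, dropping its
 * objects in reverse order of creation.  Safe on a partially
 * constructed channel, which lets every error path in
 * nv50_channel_new() just call this.
 */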
static void
nv50_channel_del(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan;

	chan = *pchan;
	*pchan = NULL;
	if (!chan)
		return;

	nouveau_gpuobj_ref(NULL, &chan->ramfc);
	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
	if (drm_mm_initialized(&chan->ramin_heap))
		drm_mm_takedown(&chan->ramin_heap);
	nouveau_gpuobj_ref(NULL, &chan->ramin);
	kfree(chan);
}
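
/* Build a bare-bones software channel: a RAMIN block, a page directory
 * wired to the given VM, and a RAMFC.  The PD and RAMFC offsets inside
 * RAMIN differ between the original NV50 and later chipsets, hence the
 * pgd/fc selection below.
 */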
static int
nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
		 struct nouveau_channel **pchan)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
	u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
	struct nouveau_channel *chan;
	int ret, i;

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = dev;

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		nv50_channel_del(&chan);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size - 0x6000);
	if (ret) {
		nv50_channel_del(&chan);
		return ret;
	}

	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
				      chan->ramin->pinst + pgd,
				      chan->ramin->vinst + pgd,
				      0x4000, NVOBJ_FLAG_ZERO_ALLOC,
				      &chan->vm_pd);
	if (ret) {
		nv50_channel_del(&chan);
		return ret;
	}
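
	/* Seed every page-directory entry as invalid; the 0xdeadcafe
	 * poison in the upper word presumably makes stale or stray PDE
	 * lookups easy to spot in instance-memory dumps.
	 */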
	for (i = 0; i < 0x4000; i += 8) {
		nv_wo32(chan->vm_pd, i + 0, 0x00000000);
		nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
	}

	ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
	if (ret) {
		nv50_channel_del(&chan);
		return ret;
	}

	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
				      chan->ramin->pinst + fc,
				      chan->ramin->vinst + fc, 0x100,
				      NVOBJ_FLAG_ZERO_ALLOC, &chan->ramfc);
	if (ret) {
		nv50_channel_del(&chan);
		return ret;
	}

	*pchan = chan;
	return 0;
}
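
/* Bring up instance memory: BAR3 and its VM come first so gpuobjs are
 * CPU-accessible through PRAMIN, then a helper channel to hold the BAR
 * DMA objects, then BAR1, and finally the VM shared by user channels.
 */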
int
nv50_instmem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_instmem_priv *priv;
	struct nouveau_channel *chan;
	struct nouveau_vm *vm;
	int ret, i;
	u32 tmp;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	dev_priv->engine.instmem.priv = priv;

	/* Save state, will restore at takedown. */
	for (i = 0x1700; i <= 0x1710; i += 4)
		priv->save1700[(i - 0x1700) / 4] = nv_rd32(dev, i);

	/* Global PRAMIN heap */
	ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
	if (ret) {
		NV_ERROR(dev, "Failed to init RAMIN heap\n");
		goto error;
	}
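
	/* BAR3: a VM spanning the whole PRAMIN aperture, with a single
	 * page table large enough to map it (8 bytes per 4 KiB page).
	 */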
	ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE,
			     &dev_priv->bar3_vm);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8,
				 0x1000, NVOBJ_FLAG_DONT_MAP |
				 NVOBJ_FLAG_ZERO_ALLOC,
				 &dev_priv->bar3_vm->pgt[0].obj[0]);
	if (ret)
		goto error;
	dev_priv->bar3_vm->pgt[0].refcount[0] = 1;

	nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);

	ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan);
	if (ret)
		goto error;
	dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan;

	ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE,
				  NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
				  NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
				  &priv->bar3_dmaobj);
	if (ret)
		goto error;

	nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
	nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
	nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4));

	dev_priv->engine.instmem.flush(dev);
	dev_priv->ramin_available = true;
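
	/* Sanity check: flip one word through the brand-new BAR3 path
	 * and read it back, so a broken mapping fails loudly here
	 * rather than as silent corruption later.
	 */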
	tmp = nv_ro32(chan->ramin, 0);
	nv_wo32(chan->ramin, 0, ~tmp);
	if (nv_ro32(chan->ramin, 0) != ~tmp) {
		NV_ERROR(dev, "PRAMIN readback failed\n");
		ret = -EIO;
		goto error;
	}
	nv_wo32(chan->ramin, 0, tmp);
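
	/* BAR1: the same recipe as BAR3, this time covering the VRAM
	 * aperture so buffer objects can be made CPU-visible.
	 */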
	ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, &vm);
	if (ret)
		goto error;

	ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd);
	if (ret)
		goto error;
	nouveau_vm_ref(NULL, &vm, NULL);

	ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE,
				  NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
				  NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
				  &priv->bar1_dmaobj);
	if (ret)
		goto error;

	nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4));
	for (i = 0; i < 8; i++)
		nv_wr32(dev, 0x1900 + (i * 4), 0);

	/* Create shared channel VM, space is reserved at the beginning
	 * to catch "NULL pointer" references
	 */
	ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
			     &dev_priv->chan_vm);
	if (ret)
		goto error;

	return 0;

error:
	nv50_instmem_takedown(dev);
	return ret;
}
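
/* Unwind everything nv50_instmem_init() set up.  This is also the
 * error path of init itself, so each step has to tolerate objects
 * that were never created.
 */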
void
nv50_instmem_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
	int i;

	NV_DEBUG(dev, "\n");

	if (!priv)
		return;

	dev_priv->ramin_available = false;

	nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);

	for (i = 0x1700; i <= 0x1710; i += 4)
		nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);

	nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj);
	nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj);

	nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd);
	dev_priv->channels.ptr[127] = NULL;
	nv50_channel_del(&dev_priv->channels.ptr[0]);

	nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
	nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);

	if (drm_mm_initialized(&dev_priv->ramin_heap))
		drm_mm_takedown(&dev_priv->ramin_heap);

	dev_priv->engine.instmem.priv = NULL;
	kfree(priv);
}

int
nv50_instmem_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	dev_priv->ramin_available = false;
	return 0;
}

void
nv50_instmem_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
	int i;

	/* Poke the relevant regs, and pray it works :) */
	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
	nv_wr32(dev, NV50_PUNK_UNK1710, 0);
	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
					     NV50_PUNK_BAR_CFG_BASE_VALID);
	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) |
					    NV50_PUNK_BAR1_CTXDMA_VALID);
	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) |
					    NV50_PUNK_BAR3_CTXDMA_VALID);

	for (i = 0; i < 8; i++)
		nv_wr32(dev, 0x1900 + (i * 4), 0);

	dev_priv->ramin_available = true;
}

struct nv50_gpuobj_node {
	struct nouveau_mem *vram;
	struct nouveau_vma chan_vma;
	u32 align;
};
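
/* Back a gpuobj with VRAM and, for NVOBJ_FLAG_VM objects, map it into
 * the owning channel's VM as well.  The bookkeeping needed to undo all
 * of this hangs off gpuobj->node.
 */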
int
nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
		 u32 size, u32 align)
{
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	struct nv50_gpuobj_node *node = NULL;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	node->align = align;

	size = (size + 4095) & ~4095;
	align = max(align, (u32)4096);

	ret = vram->get(dev, size, align, 0, 0, &node->vram);
	if (ret) {
		kfree(node);
		return ret;
	}

	gpuobj->vinst = node->vram->offset;

	if (gpuobj->flags & NVOBJ_FLAG_VM) {
		u32 flags = NV_MEM_ACCESS_RW;
		if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
			flags |= NV_MEM_ACCESS_SYS;

		ret = nouveau_vm_get(chan->vm, size, 12, flags,
				     &node->chan_vma);
		if (ret) {
			vram->put(dev, &node->vram);
			kfree(node);
			return ret;
		}

		nouveau_vm_map(&node->chan_vma, node->vram);
		gpuobj->linst = node->chan_vma.offset;
	}

	gpuobj->size = size;
	gpuobj->node = node;
	return 0;
}

void
nv50_instmem_put(struct nouveau_gpuobj *gpuobj)
{
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	struct nv50_gpuobj_node *node;

	node = gpuobj->node;
	gpuobj->node = NULL;

	if (node->chan_vma.node) {
		nouveau_vm_unmap(&node->chan_vma);
		nouveau_vm_put(&node->chan_vma);
	}
	vram->put(dev, &node->vram);
	kfree(node);
}
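
/* map/unmap control CPU visibility: mapping grabs a range in the BAR3
 * VM and points gpuobj->pinst at it, making the object reachable
 * through the PRAMIN aperture.
 */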
int
nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct nv50_gpuobj_node *node = gpuobj->node;
	int ret;

	ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12,
			     NV_MEM_ACCESS_RW, &node->vram->bar_vma);
	if (ret)
		return ret;

	nouveau_vm_map(&node->vram->bar_vma, node->vram);
	gpuobj->pinst = node->vram->bar_vma.offset;
	return 0;
}

void
nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
{
	struct nv50_gpuobj_node *node = gpuobj->node;

	if (node->vram->bar_vma.node) {
		nouveau_vm_unmap(&node->vram->bar_vma);
		nouveau_vm_put(&node->vram->bar_vma);
	}
}
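
/* VM/PRAMIN updates must be flushed explicitly: write bit 0 to kick a
 * flush, then poll for completion.  That nv_wait() is watching a busy
 * bit (bit 1) is our interpretation of the mask, not documented here.
 */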
void
nv50_instmem_flush(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->vm_lock, flags);
	nv_wr32(dev, 0x00330c, 0x00000001);
	if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
		NV_ERROR(dev, "PRAMIN flush timeout\n");
	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
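
/* Same dance on NV84 and later, where the flush register moved from
 * 0x00330c to 0x070000.
 */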
void
nv84_instmem_flush(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->vm_lock, flags);
	nv_wr32(dev, 0x070000, 0x00000001);
	if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
		NV_ERROR(dev, "PRAMIN flush timeout\n");
	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}