/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include <drm/drmP.h>
#include "nouveau_drv.h"
#include <drm/nouveau_drm.h>
#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
#include "nouveau_software.h"
#include "nouveau_vm.h"

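/* Software class/method registry: each class registered with
 * nouveau_gpuobj_class_new() is bound to an engine and carries a list of
 * software methods that nouveau_gpuobj_mthd_call() can dispatch on its
 * behalf.
 */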
struct nouveau_gpuobj_method {
	struct list_head head;
	u32 mthd;
	int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
};

struct nouveau_gpuobj_class {
	struct list_head head;
	struct list_head methods;
	u32 id;
	u32 engine;
};

int
nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_class *oc;

	oc = kzalloc(sizeof(*oc), GFP_KERNEL);
	if (!oc)
		return -ENOMEM;

	INIT_LIST_HEAD(&oc->methods);
	oc->id = class;
	oc->engine = engine;
	list_add(&oc->head, &dev_priv->classes);
	return 0;
}

int
nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
			int (*exec)(struct nouveau_channel *, u32, u32, u32))
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id == class)
			goto found;
	}

	return -EINVAL;

found:
	om = kzalloc(sizeof(*om), GFP_KERNEL);
	if (!om)
		return -ENOMEM;

	om->mthd = mthd;
	om->exec = exec;
	list_add(&om->head, &oc->methods);
	return 0;
}

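/* Dispatch a software method: walk the registered classes for a matching
 * class id, then that class's method list for a matching method, and run
 * the handler.  Returns -ENOENT if nothing is registered for this
 * class/method pair.
 */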
int
nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
			 u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id != class)
			continue;

		list_for_each_entry(om, &oc->methods, head) {
			if (om->mthd == mthd)
				return om->exec(chan, class, mthd, data);
		}
	}

	return -ENOENT;
}

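/* As nouveau_gpuobj_mthd_call(), but for callers that only know the
 * channel id; the channel pointer is resolved and used under the
 * channels lock so it can't go away underneath the handler.
 */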
int
nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
			  u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct nouveau_channel *chan = NULL;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (chid >= 0 && chid < pfifo->channels)
		chan = dev_priv->channels.ptr[chid];
	if (chan)
		ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return ret;
}

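/* Allocate instance memory for a new gpuobj.  Two paths: objects tied to
 * a channel (and not flagged NVOBJ_FLAG_VM) are suballocated from that
 * channel's PRAMIN heap; otherwise backing storage comes from the global
 * instmem engine and is mapped unless NVOBJ_FLAG_DONT_MAP is set.
 */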
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm_node *ramin = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size = size;

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	if (!(flags & NVOBJ_FLAG_VM) && chan) {
		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);
		if (!ramin) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}

		gpuobj->pinst = chan->ramin->pinst;
		if (gpuobj->pinst != ~0)
			gpuobj->pinst += ramin->start;

		gpuobj->cinst = ramin->start;
		gpuobj->vinst = ramin->start + chan->ramin->vinst;
		gpuobj->node  = ramin;
	} else {
		ret = instmem->get(gpuobj, chan, size, align);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}

		ret = -ENOSYS;
		if (!(flags & NVOBJ_FLAG_DONT_MAP))
			ret = instmem->map(gpuobj);
		if (ret)
			gpuobj->pinst = ~0;

		gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}

int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
	INIT_LIST_HEAD(&dev_priv->classes);
	spin_lock_init(&dev_priv->ramin_lock);
	dev_priv->ramin_base = ~0;

	return 0;
}

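/* Tear down the class/method registry built up by the engine drivers;
 * any gpuobj still on gpuobj_list at this point has been leaked.
 */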
void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om, *tm;
	struct nouveau_gpuobj_class *oc, *tc;

	NV_DEBUG(dev, "\n");

	list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
		list_for_each_entry_safe(om, tm, &oc->methods, head) {
			list_del(&om->head);
			kfree(om);
		}
		list_del(&oc->head);
		kfree(oc);
	}

	WARN_ON(!list_empty(&dev_priv->gpuobj_list));
}

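/* kref release handler: scrubs the object if NVOBJ_FLAG_ZERO_FREE is
 * set, runs any destructor, then returns the backing storage to either
 * instmem or the owning channel's PRAMIN heap.
 */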
static void
nouveau_gpuobj_del(struct kref *ref)
{
	struct nouveau_gpuobj *gpuobj =
		container_of(ref, struct nouveau_gpuobj, refcount);
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

	if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
		if (gpuobj->node) {
			instmem->unmap(gpuobj);
			instmem->put(gpuobj);
		}
	} else {
		if (gpuobj->node) {
			spin_lock(&dev_priv->ramin_lock);
			drm_mm_put_block(gpuobj->node);
			spin_unlock(&dev_priv->ramin_lock);
		}
	}

	spin_lock(&dev_priv->ramin_lock);
	list_del(&gpuobj->list);
	spin_unlock(&dev_priv->ramin_lock);

	kfree(gpuobj);
}

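/* Reference-swap helper: takes a reference on 'ref' (which may be NULL),
 * drops the one held by '*ptr' (freeing the object through
 * nouveau_gpuobj_del() if it was the last), and points '*ptr' at 'ref'.
 * Dropping a reference is therefore spelled:
 *
 *	nouveau_gpuobj_ref(NULL, &gpuobj);
 */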
void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
	if (ref)
		kref_get(&ref->refcount);

	if (*ptr)
		kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

	*ptr = ref;
}

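/* Wrap an already-allocated range of instance memory (e.g. a fixed table
 * laid out by the instmem setup code) in a gpuobj so the rest of the
 * driver can treat it uniformly.  No storage is allocated here.
 */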
int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
			u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
		 pinst, vinst, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size  = size;
	gpuobj->pinst = pinst;
	gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	gpuobj->vinst = vinst;

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);
	*pgpuobj = gpuobj;
	return 0;
}

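/* Write an NV50-style DMA object (6 x 32-bit words) into 'obj' at
 * 'offset'.  From the code below, the packing is roughly:
 *
 *	word 0: comp << 29 | type << 22 | class, plus access (bits 18-19),
 *	        target (bits 16-17) and bit 20 (0x00100000), which is set
 *	        up front and cleared again for GART/default targets
 *	word 1: limit (low 32 bits), where limit = base + size - 1
 *	word 2: base (low 32 bits)
 *	word 3: upper bits of limit (<< 24) and of base
 *	words 4-5: zero
 */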
void
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
		     u64 base, u64 size, int target, int access,
		     u32 type, u32 comp)
{
	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	u32 flags0;

	flags0  = (comp << 29) | (type << 22) | class;
	flags0 |= 0x00100000;

	switch (access) {
	case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
	case NV_MEM_ACCESS_RW:
	case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
	default:
		break;
	}

	switch (target) {
	case NV_MEM_TARGET_VRAM:
		flags0 |= 0x00010000;
		break;
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	case NV_MEM_TARGET_GART:
		base += dev_priv->gart_info.aper_base;
		/* fall through */
	default:
		flags0 &= ~0x00100000;
		break;
	}

	/* convert to base + limit */
	size = (base + size) - 1;

	nv_wo32(obj, offset + 0x00, flags0);
	nv_wo32(obj, offset + 0x04, lower_32_bits(size));
	nv_wo32(obj, offset + 0x08, lower_32_bits(base));
	nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
				    upper_32_bits(base));
	nv_wo32(obj, offset + 0x10, 0x00000000);
	nv_wo32(obj, offset + 0x14, 0x00000000);

	pinstmem->flush(obj->dev);
}

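/* Convenience wrapper: allocates a 24-byte, 16-byte-aligned gpuobj (big
 * enough for the six words above) and fills it in with
 * nv50_gpuobj_dma_init().
 */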
int
nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
		    int target, int access, u32 type, u32 comp,
		    struct nouveau_gpuobj **pobj)
{
	struct drm_device *dev = chan->dev;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
	if (ret)
		return ret;

	nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
			     access, type, comp);
	return 0;
}

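/* Card-independent DMA object constructor.  NV50+ delegates to the
 * routine above; earlier cards use the legacy ctxdma layout built below.
 * GART targets are first translated to a PCI view: either the shared
 * page-table ctxdma is reused directly (base == 0 on PDMA), or the
 * address is converted to a physical/aperture-relative one.
 */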
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
		       u64 size, int access, int target,
		       struct nouveau_gpuobj **pobj)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *obj;
	u32 flags0, flags2;
	int ret;

	if (dev_priv->card_type >= NV_50) {
		u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
		u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;

		return nv50_gpuobj_dma_new(chan, class, base, size,
					   target, access, type, comp, pobj);
	}

	if (target == NV_MEM_TARGET_GART) {
		struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;

		if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
			if (base == 0) {
				nouveau_gpuobj_ref(gart, pobj);
				return 0;
			}

			base   = nouveau_sgdma_get_physical(dev, base);
			target = NV_MEM_TARGET_PCI;
		} else {
			base += dev_priv->gart_info.aper_base;
			if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
				target = NV_MEM_TARGET_PCI_NOSNOOP;
			else
				target = NV_MEM_TARGET_PCI;
		}
	}

	flags0  = class;
	flags0 |= 0x00003000; /* PT present, PT linear */
	flags2  = 0;

	switch (target) {
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	default:
		break;
	}

	switch (access) {
	case NV_MEM_ACCESS_RO:
		flags0 |= 0x00004000;
		break;
	case NV_MEM_ACCESS_WO:
		flags0 |= 0x00008000;
		/* fall through */
	default:
		flags2 |= 0x00000002;
		break;
	}

	flags0 |= (base & 0x00000fff) << 20;
	flags2 |= (base & 0xfffff000);

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, flags0);
	nv_wo32(obj, 0x04, size - 1);
	nv_wo32(obj, 0x08, flags2);
	nv_wo32(obj, 0x0c, flags2);

	obj->engine = NVOBJ_ENGINE_SW;
	obj->class  = class;
	*pobj = obj;
	return 0;
}

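/* Instantiate an engine object of 'class' on a channel: look the class
 * up in the registry, create the engine's channel context on first use,
 * then ask the engine to create the named object.
 */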
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj_class *oc;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	list_for_each_entry(oc, &dev_priv->classes, head) {
		struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine];

		if (oc->id != class)
			continue;

		if (!chan->engctx[oc->engine]) {
			ret = eng->context_new(chan, oc->engine);
			if (ret)
				return ret;
		}

		return eng->object_new(chan, oc->engine, handle, class);
	}

	return -EINVAL;
}

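/* Allocate the per-channel PRAMIN area and initialise the heap that
 * nouveau_gpuobj_new() suballocates channel objects from.  On NV50 the
 * first 0x6000 bytes are reserved for fixed structures (an unknown table
 * and the VM page directory, per the comments below), so the heap starts
 * at 'base'.
 */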
static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t size;
	uint32_t base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Base amount for object storage (8KiB enough?) */
	size = 0x2000;
	base = 0;

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base  = 0x6000;
		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
		/* RAMFC */
		size += 0x1000;
	}

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, base, size - base);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref(NULL, &chan->ramin);
		return ret;
	}

	return 0;
}

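/* NVC0+ channels don't use the PRAMIN heap; each channel just gets a
 * 4KiB instance block that is pointed at the VM's page directory, which
 * is shared between all channels on the same VM.
 */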
static int
nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *pgd = NULL;
	struct nouveau_vm_pgd *vpgd;
	int ret;

	ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
	if (ret)
		return ret;

	/* create page directory for this vm if none currently exists,
	 * will be destroyed automagically when last reference to the
	 * vm is removed
	 */
	if (list_empty(&vm->pgd_list)) {
		ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
		if (ret)
			return ret;
	}
	nouveau_vm_ref(vm, &chan->vm, pgd);
	nouveau_gpuobj_ref(NULL, &pgd);

	/* point channel at vm's page directory */
	vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
	nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
	nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
	nv_wo32(chan->ramin, 0x0208, 0xffffffff);
	nv_wo32(chan->ramin, 0x020c, 0x000000ff);

	return 0;
}

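/* Per-channel object setup at channel creation: the PRAMIN heap, the
 * NV50-style page directory (linked into the per-client or shared VM),
 * a RAMHT (shared on pre-NV50, private from NV50 on), and the VRAM/TT
 * DMA objects hashed in under the caller-supplied vram_h/tt_h handles.
 */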
int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
	struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
	if (dev_priv->card_type >= NV_C0)
		return nvc0_gpuobj_channel_init(chan, vm);

	/* Allocate a chunk of memory for per-channel object storage */
	ret = nouveau_gpuobj_channel_init_pramin(chan);
	if (ret) {
		NV_ERROR(dev, "Error initialising channel PRAMIN: %d\n", ret);
		return ret;
	}

	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Link with shared channel VM
	 */
	if (vm) {
		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
		u32 vm_pinst = chan->ramin->pinst;

		if (vm_pinst != ~0)
			vm_pinst += pgd_offs;

		ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
					      0, &chan->vm_pd);
		if (ret)
			return ret;

		nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
	}

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
	} else {
		struct nouveau_gpuobj *ramht = NULL;

		ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
					 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
		if (ret)
			return ret;

		ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
		nouveau_gpuobj_ref(NULL, &ramht);
		if (ret)
			return ret;
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_ramht_insert(chan, vram_h, vram);
	nouveau_gpuobj_ref(NULL, &vram);
	if (ret) {
		NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &tt);
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->gart_info.aper_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_GART, &tt);
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, tt_h, tt);
	nouveau_gpuobj_ref(NULL, &tt);
	if (ret) {
		NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	NV_DEBUG(chan->dev, "ch%d\n", chan->id);

	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
	nouveau_gpuobj_ref(NULL, &chan->vm_pd);

	if (drm_mm_initialized(&chan->ramin_heap))
		drm_mm_takedown(&chan->ramin_heap);
	nouveau_gpuobj_ref(NULL, &chan->ramin);
}

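/* Suspend/resume support: global (instmem-backed) objects can lose their
 * contents across suspend, so copy each one into a vmalloc'd shadow
 * buffer here and write it back in nouveau_gpuobj_resume().
 */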
int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
			continue;

		gpuobj->suspend = vmalloc(gpuobj->size);
		if (!gpuobj->suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		for (i = 0; i < gpuobj->size; i += 4)
			gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
	}

	return 0;
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->suspend)
			continue;

		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);

		vfree(gpuobj->suspend);
		gpuobj->suspend = NULL;
	}

	dev_priv->engine.instmem.flush(dev);
}

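/* Indirect instance-memory accessors (nv_ro32() here, nv_wo32() below).
 * When the object has no PRAMIN mapping (pinst == ~0, or RAMIN isn't
 * available yet), the 64KiB window at MMIO 0x700000 is slid over the
 * object's vinst by programming its page into register 0x001700, under
 * vm_lock; otherwise plain RAMIN reads/writes via pinst are used.
 */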
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;
	unsigned long flags;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64  ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;
		u32  val;

		spin_lock_irqsave(&dev_priv->vm_lock, flags);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
		return val;
	}

	return nv_ri32(dev, gpuobj->pinst + offset);
}

void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;
	unsigned long flags;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64  ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;

		spin_lock_irqsave(&dev_priv->vm_lock, flags);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
		return;
	}

	nv_wi32(dev, gpuobj->pinst + offset, val);
}