/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <drm/drmP.h>

#include "nouveau_drv.h"
#include "nouveau_mm.h"
#include "nouveau_fifo.h"

#define NVE0_FIFO_ENGINE_NUM 32

static void nve0_fifo_isr(struct drm_device *);

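/*
 * Per-engine playlist (runlist) state.  Two buffers are kept and alternated
 * on every update, so a fresh list can be written while the hardware may
 * still be reading the previous one.
 */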
struct nve0_fifo_engine {
        struct nouveau_gpuobj *playlist[2];
        int cur_playlist;
};

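/*
 * Private PFIFO state: per-engine playlists, the USER area backing object
 * and its BAR mapping (one 512-byte control page per channel), and the
 * number of PSUBFIFO units detected at init time.
 */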
struct nve0_fifo_priv {
        struct nouveau_fifo_priv base;
        struct nve0_fifo_engine engine[NVE0_FIFO_ENGINE_NUM];
        struct {
                struct nouveau_gpuobj *mem;
                struct nouveau_vma bar;
        } user;
        int spoon_nr;
};

struct nve0_fifo_chan {
        struct nouveau_fifo_chan base;
        u32 engine;
};

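/*
 * Rebuild the playlist for @engine from scratch: scan the channel control
 * area (0x800000+) for enabled channels bound to this engine, write their
 * IDs into the current playlist buffer, submit it through 0x002270/0x002274
 * and wait for the hardware to finish processing it.
 */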
static void
nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
        struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
        struct nve0_fifo_engine *peng = &priv->engine[engine];
        struct nouveau_gpuobj *cur;
        u32 match = (engine << 16) | 0x00000001;
        int ret, i, p;

        cur = peng->playlist[peng->cur_playlist];
        if (unlikely(cur == NULL)) {
                ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 0x1000, 0, &cur);
                if (ret) {
                        NV_ERROR(dev, "PFIFO: playlist alloc failed\n");
                        return;
                }

                peng->playlist[peng->cur_playlist] = cur;
        }

        peng->cur_playlist = !peng->cur_playlist;

        for (i = 0, p = 0; i < priv->base.channels; i++) {
                u32 ctrl = nv_rd32(dev, 0x800004 + (i * 8)) & 0x001f0001;
                if (ctrl != match)
                        continue;
                nv_wo32(cur, p + 0, i);
                nv_wo32(cur, p + 4, 0x00000000);
                p += 8;
        }
        pinstmem->flush(dev);

        nv_wr32(dev, 0x002270, cur->vinst >> 12);
        nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
        if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
                NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
}

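/*
 * Set up FIFO state for a new channel: map its slice of the USER area,
 * fill in the channel's instance block (USER address, indirect pushbuffer
 * address/size, plus a number of fixed initialisation values), then enable
 * the channel and add it to the engine's playlist.
 */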
static int
nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
        struct nve0_fifo_priv *priv = nv_engine(dev, engine);
        struct nve0_fifo_chan *fctx;
        u64 usermem = priv->user.mem->vinst + chan->id * 512;
        u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
        int ret = 0, i;

        fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
        if (!fctx)
                return -ENOMEM;

        fctx->engine = 0; /* PGRAPH */

        /* allocate vram for control regs, map into polling area */
        chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
                                priv->user.bar.offset + (chan->id * 512), 512);
        if (!chan->user) {
                ret = -ENOMEM;
                goto error;
        }

        for (i = 0; i < 0x100; i += 4)
                nv_wo32(chan->ramin, i, 0x00000000);
        nv_wo32(chan->ramin, 0x08, lower_32_bits(usermem));
        nv_wo32(chan->ramin, 0x0c, upper_32_bits(usermem));
        nv_wo32(chan->ramin, 0x10, 0x0000face);
        nv_wo32(chan->ramin, 0x30, 0xfffff902);
        nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
        nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
                                   upper_32_bits(ib_virt));
        nv_wo32(chan->ramin, 0x84, 0x20400000);
        nv_wo32(chan->ramin, 0x94, 0x30000001);
        nv_wo32(chan->ramin, 0x9c, 0x00000100);
        nv_wo32(chan->ramin, 0xac, 0x0000001f);
        nv_wo32(chan->ramin, 0xe4, 0x00000000);
        nv_wo32(chan->ramin, 0xe8, chan->id);
        nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
        nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
        pinstmem->flush(dev);

        nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
                                                (chan->ramin->vinst >> 12));
        nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
        nve0_fifo_playlist_update(dev, fctx->engine);
        nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);

error:
        if (ret)
                priv->base.base.context_del(chan, engine);
        return ret;
}

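/*
 * Tear down a channel: kick it off the hardware and wait for the kick to
 * complete, remove it from the playlist, clear its control block entry and
 * unmap its USER page.
 */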
static void
nve0_fifo_context_del(struct nouveau_channel *chan, int engine)
{
        struct nve0_fifo_chan *fctx = chan->engctx[engine];
        struct drm_device *dev = chan->dev;

        nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
        nv_wr32(dev, 0x002634, chan->id);
        if (!nv_wait(dev, 0x002634, 0xffffffff, chan->id))
                NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
        nve0_fifo_playlist_update(dev, fctx->engine);
        nv_wr32(dev, 0x800000 + (chan->id * 8), 0x00000000);

        if (chan->user) {
                iounmap(chan->user);
                chan->user = NULL;
        }

        chan->engctx[NVOBJ_ENGINE_FIFO] = NULL;
        kfree(fctx);
}

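/*
 * Bring up PFIFO: reset the unit, enable every advertised PSUBFIFO, point
 * the hardware at the USER area, unmask interrupts and re-register any
 * channels that already have FIFO state (e.g. when restoring after suspend).
 */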
static int
nve0_fifo_init(struct drm_device *dev, int engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nve0_fifo_priv *priv = nv_engine(dev, engine);
        struct nve0_fifo_chan *fctx;
        int i;

        /* reset PFIFO, enable all available PSUBFIFO areas */
        nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
        nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
        nv_wr32(dev, 0x000204, 0xffffffff);

        priv->spoon_nr = hweight32(nv_rd32(dev, 0x000204));
        NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);

        /* PSUBFIFO[n] */
        for (i = 0; i < priv->spoon_nr; i++) {
                nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
                nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
                nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
        }

        nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);

        nv_wr32(dev, 0x002a00, 0xffffffff);
        nv_wr32(dev, 0x002100, 0xffffffff);
        nv_wr32(dev, 0x002140, 0xbfffffff);

        /* restore PFIFO context table */
        for (i = 0; i < priv->base.channels; i++) {
                struct nouveau_channel *chan = dev_priv->channels.ptr[i];
                if (!chan || !(fctx = chan->engctx[engine]))
                        continue;

                nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
                                                 (chan->ramin->vinst >> 12));
                nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
                nve0_fifo_playlist_update(dev, fctx->engine);
                nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
        }

        return 0;
}

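/*
 * Quiesce PFIFO for suspend/teardown: kick every active channel off the
 * hardware, then mask all PFIFO interrupts.
 */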
static int
nve0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
{
        struct nve0_fifo_priv *priv = nv_engine(dev, engine);
        int i;

        for (i = 0; i < priv->base.channels; i++) {
                if (!(nv_rd32(dev, 0x800004 + (i * 8)) & 1))
                        continue;

                nv_mask(dev, 0x800004 + (i * 8), 0x00000800, 0x00000800);
                nv_wr32(dev, 0x002634, i);
                if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
                        NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
                                i, nv_rd32(dev, 0x002634));
                        return -EBUSY;
                }
        }

        nv_wr32(dev, 0x002140, 0x00000000);
        return 0;
}

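/*
 * Fault decoding tables used by the VM fault handler below.  Only the fault
 * reasons are filled in at this point; unit and client names are left empty.
 */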
struct nouveau_enum nve0_fifo_fault_unit[] = {
        {}
};

struct nouveau_enum nve0_fifo_fault_reason[] = {
        { 0x00, "PT_NOT_PRESENT" },
        { 0x01, "PT_TOO_SHORT" },
        { 0x02, "PAGE_NOT_PRESENT" },
        { 0x03, "VM_LIMIT_EXCEEDED" },
        { 0x04, "NO_CHANNEL" },
        { 0x05, "PAGE_SYSTEM_ONLY" },
        { 0x06, "PAGE_READ_ONLY" },
        { 0x0a, "COMPRESSED_SYSRAM" },
        { 0x0c, "INVALID_STORAGE_TYPE" },
        {}
};

struct nouveau_enum nve0_fifo_fault_hubclient[] = {
        {}
};

struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
        {}
};

struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
        { 0x00200000, "ILLEGAL_MTHD" },
        { 0x00800000, "EMPTY_SUBC" },
        {}
};

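/*
 * Decode and log a VM fault for the given fault unit.  The per-unit register
 * block at 0x2800 provides the faulting instance address, virtual address
 * and a status word describing the access type and client.
 */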
static void
nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
{
        u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
        u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
        u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
        u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
        u32 client = (stat & 0x00001f00) >> 8;

        NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
                (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
        nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
        printk("] from ");
        nouveau_enum_print(nve0_fifo_fault_unit, unit);
        if (stat & 0x00000040) {
                printk("/");
                nouveau_enum_print(nve0_fifo_fault_hubclient, client);
        } else {
                printk("/GPC%d/", (stat & 0x1f000000) >> 24);
                nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
        }
        printk(" on channel 0x%010llx\n", (u64)inst << 12);
}

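/*
 * Handle a page-flip completion notification: look up the channel that
 * raised it and finish any pending flip on that channel.
 */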
static int
nve0_fifo_page_flip(struct drm_device *dev, u32 chid)
{
        struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = NULL;
        unsigned long flags;
        int ret = -EINVAL;

        spin_lock_irqsave(&dev_priv->channels.lock, flags);
        if (likely(chid >= 0 && chid < priv->base.channels)) {
                chan = dev_priv->channels.ptr[chid];
                if (likely(chan))
                        ret = nouveau_finish_page_flip(chan, NULL);
        }
        spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
        return ret;
}

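/*
 * Handle a PSUBFIFO interrupt.  ILLEGAL_MTHD on method 0x0054 is used as the
 * page-flip completion signal and is consumed silently; anything else is
 * decoded and logged.  The interrupt is acknowledged in either case.
 */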
static void
nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
{
        u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
        u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
        u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
        u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
        u32 subc = (addr & 0x00070000) >> 16;
        u32 mthd = (addr & 0x00003ffc);
        u32 show = stat;

        if (stat & 0x00200000) {
                if (mthd == 0x0054) {
                        if (!nve0_fifo_page_flip(dev, chid))
                                show &= ~0x00200000;
                }
        }

        if (show) {
                NV_INFO(dev, "PFIFO%d:", unit);
                nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
                NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
                        unit, chid, subc, mthd, data);
        }

        nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
        nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
}

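/*
 * Top-level PFIFO interrupt handler: dispatch VM faults and PSUBFIFO
 * interrupts to their handlers, and mask anything unhandled so it cannot
 * keep firing.
 */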
static void
nve0_fifo_isr(struct drm_device *dev)
{
        u32 mask = nv_rd32(dev, 0x002140);
        u32 stat = nv_rd32(dev, 0x002100) & mask;

        if (stat & 0x00000100) {
                NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
                nv_wr32(dev, 0x002100, 0x00000100);
                stat &= ~0x00000100;
        }

        if (stat & 0x10000000) {
                u32 units = nv_rd32(dev, 0x00259c);
                u32 u = units;

                while (u) {
                        int i = ffs(u) - 1;
                        nve0_fifo_isr_vm_fault(dev, i);
                        u &= ~(1 << i);
                }

                nv_wr32(dev, 0x00259c, units);
                stat &= ~0x10000000;
        }

        if (stat & 0x20000000) {
                u32 units = nv_rd32(dev, 0x0025a0);
                u32 u = units;

                while (u) {
                        int i = ffs(u) - 1;
                        nve0_fifo_isr_subfifo_intr(dev, i);
                        u &= ~(1 << i);
                }

                nv_wr32(dev, 0x0025a0, units);
                stat &= ~0x20000000;
        }

        if (stat & 0x40000000) {
                NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
                nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
                stat &= ~0x40000000;
        }

        if (stat) {
                NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
                nv_wr32(dev, 0x002100, stat);
                nv_wr32(dev, 0x002140, 0);
        }
}

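/*
 * Free everything allocated by nve0_fifo_create(): the USER area mapping
 * and backing object, and both playlist buffers of every engine.
 */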
static void
nve0_fifo_destroy(struct drm_device *dev, int engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nve0_fifo_priv *priv = nv_engine(dev, engine);
        int i;

        nouveau_vm_put(&priv->user.bar);
        nouveau_gpuobj_ref(NULL, &priv->user.mem);

        for (i = 0; i < NVE0_FIFO_ENGINE_NUM; i++) {
                nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
                nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
        }

        dev_priv->eng[engine] = NULL;
        kfree(priv);
}

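/*
 * Construct the NVE0 PFIFO engine: register the engine hooks, allocate the
 * USER area (512 bytes per channel) and map it through BAR1, and hook up
 * the PFIFO interrupt handler.
 */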
int
nve0_fifo_create(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nve0_fifo_priv *priv;
        int ret;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->base.base.destroy = nve0_fifo_destroy;
        priv->base.base.init = nve0_fifo_init;
        priv->base.base.fini = nve0_fifo_fini;
        priv->base.base.context_new = nve0_fifo_context_new;
        priv->base.base.context_del = nve0_fifo_context_del;
        priv->base.channels = 4096;
        dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;

        ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 512, 0x1000,
                                 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
        if (ret)
                goto error;

        ret = nouveau_vm_get(dev_priv->bar1_vm, priv->user.mem->size,
                             12, NV_MEM_ACCESS_RW, &priv->user.bar);
        if (ret)
                goto error;

        nouveau_vm_map(&priv->user.bar, *(struct nouveau_mem **)priv->user.mem->node);

        nouveau_irq_register(dev, 8, nve0_fifo_isr);
error:
        if (ret)
                priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
        return ret;
}