drm/nouveau: make fifo.create_context() responsible for mapping control regs
drivers/gpu/drm/nouveau/nv50_fifo.c
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"

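/* PFIFO runs channels from a "playlist" of channel IDs.  Two playlist
 * buffers are kept and alternated between on each update; the new list is
 * filled with every channel that currently has a RAMFC, its address and
 * entry count are written to 0x32f4/0x32ec, and the write to 0x2500
 * presumably tells PFIFO to switch over to it.
 */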
static void
nv50_fifo_playlist_update(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        struct nouveau_gpuobj *cur;
        int i, nr;

        NV_DEBUG(dev, "\n");

        cur = pfifo->playlist[pfifo->cur_playlist];
        pfifo->cur_playlist = !pfifo->cur_playlist;

        /* We never schedule channel 0 or 127 */
        for (i = 1, nr = 0; i < 127; i++) {
                if (dev_priv->channels.ptr[i] &&
                    dev_priv->channels.ptr[i]->ramfc) {
                        nv_wo32(cur, (nr * 4), i);
                        nr++;
                }
        }
        dev_priv->engine.instmem.flush(dev);

        nv_wr32(dev, 0x32f4, cur->vinst >> 12);
        nv_wr32(dev, 0x32ec, nr);
        nv_wr32(dev, 0x2500, 0x101);
}

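/* Point the channel's entry in the PFIFO context table at its RAMFC and
 * mark it valid.  The original NV50 stores the instance address with a
 * different shift than G84 and later chipsets.
 */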
static void
nv50_fifo_channel_enable(struct drm_device *dev, int channel)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = dev_priv->channels.ptr[channel];
        uint32_t inst;

        NV_DEBUG(dev, "ch%d\n", channel);

        if (dev_priv->chipset == 0x50)
                inst = chan->ramfc->vinst >> 12;
        else
                inst = chan->ramfc->vinst >> 8;

        nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst |
                     NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
}

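/* Clear the channel's context table entry so PFIFO no longer treats it as
 * valid.
 */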
static void
nv50_fifo_channel_disable(struct drm_device *dev, int channel)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t inst;

        NV_DEBUG(dev, "ch%d\n", channel);

        if (dev_priv->chipset == 0x50)
                inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
        else
                inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
        nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);
}

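/* Reset PFIFO by toggling its bit in PMC_ENABLE. */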
static void
nv50_fifo_init_reset(struct drm_device *dev)
{
        uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;

        NV_DEBUG(dev, "\n");

        nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
        nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |  pmc_e);
}

static void
nv50_fifo_init_intr(struct drm_device *dev)
{
        NV_DEBUG(dev, "\n");

        nouveau_irq_register(dev, 8, nv04_fifo_isr);
        nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
        nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
}

static void
nv50_fifo_init_context_table(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int i;

        NV_DEBUG(dev, "\n");

        for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
                if (dev_priv->channels.ptr[i])
                        nv50_fifo_channel_enable(dev, i);
                else
                        nv50_fifo_channel_disable(dev, i);
        }

        nv50_fifo_playlist_update(dev);
}

static void
nv50_fifo_init_regs__nv(struct drm_device *dev)
{
        NV_DEBUG(dev, "\n");

        nv_wr32(dev, 0x250c, 0x6f3cfc34);
}

static void
nv50_fifo_init_regs(struct drm_device *dev)
{
        NV_DEBUG(dev, "\n");

        nv_wr32(dev, 0x2500, 0);
        nv_wr32(dev, 0x3250, 0);
        nv_wr32(dev, 0x3220, 0);
        nv_wr32(dev, 0x3204, 0);
        nv_wr32(dev, 0x3210, 0);
        nv_wr32(dev, 0x3270, 0);

        /* Enable dummy channels set up by nv50_instmem.c */
        nv50_fifo_channel_enable(dev, 0);
        nv50_fifo_channel_enable(dev, 127);
}

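/* Full PFIFO initialisation.  The two playlist buffers are allocated on the
 * first call only; on subsequent calls (e.g. resume) the other playlist is
 * selected and just the reset/reprogramming below is redone.
 */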
int
nv50_fifo_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        int ret;

        NV_DEBUG(dev, "\n");

        if (pfifo->playlist[0]) {
                pfifo->cur_playlist = !pfifo->cur_playlist;
                goto just_reset;
        }

        ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
                                 NVOBJ_FLAG_ZERO_ALLOC,
                                 &pfifo->playlist[0]);
        if (ret) {
                NV_ERROR(dev, "error creating playlist 0: %d\n", ret);
                return ret;
        }

        ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
                                 NVOBJ_FLAG_ZERO_ALLOC,
                                 &pfifo->playlist[1]);
        if (ret) {
                nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
                NV_ERROR(dev, "error creating playlist 1: %d\n", ret);
                return ret;
        }

just_reset:
        nv50_fifo_init_reset(dev);
        nv50_fifo_init_intr(dev);
        nv50_fifo_init_context_table(dev);
        nv50_fifo_init_regs__nv(dev);
        nv50_fifo_init_regs(dev);
        dev_priv->engine.fifo.enable(dev);
        dev_priv->engine.fifo.reassign(dev, true);

        return 0;
}

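/* Undo nv50_fifo_init(): mask PFIFO interrupts, drop the IRQ handler and
 * release the playlist buffers.
 */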
void
nv50_fifo_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;

        NV_DEBUG(dev, "\n");

        if (!pfifo->playlist[0])
                return;

        nv_wr32(dev, 0x2140, 0x00000000);
        nouveau_irq_unregister(dev, 8);

        nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
        nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
}

int
nv50_fifo_channel_id(struct drm_device *dev)
{
        return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
                        NV50_PFIFO_CACHE1_PUSH1_CHID_MASK;
}

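/* Set up PFIFO state for a new channel: allocate the RAMFC and CACHE1
 * backing objects (on the original NV50 these are overlaid onto fixed
 * offsets within the channel's RAMIN), map the channel's USER control
 * registers (create_context() is responsible for this mapping now), fill in
 * the initial RAMFC contents, and add the channel to the context table and
 * playlist.
 */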
int
nv50_fifo_create_context(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *ramfc = NULL;
        unsigned long flags;
        int ret;

        NV_DEBUG(dev, "ch%d\n", chan->id);

        if (dev_priv->chipset == 0x50) {
                ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
                                              chan->ramin->vinst, 0x100,
                                              NVOBJ_FLAG_ZERO_ALLOC |
                                              NVOBJ_FLAG_ZERO_FREE,
                                              &chan->ramfc);
                if (ret)
                        return ret;

                ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst + 0x0400,
                                              chan->ramin->vinst + 0x0400,
                                              4096, 0, &chan->cache);
                if (ret)
                        return ret;
        } else {
                ret = nouveau_gpuobj_new(dev, chan, 0x100, 256,
                                         NVOBJ_FLAG_ZERO_ALLOC |
                                         NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
                if (ret)
                        return ret;

                ret = nouveau_gpuobj_new(dev, chan, 4096, 1024,
                                         0, &chan->cache);
                if (ret)
                        return ret;
        }
        ramfc = chan->ramfc;

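        /* Map this channel's control registers (USER area) from BAR0;
         * create_context() is responsible for this mapping now.
         */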
        chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
                             NV50_USER(chan->id), PAGE_SIZE);
        if (!chan->user)
                return -ENOMEM;

        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

        nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
        nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
                             (4 << 24) /* SEARCH_FULL */ |
                             (chan->ramht->gpuobj->cinst >> 4));
        nv_wo32(ramfc, 0x44, 0x2101ffff);
        nv_wo32(ramfc, 0x60, 0x7fffffff);
        nv_wo32(ramfc, 0x40, 0x00000000);
        nv_wo32(ramfc, 0x7c, 0x30000001);
        nv_wo32(ramfc, 0x78, 0x00000000);
        nv_wo32(ramfc, 0x3c, 0x403f6078);
        nv_wo32(ramfc, 0x50, chan->pushbuf_base + chan->dma.ib_base * 4);
        nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max + 1) << 16);

        if (dev_priv->chipset != 0x50) {
                nv_wo32(chan->ramin, 0, chan->id);
                nv_wo32(chan->ramin, 4, chan->ramfc->vinst >> 8);

                nv_wo32(ramfc, 0x88, chan->cache->vinst >> 10);
                nv_wo32(ramfc, 0x98, chan->ramin->vinst >> 12);
        }

        dev_priv->engine.instmem.flush(dev);

        nv50_fifo_channel_enable(dev, chan->id);
        nv50_fifo_playlist_update(dev);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
        return 0;
}

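/* Tear down a channel's PFIFO state: unload it if it is the active channel,
 * remove it from the context table and playlist, then unmap its control
 * registers and release the RAMFC/cache objects.  A temporary reference
 * keeps the RAMFC alive until after the playlist update, so the channel is
 * already seen as disabled while that happens.
 */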
void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        struct nouveau_gpuobj *ramfc = NULL;
        unsigned long flags;

        NV_DEBUG(dev, "ch%d\n", chan->id);

        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
        pfifo->reassign(dev, false);

        /* Unload the context if it's the currently active one */
        if (pfifo->channel_id(dev) == chan->id) {
                pfifo->disable(dev);
                pfifo->unload_context(dev);
                pfifo->enable(dev);
        }

        /* This will ensure the channel is seen as disabled. */
        nouveau_gpuobj_ref(chan->ramfc, &ramfc);
        nouveau_gpuobj_ref(NULL, &chan->ramfc);
        nv50_fifo_channel_disable(dev, chan->id);

        /* Channel 0's dummy context is also used by ch127 */
        if (chan->id == 0)
                nv50_fifo_channel_disable(dev, 127);
        nv50_fifo_playlist_update(dev);

        pfifo->reassign(dev, true);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

        /* Free the channel resources */
        if (chan->user) {
                iounmap(chan->user);
                chan->user = NULL;
        }
        nouveau_gpuobj_ref(NULL, &ramfc);
        nouveau_gpuobj_ref(NULL, &chan->cache);
}

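/* Restore a channel's saved RAMFC contents into the PFIFO context
 * registers, replay any methods saved in its cache object back into CACHE1,
 * and make the channel current via CACHE1_PUSH1.
 */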
int
nv50_fifo_load_context(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *ramfc = chan->ramfc;
        struct nouveau_gpuobj *cache = chan->cache;
        int ptr, cnt;

        NV_DEBUG(dev, "ch%d\n", chan->id);

        nv_wr32(dev, 0x3330, nv_ro32(ramfc, 0x00));
        nv_wr32(dev, 0x3334, nv_ro32(ramfc, 0x04));
        nv_wr32(dev, 0x3240, nv_ro32(ramfc, 0x08));
        nv_wr32(dev, 0x3320, nv_ro32(ramfc, 0x0c));
        nv_wr32(dev, 0x3244, nv_ro32(ramfc, 0x10));
        nv_wr32(dev, 0x3328, nv_ro32(ramfc, 0x14));
        nv_wr32(dev, 0x3368, nv_ro32(ramfc, 0x18));
        nv_wr32(dev, 0x336c, nv_ro32(ramfc, 0x1c));
        nv_wr32(dev, 0x3370, nv_ro32(ramfc, 0x20));
        nv_wr32(dev, 0x3374, nv_ro32(ramfc, 0x24));
        nv_wr32(dev, 0x3378, nv_ro32(ramfc, 0x28));
        nv_wr32(dev, 0x337c, nv_ro32(ramfc, 0x2c));
        nv_wr32(dev, 0x3228, nv_ro32(ramfc, 0x30));
        nv_wr32(dev, 0x3364, nv_ro32(ramfc, 0x34));
        nv_wr32(dev, 0x32a0, nv_ro32(ramfc, 0x38));
        nv_wr32(dev, 0x3224, nv_ro32(ramfc, 0x3c));
        nv_wr32(dev, 0x324c, nv_ro32(ramfc, 0x40));
        nv_wr32(dev, 0x2044, nv_ro32(ramfc, 0x44));
        nv_wr32(dev, 0x322c, nv_ro32(ramfc, 0x48));
        nv_wr32(dev, 0x3234, nv_ro32(ramfc, 0x4c));
        nv_wr32(dev, 0x3340, nv_ro32(ramfc, 0x50));
        nv_wr32(dev, 0x3344, nv_ro32(ramfc, 0x54));
        nv_wr32(dev, 0x3280, nv_ro32(ramfc, 0x58));
        nv_wr32(dev, 0x3254, nv_ro32(ramfc, 0x5c));
        nv_wr32(dev, 0x3260, nv_ro32(ramfc, 0x60));
        nv_wr32(dev, 0x3264, nv_ro32(ramfc, 0x64));
        nv_wr32(dev, 0x3268, nv_ro32(ramfc, 0x68));
        nv_wr32(dev, 0x326c, nv_ro32(ramfc, 0x6c));
        nv_wr32(dev, 0x32e4, nv_ro32(ramfc, 0x70));
        nv_wr32(dev, 0x3248, nv_ro32(ramfc, 0x74));
        nv_wr32(dev, 0x2088, nv_ro32(ramfc, 0x78));
        nv_wr32(dev, 0x2058, nv_ro32(ramfc, 0x7c));
        nv_wr32(dev, 0x2210, nv_ro32(ramfc, 0x80));

        cnt = nv_ro32(ramfc, 0x84);
        for (ptr = 0; ptr < cnt; ptr++) {
                nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
                        nv_ro32(cache, (ptr * 8) + 0));
                nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
                        nv_ro32(cache, (ptr * 8) + 4));
        }
        nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
        nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);

        /* guessing that all the 0x34xx regs aren't on NV50 */
        if (dev_priv->chipset != 0x50) {
                nv_wr32(dev, 0x340c, nv_ro32(ramfc, 0x88));
                nv_wr32(dev, 0x3400, nv_ro32(ramfc, 0x8c));
                nv_wr32(dev, 0x3404, nv_ro32(ramfc, 0x90));
                nv_wr32(dev, 0x3408, nv_ro32(ramfc, 0x94));
                nv_wr32(dev, 0x3410, nv_ro32(ramfc, 0x98));
        }

        nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
        return 0;
}

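/* The reverse of load_context: save the PFIFO context registers of
 * whichever channel is currently active back into its RAMFC, drain CACHE1
 * into the channel's cache object, then point PFIFO at ch127 (the dummy
 * channel).
 */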
int
nv50_fifo_unload_context(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        struct nouveau_gpuobj *ramfc, *cache;
        struct nouveau_channel *chan = NULL;
        int chid, get, put, ptr;

        NV_DEBUG(dev, "\n");

        chid = pfifo->channel_id(dev);
        if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
                return 0;

        chan = dev_priv->channels.ptr[chid];
        if (!chan) {
                NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
                return -EINVAL;
        }
        NV_DEBUG(dev, "ch%d\n", chan->id);
        ramfc = chan->ramfc;
        cache = chan->cache;

        nv_wo32(ramfc, 0x00, nv_rd32(dev, 0x3330));
        nv_wo32(ramfc, 0x04, nv_rd32(dev, 0x3334));
        nv_wo32(ramfc, 0x08, nv_rd32(dev, 0x3240));
        nv_wo32(ramfc, 0x0c, nv_rd32(dev, 0x3320));
        nv_wo32(ramfc, 0x10, nv_rd32(dev, 0x3244));
        nv_wo32(ramfc, 0x14, nv_rd32(dev, 0x3328));
        nv_wo32(ramfc, 0x18, nv_rd32(dev, 0x3368));
        nv_wo32(ramfc, 0x1c, nv_rd32(dev, 0x336c));
        nv_wo32(ramfc, 0x20, nv_rd32(dev, 0x3370));
        nv_wo32(ramfc, 0x24, nv_rd32(dev, 0x3374));
        nv_wo32(ramfc, 0x28, nv_rd32(dev, 0x3378));
        nv_wo32(ramfc, 0x2c, nv_rd32(dev, 0x337c));
        nv_wo32(ramfc, 0x30, nv_rd32(dev, 0x3228));
        nv_wo32(ramfc, 0x34, nv_rd32(dev, 0x3364));
        nv_wo32(ramfc, 0x38, nv_rd32(dev, 0x32a0));
        nv_wo32(ramfc, 0x3c, nv_rd32(dev, 0x3224));
        nv_wo32(ramfc, 0x40, nv_rd32(dev, 0x324c));
        nv_wo32(ramfc, 0x44, nv_rd32(dev, 0x2044));
        nv_wo32(ramfc, 0x48, nv_rd32(dev, 0x322c));
        nv_wo32(ramfc, 0x4c, nv_rd32(dev, 0x3234));
        nv_wo32(ramfc, 0x50, nv_rd32(dev, 0x3340));
        nv_wo32(ramfc, 0x54, nv_rd32(dev, 0x3344));
        nv_wo32(ramfc, 0x58, nv_rd32(dev, 0x3280));
        nv_wo32(ramfc, 0x5c, nv_rd32(dev, 0x3254));
        nv_wo32(ramfc, 0x60, nv_rd32(dev, 0x3260));
        nv_wo32(ramfc, 0x64, nv_rd32(dev, 0x3264));
        nv_wo32(ramfc, 0x68, nv_rd32(dev, 0x3268));
        nv_wo32(ramfc, 0x6c, nv_rd32(dev, 0x326c));
        nv_wo32(ramfc, 0x70, nv_rd32(dev, 0x32e4));
        nv_wo32(ramfc, 0x74, nv_rd32(dev, 0x3248));
        nv_wo32(ramfc, 0x78, nv_rd32(dev, 0x2088));
        nv_wo32(ramfc, 0x7c, nv_rd32(dev, 0x2058));
        nv_wo32(ramfc, 0x80, nv_rd32(dev, 0x2210));

        put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
        get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
        ptr = 0;
        while (put != get) {
                nv_wo32(cache, ptr + 0,
                        nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
                nv_wo32(cache, ptr + 4,
                        nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
                get = (get + 1) & 0x1ff;
                ptr += 8;
        }

        /* guessing that all the 0x34xx regs aren't on NV50 */
        if (dev_priv->chipset != 0x50) {
                nv_wo32(ramfc, 0x84, ptr >> 3);
                nv_wo32(ramfc, 0x88, nv_rd32(dev, 0x340c));
                nv_wo32(ramfc, 0x8c, nv_rd32(dev, 0x3400));
                nv_wo32(ramfc, 0x90, nv_rd32(dev, 0x3404));
                nv_wo32(ramfc, 0x94, nv_rd32(dev, 0x3408));
                nv_wo32(ramfc, 0x98, nv_rd32(dev, 0x3410));
        }

        dev_priv->engine.instmem.flush(dev);

        /*XXX: probably reload ch127 (NULL) state back too */
        nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
        return 0;
}

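/* Flush the VM TLB for engine 5, which appears to be PFIFO's engine id in
 * the nv50 VM flush scheme.
 */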
void
nv50_fifo_tlb_flush(struct drm_device *dev)
{
        nv50_vm_flush(dev, 5);
}