2 * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
24 #include <engine/gr.h>
27 #include <core/client.h>
28 #include <engine/fifo.h>
29 #include <engine/fifo/chan.h>
30 #include <subdev/fb.h>
/*
 * Saved 3D pipe state, one u32 array per PIPE_ADDRESS range: the name
 * encodes the NV10_PGRAPH_PIPE_ADDRESS base, the array length the size
 * in bytes of that range (hence the /4 for u32 words).
 * NOTE(review): the enclosing "struct pipe_state {" line and closing
 * brace are missing from this extraction.
 */
33 u32 pipe_0x0000[0x040/4];
34 u32 pipe_0x0040[0x010/4];
35 u32 pipe_0x0200[0x0c0/4];
36 u32 pipe_0x4400[0x080/4];
37 u32 pipe_0x6400[0x3b0/4];
38 u32 pipe_0x6800[0x2f0/4];
39 u32 pipe_0x6c00[0x030/4];
40 u32 pipe_0x7000[0x130/4];
41 u32 pipe_0x7400[0x0c0/4];
42 u32 pipe_0x7800[0x0c0/4];
/*
 * MMIO offsets of the PGRAPH registers that make up an NV10 graphics
 * context.  nv10_gr_unload_context() snapshots each of these into
 * chan->nv10[] and nv10_gr_load_context() writes them back, in this
 * exact order.  NOTE(review): some entries and the closing "};" are
 * missing from this extraction.
 */
45 static int nv10_gr_ctx_regs[] = {
46 NV10_PGRAPH_CTX_SWITCH(0),
47 NV10_PGRAPH_CTX_SWITCH(1),
48 NV10_PGRAPH_CTX_SWITCH(2),
49 NV10_PGRAPH_CTX_SWITCH(3),
50 NV10_PGRAPH_CTX_SWITCH(4),
51 NV10_PGRAPH_CTX_CACHE(0, 0),
52 NV10_PGRAPH_CTX_CACHE(0, 1),
53 NV10_PGRAPH_CTX_CACHE(0, 2),
54 NV10_PGRAPH_CTX_CACHE(0, 3),
55 NV10_PGRAPH_CTX_CACHE(0, 4),
56 NV10_PGRAPH_CTX_CACHE(1, 0),
57 NV10_PGRAPH_CTX_CACHE(1, 1),
58 NV10_PGRAPH_CTX_CACHE(1, 2),
59 NV10_PGRAPH_CTX_CACHE(1, 3),
60 NV10_PGRAPH_CTX_CACHE(1, 4),
61 NV10_PGRAPH_CTX_CACHE(2, 0),
62 NV10_PGRAPH_CTX_CACHE(2, 1),
63 NV10_PGRAPH_CTX_CACHE(2, 2),
64 NV10_PGRAPH_CTX_CACHE(2, 3),
65 NV10_PGRAPH_CTX_CACHE(2, 4),
66 NV10_PGRAPH_CTX_CACHE(3, 0),
67 NV10_PGRAPH_CTX_CACHE(3, 1),
68 NV10_PGRAPH_CTX_CACHE(3, 2),
69 NV10_PGRAPH_CTX_CACHE(3, 3),
70 NV10_PGRAPH_CTX_CACHE(3, 4),
71 NV10_PGRAPH_CTX_CACHE(4, 0),
72 NV10_PGRAPH_CTX_CACHE(4, 1),
73 NV10_PGRAPH_CTX_CACHE(4, 2),
74 NV10_PGRAPH_CTX_CACHE(4, 3),
75 NV10_PGRAPH_CTX_CACHE(4, 4),
76 NV10_PGRAPH_CTX_CACHE(5, 0),
77 NV10_PGRAPH_CTX_CACHE(5, 1),
78 NV10_PGRAPH_CTX_CACHE(5, 2),
79 NV10_PGRAPH_CTX_CACHE(5, 3),
80 NV10_PGRAPH_CTX_CACHE(5, 4),
81 NV10_PGRAPH_CTX_CACHE(6, 0),
82 NV10_PGRAPH_CTX_CACHE(6, 1),
83 NV10_PGRAPH_CTX_CACHE(6, 2),
84 NV10_PGRAPH_CTX_CACHE(6, 3),
85 NV10_PGRAPH_CTX_CACHE(6, 4),
86 NV10_PGRAPH_CTX_CACHE(7, 0),
87 NV10_PGRAPH_CTX_CACHE(7, 1),
88 NV10_PGRAPH_CTX_CACHE(7, 2),
89 NV10_PGRAPH_CTX_CACHE(7, 3),
90 NV10_PGRAPH_CTX_CACHE(7, 4),
92 NV04_PGRAPH_DMA_START_0,
93 NV04_PGRAPH_DMA_START_1,
94 NV04_PGRAPH_DMA_LENGTH,
96 NV10_PGRAPH_DMA_PITCH,
100 NV04_PGRAPH_BOFFSET1,
103 NV04_PGRAPH_BOFFSET2,
106 NV04_PGRAPH_BOFFSET3,
109 NV04_PGRAPH_BOFFSET4,
112 NV04_PGRAPH_BOFFSET5,
122 NV04_PGRAPH_BSWIZZLE2,
123 NV04_PGRAPH_BSWIZZLE5,
126 NV04_PGRAPH_PATT_COLOR0,
127 NV04_PGRAPH_PATT_COLOR1,
128 NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
192 NV04_PGRAPH_PATTERN, /* 2 values from 0x400808 to 0x40080c */
194 NV04_PGRAPH_PATTERN_SHAPE,
195 NV03_PGRAPH_MONO_COLOR0,
198 NV04_PGRAPH_BETA_AND,
199 NV04_PGRAPH_BETA_PREMULT,
215 NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */
216 NV10_PGRAPH_WINDOWCLIP_VERTICAL, /* 8 values from 0x400f20-0x400f3c */
233 NV10_PGRAPH_GLOBALSTATE0,
234 NV10_PGRAPH_GLOBALSTATE1,
235 NV04_PGRAPH_STORED_FMT,
236 NV04_PGRAPH_SOURCE_COLOR,
237 NV03_PGRAPH_ABS_X_RAM, /* 32 values from 0x400400 to 0x40047c */
238 NV03_PGRAPH_ABS_Y_RAM, /* 32 values from 0x400480 to 0x4004fc */
301 NV03_PGRAPH_ABS_UCLIP_XMIN,
302 NV03_PGRAPH_ABS_UCLIP_XMAX,
303 NV03_PGRAPH_ABS_UCLIP_YMIN,
304 NV03_PGRAPH_ABS_UCLIP_YMAX,
309 NV03_PGRAPH_ABS_UCLIPA_XMIN,
310 NV03_PGRAPH_ABS_UCLIPA_XMAX,
311 NV03_PGRAPH_ABS_UCLIPA_YMIN,
312 NV03_PGRAPH_ABS_UCLIPA_YMAX,
313 NV03_PGRAPH_ABS_ICLIP_XMAX,
314 NV03_PGRAPH_ABS_ICLIP_YMAX,
315 NV03_PGRAPH_XY_LOGIC_MISC0,
316 NV03_PGRAPH_XY_LOGIC_MISC1,
317 NV03_PGRAPH_XY_LOGIC_MISC2,
318 NV03_PGRAPH_XY_LOGIC_MISC3,
323 NV10_PGRAPH_COMBINER0_IN_ALPHA,
324 NV10_PGRAPH_COMBINER1_IN_ALPHA,
325 NV10_PGRAPH_COMBINER0_IN_RGB,
326 NV10_PGRAPH_COMBINER1_IN_RGB,
327 NV10_PGRAPH_COMBINER_COLOR0,
328 NV10_PGRAPH_COMBINER_COLOR1,
329 NV10_PGRAPH_COMBINER0_OUT_ALPHA,
330 NV10_PGRAPH_COMBINER1_OUT_ALPHA,
331 NV10_PGRAPH_COMBINER0_OUT_RGB,
332 NV10_PGRAPH_COMBINER1_OUT_RGB,
333 NV10_PGRAPH_COMBINER_FINAL0,
334 NV10_PGRAPH_COMBINER_FINAL1,
351 NV04_PGRAPH_PASSTHRU_0,
352 NV04_PGRAPH_PASSTHRU_1,
353 NV04_PGRAPH_PASSTHRU_2,
354 NV10_PGRAPH_DIMX_TEXTURE,
355 NV10_PGRAPH_WDIMX_TEXTURE,
356 NV10_PGRAPH_DVD_COLORFMT,
357 NV10_PGRAPH_SCALED_FORMAT,
358 NV04_PGRAPH_MISC24_0,
359 NV04_PGRAPH_MISC24_1,
360 NV04_PGRAPH_MISC24_2,
/*
 * Extra context registers saved/restored only on NV11+/chipset >= 0x17
 * hardware (see the card_type/chipset checks in load/unload_context).
 * NOTE(review): the initializer entries are missing from this
 * extraction; only the declaration line survives.
 */
367 static int nv17_gr_ctx_regs[] = {
/* Per-channel context pointers, indexed by FIFO channel id (0..31). */
390 struct nv10_gr_chan *chan[32];
/* Software copy of one channel's PGRAPH state. */
394 struct nv10_gr_chan {
395 struct nvkm_object base;
/* Saved values for the registers listed in the two ctx_regs tables. */
397 int nv10[ARRAY_SIZE(nv10_gr_ctx_regs)];
398 int nv17[ARRAY_SIZE(nv17_gr_ctx_regs)];
399 struct pipe_state pipe_state;
/* Back-pointer from a channel context to its owning nv10_gr engine. */
404 static inline struct nv10_gr *
405 nv10_gr(struct nv10_gr_chan *chan)
407 return (void *)nv_object(chan)->engine;
410 /*******************************************************************************
411 * Graphics object classes
412 ******************************************************************************/
/*
 * PIPE_SAVE reads ARRAY_SIZE(state) words of 3D pipe state into
 * "state" starting at pipe address "addr"; PIPE_RESTORE writes them
 * back.  Reads/writes of PIPE_DATA auto-advance the pipe address.
 * NOTE(review): the do{}while(0) wrapper lines and the __i declaration
 * are missing from this extraction.
 */
414 #define PIPE_SAVE(gr, state, addr) \
417 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, addr); \
418 for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
419 state[__i] = nvkm_rd32(device, NV10_PGRAPH_PIPE_DATA); \
422 #define PIPE_RESTORE(gr, state, addr) \
425 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, addr); \
426 for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
427 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, state[__i]); \
/*
 * Object classes exposed on original NV10 chipsets (<= 0x10); the 3D
 * class here is 0x0056 (celsius).  NOTE(review): the
 * "nv10_gr_sclass[] = {" name line is missing from this extraction.
 */
430 static struct nvkm_oclass
432 { 0x0012, &nv04_gr_ofuncs }, /* beta1 */
433 { 0x0019, &nv04_gr_ofuncs }, /* clip */
434 { 0x0030, &nv04_gr_ofuncs }, /* null */
435 { 0x0039, &nv04_gr_ofuncs }, /* m2mf */
436 { 0x0043, &nv04_gr_ofuncs }, /* rop */
437 { 0x0044, &nv04_gr_ofuncs }, /* pattern */
438 { 0x004a, &nv04_gr_ofuncs }, /* gdi */
439 { 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
440 { 0x005f, &nv04_gr_ofuncs }, /* blit */
441 { 0x0062, &nv04_gr_ofuncs }, /* surf2d */
442 { 0x0072, &nv04_gr_ofuncs }, /* beta4 */
443 { 0x0089, &nv04_gr_ofuncs }, /* sifm */
444 { 0x008a, &nv04_gr_ofuncs }, /* ifc */
445 { 0x009f, &nv04_gr_ofuncs }, /* blit */
446 { 0x0093, &nv04_gr_ofuncs }, /* surf3d */
447 { 0x0094, &nv04_gr_ofuncs }, /* ttri */
448 { 0x0095, &nv04_gr_ofuncs }, /* mtri */
449 { 0x0056, &nv04_gr_ofuncs }, /* celcius */
/*
 * Object classes for NV15-class chipsets; identical to the NV10 list
 * except the 3D class is 0x0096.  NOTE(review): the
 * "nv15_gr_sclass[] = {" name line is missing from this extraction.
 */
453 static struct nvkm_oclass
455 { 0x0012, &nv04_gr_ofuncs }, /* beta1 */
456 { 0x0019, &nv04_gr_ofuncs }, /* clip */
457 { 0x0030, &nv04_gr_ofuncs }, /* null */
458 { 0x0039, &nv04_gr_ofuncs }, /* m2mf */
459 { 0x0043, &nv04_gr_ofuncs }, /* rop */
460 { 0x0044, &nv04_gr_ofuncs }, /* pattern */
461 { 0x004a, &nv04_gr_ofuncs }, /* gdi */
462 { 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
463 { 0x005f, &nv04_gr_ofuncs }, /* blit */
464 { 0x0062, &nv04_gr_ofuncs }, /* surf2d */
465 { 0x0072, &nv04_gr_ofuncs }, /* beta4 */
466 { 0x0089, &nv04_gr_ofuncs }, /* sifm */
467 { 0x008a, &nv04_gr_ofuncs }, /* ifc */
468 { 0x009f, &nv04_gr_ofuncs }, /* blit */
469 { 0x0093, &nv04_gr_ofuncs }, /* surf3d */
470 { 0x0094, &nv04_gr_ofuncs }, /* ttri */
471 { 0x0095, &nv04_gr_ofuncs }, /* mtri */
472 { 0x0096, &nv04_gr_ofuncs }, /* celcius */
/*
 * Software handler for the celsius LMA_WINDOW methods (0x1638..0x1644):
 * buffer the written value in chan->lma_window and, once complete,
 * inject the window into the 3D pipe at 0x6790 while saving and
 * restoring the surrounding pipe ranges and XFMODE registers so the
 * channel's visible state is unchanged.
 * NOTE(review): braces, the early return taken before all four window
 * values have arrived, and the loop-variable declaration are missing
 * from this extraction.
 */
477 nv17_gr_mthd_lma_window(struct nv10_gr_chan *chan, u32 mthd, u32 data)
479 struct nvkm_device *device = chan->base.engine->subdev.device;
480 struct nvkm_gr *gr = nvkm_gr(chan);
481 struct pipe_state *pipe = &chan->pipe_state;
/* Scratch copies of the pipe ranges clobbered below. */
482 u32 pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
483 u32 xfmode0, xfmode1;
/* mthd is one of 0x1638/0x163c/0x1640/0x1644 -> index 0..3. */
486 chan->lma_window[(mthd - 0x1638) / 4] = data;
493 PIPE_SAVE(device, pipe_0x0040, 0x0040);
494 PIPE_SAVE(device, pipe->pipe_0x0200, 0x0200);
496 PIPE_RESTORE(device, chan->lma_window, 0x6790);
500 xfmode0 = nvkm_rd32(device, NV10_PGRAPH_XFMODE0);
501 xfmode1 = nvkm_rd32(device, NV10_PGRAPH_XFMODE1);
503 PIPE_SAVE(device, pipe->pipe_0x4400, 0x4400);
504 PIPE_SAVE(device, pipe_0x64c0, 0x64c0);
505 PIPE_SAVE(device, pipe_0x6ab0, 0x6ab0);
506 PIPE_SAVE(device, pipe_0x6a80, 0x6a80);
/* Neutral XFMODE/pipe values while the LMA window is applied. */
510 nvkm_wr32(device, NV10_PGRAPH_XFMODE0, 0x10000000);
511 nvkm_wr32(device, NV10_PGRAPH_XFMODE1, 0x00000000);
512 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
513 for (i = 0; i < 4; i++)
514 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
515 for (i = 0; i < 4; i++)
516 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
518 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
519 for (i = 0; i < 3; i++)
520 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
522 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
523 for (i = 0; i < 3; i++)
524 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
526 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
527 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000008);
529 PIPE_RESTORE(device, pipe->pipe_0x0200, 0x0200);
533 PIPE_RESTORE(device, pipe_0x0040, 0x0040);
/* Put back everything saved above, in reverse. */
535 nvkm_wr32(device, NV10_PGRAPH_XFMODE0, xfmode0);
536 nvkm_wr32(device, NV10_PGRAPH_XFMODE1, xfmode1);
538 PIPE_RESTORE(device, pipe_0x64c0, 0x64c0);
539 PIPE_RESTORE(device, pipe_0x6ab0, 0x6ab0);
540 PIPE_RESTORE(device, pipe_0x6a80, 0x6a80);
541 PIPE_RESTORE(device, pipe->pipe_0x4400, 0x4400);
543 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
544 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
/*
 * Software handler for the celsius LMA_ENABLE method (0x1658): set the
 * LMA-related enable bits in NV10_PGRAPH_DEBUG_4 and register 0x4006b0.
 */
550 nv17_gr_mthd_lma_enable(struct nv10_gr_chan *chan, u32 mthd, u32 data)
552 struct nvkm_device *device = chan->base.engine->subdev.device;
553 struct nvkm_gr *gr = nvkm_gr(chan);
557 nvkm_mask(device, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100);
558 nvkm_mask(device, 0x4006b0, 0x08000000, 0x08000000);
/*
 * Dispatch software methods of the NV17 celsius class: the LMA window
 * methods go to nv17_gr_mthd_lma_window, LMA enable to
 * nv17_gr_mthd_lma_enable.  NOTE(review): the switch(mthd) line, the
 * default (unhandled-method) path and the return are missing from
 * this extraction.
 */
562 nv17_gr_mthd_celcius(struct nv10_gr_chan *chan, u32 mthd, u32 data)
564 void (*func)(struct nv10_gr_chan *, u32, u32);
566 case 0x1638 ... 0x1644:
567 func = nv17_gr_mthd_lma_window; break;
568 case 0x1658: func = nv17_gr_mthd_lma_enable; break;
572 func(chan, mthd, data);
/*
 * Top-level software-method dispatch, called from the interrupt handler
 * on ILLEGAL_MTHD: route by object class (only the NV17 celsius class
 * 0x99 has handlers here).  NOTE(review): the switch(class) line and
 * the default/no-handler path are missing from this extraction.
 */
577 nv10_gr_mthd(struct nv10_gr_chan *chan, u8 class, u32 mthd, u32 data)
579 bool (*func)(struct nv10_gr_chan *, u32, u32);
581 case 0x99: func = nv17_gr_mthd_celcius; break;
585 return func(chan, mthd, data);
/*
 * Object classes for NV17-class chipsets; the 3D class is 0x0099
 * (NV17 celsius, the class handled by nv10_gr_mthd).  NOTE(review):
 * the "nv17_gr_sclass[] = {" name line is missing from this
 * extraction.
 */
588 static struct nvkm_oclass
590 { 0x0012, &nv04_gr_ofuncs }, /* beta1 */
591 { 0x0019, &nv04_gr_ofuncs }, /* clip */
592 { 0x0030, &nv04_gr_ofuncs }, /* null */
593 { 0x0039, &nv04_gr_ofuncs }, /* m2mf */
594 { 0x0043, &nv04_gr_ofuncs }, /* rop */
595 { 0x0044, &nv04_gr_ofuncs }, /* pattern */
596 { 0x004a, &nv04_gr_ofuncs }, /* gdi */
597 { 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
598 { 0x005f, &nv04_gr_ofuncs }, /* blit */
599 { 0x0062, &nv04_gr_ofuncs }, /* surf2d */
600 { 0x0072, &nv04_gr_ofuncs }, /* beta4 */
601 { 0x0089, &nv04_gr_ofuncs }, /* sifm */
602 { 0x008a, &nv04_gr_ofuncs }, /* ifc */
603 { 0x009f, &nv04_gr_ofuncs }, /* blit */
604 { 0x0093, &nv04_gr_ofuncs }, /* surf3d */
605 { 0x0094, &nv04_gr_ofuncs }, /* ttri */
606 { 0x0095, &nv04_gr_ofuncs }, /* mtri */
607 { 0x0099, &nv04_gr_ofuncs },
611 /*******************************************************************************
613 ******************************************************************************/
/*
 * Return the context of the channel currently resident in PGRAPH, or
 * NULL: bit 16 of 0x400144 indicates a valid channel, and its id is
 * in the top byte of 0x400148.  Caller must hold gr->lock.
 */
615 static struct nv10_gr_chan *
616 nv10_gr_channel(struct nv10_gr *gr)
618 struct nvkm_device *device = gr->base.engine.subdev.device;
619 struct nv10_gr_chan *chan = NULL;
620 if (nvkm_rd32(device, 0x400144) & 0x00010000) {
621 int chid = nvkm_rd32(device, 0x400148) >> 24;
622 if (chid < ARRAY_SIZE(gr->chan))
623 chan = gr->chan[chid];
/*
 * Snapshot every 3D pipe range into the channel's pipe_state so it can
 * be replayed later by nv10_gr_load_pipe().
 */
629 nv10_gr_save_pipe(struct nv10_gr_chan *chan)
631 struct nv10_gr *gr = nv10_gr(chan);
632 struct pipe_state *pipe = &chan->pipe_state;
633 struct nvkm_device *device = gr->base.engine.subdev.device;
635 PIPE_SAVE(gr, pipe->pipe_0x4400, 0x4400);
636 PIPE_SAVE(gr, pipe->pipe_0x0200, 0x0200);
637 PIPE_SAVE(gr, pipe->pipe_0x6400, 0x6400);
638 PIPE_SAVE(gr, pipe->pipe_0x6800, 0x6800);
639 PIPE_SAVE(gr, pipe->pipe_0x6c00, 0x6c00);
640 PIPE_SAVE(gr, pipe->pipe_0x7000, 0x7000);
641 PIPE_SAVE(gr, pipe->pipe_0x7400, 0x7400);
642 PIPE_SAVE(gr, pipe->pipe_0x7800, 0x7800);
643 PIPE_SAVE(gr, pipe->pipe_0x0040, 0x0040);
644 PIPE_SAVE(gr, pipe->pipe_0x0000, 0x0000);
/*
 * Replay a channel's saved 3D pipe state into the hardware.  XFMODE is
 * forced to a neutral value and a few pipe locations are primed with
 * identity-like constants before the 0x0200 range is restored, then
 * XFMODE and the remaining ranges are restored.
 * NOTE(review): braces, the loop-variable declaration and (presumably)
 * PGRAPH idle waits between the steps are missing from this
 * extraction.
 */
648 nv10_gr_load_pipe(struct nv10_gr_chan *chan)
650 struct nv10_gr *gr = nv10_gr(chan);
651 struct pipe_state *pipe = &chan->pipe_state;
652 struct nvkm_device *device = gr->base.engine.subdev.device;
653 u32 xfmode0, xfmode1;
657 /* XXX check haiku comments */
658 xfmode0 = nvkm_rd32(device, NV10_PGRAPH_XFMODE0);
659 xfmode1 = nvkm_rd32(device, NV10_PGRAPH_XFMODE1);
660 nvkm_wr32(device, NV10_PGRAPH_XFMODE0, 0x10000000);
661 nvkm_wr32(device, NV10_PGRAPH_XFMODE1, 0x00000000);
662 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
663 for (i = 0; i < 4; i++)
664 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
665 for (i = 0; i < 4; i++)
666 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
668 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
669 for (i = 0; i < 3; i++)
670 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
672 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
673 for (i = 0; i < 3; i++)
674 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
676 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
677 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000008);
680 PIPE_RESTORE(gr, pipe->pipe_0x0200, 0x0200);
684 nvkm_wr32(device, NV10_PGRAPH_XFMODE0, xfmode0);
685 nvkm_wr32(device, NV10_PGRAPH_XFMODE1, xfmode1);
686 PIPE_RESTORE(gr, pipe->pipe_0x6400, 0x6400);
687 PIPE_RESTORE(gr, pipe->pipe_0x6800, 0x6800);
688 PIPE_RESTORE(gr, pipe->pipe_0x6c00, 0x6c00);
689 PIPE_RESTORE(gr, pipe->pipe_0x7000, 0x7000);
690 PIPE_RESTORE(gr, pipe->pipe_0x7400, 0x7400);
691 PIPE_RESTORE(gr, pipe->pipe_0x7800, 0x7800);
692 PIPE_RESTORE(gr, pipe->pipe_0x4400, 0x4400);
693 PIPE_RESTORE(gr, pipe->pipe_0x0000, 0x0000);
694 PIPE_RESTORE(gr, pipe->pipe_0x0040, 0x0040);
/*
 * Fill a new channel's pipe_state with the hardware's default 3D pipe
 * values, range by range.  PIPE_INIT points pipe_state_addr at the
 * start of a range's array, NV_WRITE_PIPE_INIT appends one word, and
 * PIPE_INIT_END verifies exactly the whole array was filled (logging
 * an error otherwise).  NOTE(review): several lines (braces, the
 * PIPE_INIT calls that open each range, and some macro wrapper lines)
 * are missing from this extraction.
 */
699 nv10_gr_create_pipe(struct nv10_gr_chan *chan)
701 struct nv10_gr *gr = nv10_gr(chan);
702 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
703 struct pipe_state *pipe_state = &chan->pipe_state;
/* Write cursor into the range currently being initialized. */
704 u32 *pipe_state_addr;
706 #define PIPE_INIT(addr) \
708 pipe_state_addr = pipe_state->pipe_##addr; \
710 #define PIPE_INIT_END(addr) \
712 u32 *__end_addr = pipe_state->pipe_##addr + \
713 ARRAY_SIZE(pipe_state->pipe_##addr); \
714 if (pipe_state_addr != __end_addr) \
715 nvkm_error(subdev, "incomplete pipe init for 0x%x : %p/%p\n", \
716 addr, pipe_state_addr, __end_addr); \
718 #define NV_WRITE_PIPE_INIT(value) *(pipe_state_addr++) = value
/* Range 0x0200: all zeros. */
721 for (i = 0; i < 48; i++)
722 NV_WRITE_PIPE_INIT(0x00000000);
723 PIPE_INIT_END(0x0200);
/* Range 0x6400: zeros followed by float constants (1.0f/2.0f/0.5f). */
726 for (i = 0; i < 211; i++)
727 NV_WRITE_PIPE_INIT(0x00000000);
728 NV_WRITE_PIPE_INIT(0x3f800000);
729 NV_WRITE_PIPE_INIT(0x40000000);
730 NV_WRITE_PIPE_INIT(0x40000000);
731 NV_WRITE_PIPE_INIT(0x40000000);
732 NV_WRITE_PIPE_INIT(0x40000000);
733 NV_WRITE_PIPE_INIT(0x00000000);
734 NV_WRITE_PIPE_INIT(0x00000000);
735 NV_WRITE_PIPE_INIT(0x3f800000);
736 NV_WRITE_PIPE_INIT(0x00000000);
737 NV_WRITE_PIPE_INIT(0x3f000000);
738 NV_WRITE_PIPE_INIT(0x3f000000);
739 NV_WRITE_PIPE_INIT(0x00000000);
740 NV_WRITE_PIPE_INIT(0x00000000);
741 NV_WRITE_PIPE_INIT(0x00000000);
742 NV_WRITE_PIPE_INIT(0x00000000);
743 NV_WRITE_PIPE_INIT(0x3f800000);
744 NV_WRITE_PIPE_INIT(0x00000000);
745 NV_WRITE_PIPE_INIT(0x00000000);
746 NV_WRITE_PIPE_INIT(0x00000000);
747 NV_WRITE_PIPE_INIT(0x00000000);
748 NV_WRITE_PIPE_INIT(0x00000000);
749 NV_WRITE_PIPE_INIT(0x3f800000);
750 NV_WRITE_PIPE_INIT(0x3f800000);
751 NV_WRITE_PIPE_INIT(0x3f800000);
752 NV_WRITE_PIPE_INIT(0x3f800000);
753 PIPE_INIT_END(0x6400);
/* Range 0x6800: zeros with a single 1.0f. */
756 for (i = 0; i < 162; i++)
757 NV_WRITE_PIPE_INIT(0x00000000);
758 NV_WRITE_PIPE_INIT(0x3f800000);
759 for (i = 0; i < 25; i++)
760 NV_WRITE_PIPE_INIT(0x00000000);
761 PIPE_INIT_END(0x6800);
/* Range 0x6c00: mostly zeros, one -1.0f. */
764 NV_WRITE_PIPE_INIT(0x00000000);
765 NV_WRITE_PIPE_INIT(0x00000000);
766 NV_WRITE_PIPE_INIT(0x00000000);
767 NV_WRITE_PIPE_INIT(0x00000000);
768 NV_WRITE_PIPE_INIT(0xbf800000);
769 NV_WRITE_PIPE_INIT(0x00000000);
770 NV_WRITE_PIPE_INIT(0x00000000);
771 NV_WRITE_PIPE_INIT(0x00000000);
772 NV_WRITE_PIPE_INIT(0x00000000);
773 NV_WRITE_PIPE_INIT(0x00000000);
774 NV_WRITE_PIPE_INIT(0x00000000);
775 NV_WRITE_PIPE_INIT(0x00000000);
776 PIPE_INIT_END(0x6c00);
/* Range 0x7000: zeros with a repeating 0x7149f2ca constant. */
779 NV_WRITE_PIPE_INIT(0x00000000);
780 NV_WRITE_PIPE_INIT(0x00000000);
781 NV_WRITE_PIPE_INIT(0x00000000);
782 NV_WRITE_PIPE_INIT(0x00000000);
783 NV_WRITE_PIPE_INIT(0x00000000);
784 NV_WRITE_PIPE_INIT(0x00000000);
785 NV_WRITE_PIPE_INIT(0x00000000);
786 NV_WRITE_PIPE_INIT(0x00000000);
787 NV_WRITE_PIPE_INIT(0x00000000);
788 NV_WRITE_PIPE_INIT(0x00000000);
789 NV_WRITE_PIPE_INIT(0x00000000);
790 NV_WRITE_PIPE_INIT(0x00000000);
791 NV_WRITE_PIPE_INIT(0x7149f2ca);
792 NV_WRITE_PIPE_INIT(0x00000000);
793 NV_WRITE_PIPE_INIT(0x00000000);
794 NV_WRITE_PIPE_INIT(0x00000000);
795 NV_WRITE_PIPE_INIT(0x7149f2ca);
796 NV_WRITE_PIPE_INIT(0x00000000);
797 NV_WRITE_PIPE_INIT(0x00000000);
798 NV_WRITE_PIPE_INIT(0x00000000);
799 NV_WRITE_PIPE_INIT(0x7149f2ca);
800 NV_WRITE_PIPE_INIT(0x00000000);
801 NV_WRITE_PIPE_INIT(0x00000000);
802 NV_WRITE_PIPE_INIT(0x00000000);
803 NV_WRITE_PIPE_INIT(0x7149f2ca);
804 NV_WRITE_PIPE_INIT(0x00000000);
805 NV_WRITE_PIPE_INIT(0x00000000);
806 NV_WRITE_PIPE_INIT(0x00000000);
807 NV_WRITE_PIPE_INIT(0x7149f2ca);
808 NV_WRITE_PIPE_INIT(0x00000000);
809 NV_WRITE_PIPE_INIT(0x00000000);
810 NV_WRITE_PIPE_INIT(0x00000000);
811 NV_WRITE_PIPE_INIT(0x7149f2ca);
812 NV_WRITE_PIPE_INIT(0x00000000);
813 NV_WRITE_PIPE_INIT(0x00000000);
814 NV_WRITE_PIPE_INIT(0x00000000);
815 NV_WRITE_PIPE_INIT(0x7149f2ca);
816 NV_WRITE_PIPE_INIT(0x00000000);
817 NV_WRITE_PIPE_INIT(0x00000000);
818 NV_WRITE_PIPE_INIT(0x00000000);
819 NV_WRITE_PIPE_INIT(0x7149f2ca);
820 for (i = 0; i < 35; i++)
821 NV_WRITE_PIPE_INIT(0x00000000);
822 PIPE_INIT_END(0x7000);
/* Remaining ranges: all zeros. */
825 for (i = 0; i < 48; i++)
826 NV_WRITE_PIPE_INIT(0x00000000);
827 PIPE_INIT_END(0x7400);
830 for (i = 0; i < 48; i++)
831 NV_WRITE_PIPE_INIT(0x00000000);
832 PIPE_INIT_END(0x7800);
835 for (i = 0; i < 32; i++)
836 NV_WRITE_PIPE_INIT(0x00000000);
837 PIPE_INIT_END(0x4400);
840 for (i = 0; i < 16; i++)
841 NV_WRITE_PIPE_INIT(0x00000000);
842 PIPE_INIT_END(0x0000);
845 for (i = 0; i < 4; i++)
846 NV_WRITE_PIPE_INIT(0x00000000);
847 PIPE_INIT_END(0x0040);
851 #undef NV_WRITE_PIPE_INIT
855 nv10_gr_ctx_regs_find_offset(struct nv10_gr *gr, int reg)
857 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
859 for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++) {
860 if (nv10_gr_ctx_regs[i] == reg)
863 nvkm_error(subdev, "unknow offset nv10_ctx_regs %d\n", reg);
868 nv17_gr_ctx_regs_find_offset(struct nv10_gr *gr, int reg)
870 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
872 for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++) {
873 if (nv17_gr_ctx_regs[i] == reg)
876 nvkm_error(subdev, "unknow offset nv17_ctx_regs %d\n", reg);
/*
 * Restore the hidden vertex-buffer DMA object state by injecting a
 * NV10TCL_DMA_VTXBUF (method 0x18c) through the PGRAPH FIFO interface:
 * find a subchannel bound to a celsius object, save the FIFO and ctx
 * state, replay the method with "inst" as data, then restore
 * everything.  NOTE(review): braces, variable declarations (i,
 * subchan) and the subchannel-found assignment/break are missing from
 * this extraction.
 */
881 nv10_gr_load_dma_vtxbuf(struct nv10_gr_chan *chan, int chid, u32 inst)
883 struct nv10_gr *gr = nv10_gr(chan);
884 struct nvkm_device *device = gr->base.engine.subdev.device;
885 u32 st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
886 u32 ctx_user, ctx_switch[5];
889 /* NV10TCL_DMA_VTXBUF (method 0x18c) modifies hidden state
890 * that cannot be restored via MMIO. Do it through the FIFO
894 /* Look for a celsius object */
895 for (i = 0; i < 8; i++) {
896 int class = nvkm_rd32(device, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
/* 0x56/0x96/0x99 are the NV10/NV15/NV17 celsius class ids. */
898 if (class == 0x56 || class == 0x96 || class == 0x99) {
904 if (subchan < 0 || !inst)
907 /* Save the current ctx object */
908 ctx_user = nvkm_rd32(device, NV10_PGRAPH_CTX_USER);
909 for (i = 0; i < 5; i++)
910 ctx_switch[i] = nvkm_rd32(device, NV10_PGRAPH_CTX_SWITCH(i));
912 /* Save the FIFO state */
913 st2 = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2);
914 st2_dl = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2_DL);
915 st2_dh = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2_DH);
916 fifo_ptr = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR);
918 for (i = 0; i < ARRAY_SIZE(fifo); i++)
919 fifo[i] = nvkm_rd32(device, 0x4007a0 + 4 * i);
921 /* Switch to the celsius subchannel */
922 for (i = 0; i < 5; i++)
923 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(i),
924 nvkm_rd32(device, NV10_PGRAPH_CTX_CACHE(subchan, i)));
925 nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
927 /* Inject NV10TCL_DMA_VTXBUF */
928 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
929 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2,
930 0x2c000000 | chid << 20 | subchan << 16 | 0x18c);
931 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
932 nvkm_mask(device, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
/* Pulse the PGRAPH FIFO enable bit to make it process the method. */
933 nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
934 nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
936 /* Restore the FIFO state */
937 for (i = 0; i < ARRAY_SIZE(fifo); i++)
938 nvkm_wr32(device, 0x4007a0 + 4 * i, fifo[i]);
940 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
941 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2, st2);
942 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
943 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
945 /* Restore the current ctx object */
946 for (i = 0; i < 5; i++)
947 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
948 nvkm_wr32(device, NV10_PGRAPH_CTX_USER, ctx_user);
/*
 * Make "chan" the resident PGRAPH context for channel id "chid":
 * write back the saved context registers (plus the NV17 set on
 * NV11+/chipset >= 0x17), replay the pipe state and the hidden
 * vertex-buffer DMA object, then point CTX_USER at the channel.
 */
952 nv10_gr_load_context(struct nv10_gr_chan *chan, int chid)
954 struct nv10_gr *gr = nv10_gr(chan);
955 struct nvkm_device *device = gr->base.engine.subdev.device;
959 for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++)
960 nvkm_wr32(device, nv10_gr_ctx_regs[i], chan->nv10[i]);
962 if (nv_device(gr)->card_type >= NV_11 &&
963 nv_device(gr)->chipset >= 0x17) {
964 for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++)
965 nvkm_wr32(device, nv17_gr_ctx_regs[i], chan->nv17[i]);
968 nv10_gr_load_pipe(chan);
/* Low 16 bits of GLOBALSTATE1 hold the vtxbuf DMA object instance. */
970 inst = nvkm_rd32(device, NV10_PGRAPH_GLOBALSTATE1) & 0xffff;
971 nv10_gr_load_dma_vtxbuf(chan, chid, inst);
973 nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
974 nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24);
975 nvkm_mask(device, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000);
/*
 * Save the resident PGRAPH context into "chan" (the inverse of
 * nv10_gr_load_context) and park CTX_USER on the invalid channel id
 * 0x1f.
 */
980 nv10_gr_unload_context(struct nv10_gr_chan *chan)
982 struct nv10_gr *gr = nv10_gr(chan);
983 struct nvkm_device *device = gr->base.engine.subdev.device;
986 for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++)
987 chan->nv10[i] = nvkm_rd32(device, nv10_gr_ctx_regs[i]);
989 if (nv_device(gr)->card_type >= NV_11 &&
990 nv_device(gr)->chipset >= 0x17) {
991 for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++)
992 chan->nv17[i] = nvkm_rd32(device, nv17_gr_ctx_regs[i]);
995 nv10_gr_save_pipe(chan);
997 nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
998 nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
/*
 * Handle a CONTEXT_SWITCH interrupt: save the currently-resident
 * channel's context (if any) and load the context of the channel
 * whose id PFIFO reported in TRAPPED_ADDR.  Called from the interrupt
 * handler with gr->lock held.  NOTE(review): braces and the NULL
 * checks around prev/next are missing from this extraction.
 */
1003 nv10_gr_context_switch(struct nv10_gr *gr)
1005 struct nvkm_device *device = gr->base.engine.subdev.device;
1006 struct nv10_gr_chan *prev = NULL;
1007 struct nv10_gr_chan *next = NULL;
1012 /* If previous context is valid, we need to save it */
1013 prev = nv10_gr_channel(gr);
1015 nv10_gr_unload_context(prev);
1017 /* load context for next channel */
1018 chid = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
1019 next = gr->chan[chid];
1021 nv10_gr_load_context(next, chid);
/*
 * Seed a register's value in the software context image: look up the
 * register's slot via the find_offset helpers and store "val" there
 * (NV_WRITE_CTX for the nv10 set, NV17_WRITE_CTX for the nv17 set).
 * NOTE(review): the "if (offset > 0)" guard and the "} while (0)"
 * closers are missing from this extraction.
 */
1024 #define NV_WRITE_CTX(reg, val) do { \
1025 int offset = nv10_gr_ctx_regs_find_offset(gr, reg); \
1027 chan->nv10[offset] = val; \
1030 #define NV17_WRITE_CTX(reg, val) do { \
1031 int offset = nv17_gr_ctx_regs_find_offset(gr, reg); \
1033 chan->nv17[offset] = val; \
/*
 * Create (or re-reference) the PGRAPH context for a FIFO channel: if a
 * context already exists for this chid it is reused with a bumped
 * refcount, otherwise a new one is built with default register values
 * and pipe state, then registered in gr->chan[].  NOTE(review): the
 * "static int" return-type line, error-return checks and the final
 * "return 0" are missing from this extraction.
 */
1037 nv10_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
1038 struct nvkm_oclass *oclass, void *data, u32 size,
1039 struct nvkm_object **pobject)
1041 struct nvkm_fifo_chan *fifo = (void *)parent;
1042 struct nv10_gr *gr = (void *)engine;
1043 struct nv10_gr_chan *chan;
1044 struct nvkm_device *device = gr->base.engine.subdev.device;
1045 unsigned long flags;
1048 ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
1049 *pobject = nv_object(chan);
1053 spin_lock_irqsave(&gr->lock, flags);
/* Already have a context for this channel: share it and drop ours. */
1054 if (gr->chan[fifo->chid]) {
1055 *pobject = nv_object(gr->chan[fifo->chid]);
1056 atomic_inc(&(*pobject)->refcount);
1057 spin_unlock_irqrestore(&gr->lock, flags);
1058 nvkm_object_destroy(&chan->base);
/* Default context register values for a fresh channel. */
1062 NV_WRITE_CTX(0x00400e88, 0x08000000);
1063 NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
1064 NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
1065 NV_WRITE_CTX(0x00400e10, 0x00001000);
1066 NV_WRITE_CTX(0x00400e14, 0x00001000);
1067 NV_WRITE_CTX(0x00400e30, 0x00080008);
1068 NV_WRITE_CTX(0x00400e34, 0x00080008);
1069 if (nv_device(gr)->card_type >= NV_11 &&
1070 nv_device(gr)->chipset >= 0x17) {
1071 /* is it really needed ??? */
1072 NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
1073 nvkm_rd32(device, NV10_PGRAPH_DEBUG_4));
1074 NV17_WRITE_CTX(0x004006b0, nvkm_rd32(device, 0x004006b0));
1075 NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
1076 NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
1077 NV17_WRITE_CTX(0x00400ec0, 0x00000080);
1078 NV17_WRITE_CTX(0x00400ed0, 0x00000080);
1080 NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->chid << 24);
1082 nv10_gr_create_pipe(chan);
1084 gr->chan[fifo->chid] = chan;
1085 chan->chid = fifo->chid;
1086 spin_unlock_irqrestore(&gr->lock, flags);
/*
 * Destroy a channel context: unregister it from gr->chan[] under the
 * lock, then free the object.
 */
1091 nv10_gr_context_dtor(struct nvkm_object *object)
1093 struct nv10_gr *gr = (void *)object->engine;
1094 struct nv10_gr_chan *chan = (void *)object;
1095 unsigned long flags;
1097 spin_lock_irqsave(&gr->lock, flags);
1098 gr->chan[chan->chid] = NULL;
1099 spin_unlock_irqrestore(&gr->lock, flags);
1101 nvkm_object_destroy(&chan->base);
/*
 * Deactivate a channel context: with PGRAPH's FIFO access disabled,
 * save the context out of the hardware if it is the resident one,
 * then re-enable FIFO access and finish the base object.
 */
1105 nv10_gr_context_fini(struct nvkm_object *object, bool suspend)
1107 struct nv10_gr *gr = (void *)object->engine;
1108 struct nv10_gr_chan *chan = (void *)object;
1109 struct nvkm_device *device = gr->base.engine.subdev.device;
1110 unsigned long flags;
1112 spin_lock_irqsave(&gr->lock, flags);
1113 nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
1114 if (nv10_gr_channel(gr) == chan)
1115 nv10_gr_unload_context(chan);
1116 nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
1117 spin_unlock_irqrestore(&gr->lock, flags);
1119 return _nvkm_object_fini(&chan->base, suspend);
/*
 * Context class for the NV10 PGRAPH engine: lifecycle hooks for
 * per-channel graphics contexts.  NOTE(review): the class name line
 * ("nv10_gr_cclass = {") and closing braces are missing from this
 * extraction.
 */
1122 static struct nvkm_oclass
1124 .handle = NV_ENGCTX(GR, 0x10),
1125 .ofuncs = &(struct nvkm_ofuncs) {
1126 .ctor = nv10_gr_context_ctor,
1127 .dtor = nv10_gr_context_dtor,
1128 .init = _nvkm_object_init,
1129 .fini = nv10_gr_context_fini,
1133 /*******************************************************************************
1134 * PGRAPH engine/subdev functions
1135 ******************************************************************************/
/*
 * Program PGRAPH's copy of tiling region "i" from the FB subdev's
 * region table, with PFIFO paused so the update is atomic with
 * respect to in-flight commands.
 */
1138 nv10_gr_tile_prog(struct nvkm_engine *engine, int i)
1140 struct nv10_gr *gr = (void *)engine;
1141 struct nvkm_device *device = gr->base.engine.subdev.device;
1142 struct nvkm_fifo *fifo = device->fifo;
1143 struct nvkm_fb_tile *tile = &device->fb->tile.region[i];
1144 unsigned long flags;
1146 fifo->pause(fifo, &flags);
1149 nvkm_wr32(device, NV10_PGRAPH_TLIMIT(i), tile->limit);
1150 nvkm_wr32(device, NV10_PGRAPH_TSIZE(i), tile->pitch);
1151 nvkm_wr32(device, NV10_PGRAPH_TILE(i), tile->addr);
1153 fifo->start(fifo, &flags);
/* Human-readable names for PGRAPH interrupt status bits, used by
 * nvkm_snprintbf() in the interrupt handler.  NOTE(review): the
 * terminating {} entries and closing "};" are missing from this
 * extraction. */
1156 const struct nvkm_bitfield nv10_gr_intr_name[] = {
1157 { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
1158 { NV_PGRAPH_INTR_ERROR, "ERROR" },
/* Names for the NSTATUS register bits. */
1162 const struct nvkm_bitfield nv10_gr_nstatus[] = {
1163 { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
1164 { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
1165 { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
1166 { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
/*
 * PGRAPH interrupt handler: decode the trapped method (channel,
 * subchannel, method, data, class), try the software-method path for
 * ILLEGAL_MTHD errors, perform context switches, acknowledge the
 * status bits, and log anything left unhandled in "show".
 * NOTE(review): the declaration/initialization of "show" and some
 * braces are missing from this extraction.
 */
1171 nv10_gr_intr(struct nvkm_subdev *subdev)
1173 struct nv10_gr *gr = (void *)subdev;
1174 struct nv10_gr_chan *chan = NULL;
1175 struct nvkm_device *device = gr->base.engine.subdev.device;
1176 u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
1177 u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
1178 u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
/* TRAPPED_ADDR packs channel id, subchannel and method offset. */
1179 u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
1180 u32 chid = (addr & 0x01f00000) >> 20;
1181 u32 subc = (addr & 0x00070000) >> 16;
1182 u32 mthd = (addr & 0x00001ffc);
1183 u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
1184 u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xfff;
1186 char msg[128], src[128], sta[128];
1187 unsigned long flags;
1189 spin_lock_irqsave(&gr->lock, flags);
1190 chan = gr->chan[chid];
1192 if (stat & NV_PGRAPH_INTR_ERROR) {
/* A handled software method clears the ERROR bit from "show". */
1193 if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
1194 if (!nv10_gr_mthd(chan, class, mthd, data))
1195 show &= ~NV_PGRAPH_INTR_ERROR;
1199 if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
1200 nvkm_wr32(device, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
1201 stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1202 show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1203 nv10_gr_context_switch(gr);
1206 nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
1207 nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
/* Anything still set in "show" was not handled: log it. */
1210 nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
1211 nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
1212 nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
1213 nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
1214 "nstatus %08x [%s] ch %d [%s] subc %d "
1215 "class %04x mthd %04x data %08x\n",
1216 show, msg, nsource, src, nstatus, sta, chid,
1217 nvkm_client_name(chan), subc, class, mthd, data);
1220 spin_unlock_irqrestore(&gr->lock, flags);
/*
 * Engine constructor: create the base gr engine, hook up the interrupt
 * handler and context class, and pick the object-class list by
 * chipset (NV10 / NV15 / NV17 variants).  NOTE(review): the gr/ret
 * declarations, error checks, else keywords and "return 0" are
 * missing from this extraction.
 */
1224 nv10_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
1225 struct nvkm_oclass *oclass, void *data, u32 size,
1226 struct nvkm_object **pobject)
1231 ret = nvkm_gr_create(parent, engine, oclass, true, &gr);
1232 *pobject = nv_object(gr);
1236 nv_subdev(gr)->unit = 0x00001000;
1237 nv_subdev(gr)->intr = nv10_gr_intr;
1238 nv_engine(gr)->cclass = &nv10_gr_cclass;
1240 if (nv_device(gr)->chipset <= 0x10)
1241 nv_engine(gr)->sclass = nv10_gr_sclass;
1243 if (nv_device(gr)->chipset < 0x17 ||
1244 nv_device(gr)->card_type < NV_11)
1245 nv_engine(gr)->sclass = nv15_gr_sclass;
1247 nv_engine(gr)->sclass = nv17_gr_sclass;
1249 nv_engine(gr)->tile_prog = nv10_gr_tile_prog;
1250 spin_lock_init(&gr->lock);
/* Engine destructor: release the base gr engine. */
1255 nv10_gr_dtor(struct nvkm_object *object)
1257 struct nv10_gr *gr = (void *)object;
1258 nvkm_gr_destroy(&gr->base);
/*
 * Engine init: reset/enable interrupts, program the DEBUG registers
 * (with NV11+/NV17+ extras), reprogram all tiling regions, clear the
 * context-switch state and park CTX_USER on the invalid channel 0x1f.
 * NOTE(review): the ret/i declarations, the error check after
 * nvkm_gr_init() and the final "return 0" are missing from this
 * extraction.
 */
1262 nv10_gr_init(struct nvkm_object *object)
1264 struct nvkm_engine *engine = nv_engine(object);
1265 struct nv10_gr *gr = (void *)engine;
1266 struct nvkm_device *device = gr->base.engine.subdev.device;
1267 struct nvkm_fb *fb = device->fb;
1270 ret = nvkm_gr_init(&gr->base);
/* Clear any pending interrupts, then enable them all. */
1274 nvkm_wr32(device, NV03_PGRAPH_INTR , 0xFFFFFFFF);
1275 nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
1277 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
1278 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
1279 nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x00118700);
1280 /* nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
1281 nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
1282 nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
1284 if (nv_device(gr)->card_type >= NV_11 &&
1285 nv_device(gr)->chipset >= 0x17) {
1286 nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x1f000000);
1287 nvkm_wr32(device, 0x400a10, 0x03ff3fb6);
1288 nvkm_wr32(device, 0x400838, 0x002f8684);
1289 nvkm_wr32(device, 0x40083c, 0x00115f3f);
1290 nvkm_wr32(device, 0x4006b0, 0x40000020);
1292 nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00000000);
1295 /* Turn all the tiling regions off. */
1296 for (i = 0; i < fb->tile.regions; i++)
1297 engine->tile_prog(engine, i);
1299 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
1300 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
1301 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
1302 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
1303 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
1304 nvkm_wr32(device, NV10_PGRAPH_STATE, 0xFFFFFFFF);
1306 nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
1307 nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
1308 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
/* Engine fini: delegate to the base gr engine teardown. */
1313 nv10_gr_fini(struct nvkm_object *object, bool suspend)
1315 struct nv10_gr *gr = (void *)object;
1316 return nvkm_gr_fini(&gr->base, suspend);
/*
 * Engine class descriptor for NV10 PGRAPH.  NOTE(review): the
 * "struct nvkm_oclass nv10_gr_oclass = {" opening line and the
 * closing braces are missing from this extraction.
 */
1321 .handle = NV_ENGINE(GR, 0x10),
1322 .ofuncs = &(struct nvkm_ofuncs) {
1323 .ctor = nv10_gr_ctor,
1324 .dtor = nv10_gr_dtor,
1325 .init = nv10_gr_init,
1326 .fini = nv10_gr_fini,