/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __NOUVEAU_DMA_H__
#define __NOUVEAU_DMA_H__

#ifndef NOUVEAU_DMA_DEBUG
#define NOUVEAU_DMA_DEBUG 0
#endif

/*
 * There's a hw race condition where you can't jump to your PUT offset;
 * to avoid this we jump to offset + SKIPS and fill the difference with
 * NOPs.
 *
 * xf86-video-nv configures the DMA fetch size to 32 bytes and uses a
 * SKIPS value of 8.  Assuming the race condition has to do with writing
 * into the fetch area: since we configure a fetch size of 128 bytes, we
 * need a larger SKIPS value.
 */
#define NOUVEAU_DMA_SKIPS (128 / 4)
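
/*
 * Sketch of how the skip area would be consumed (an assumption drawn from
 * the comment above, not something defined in this header): after jumping
 * to offset + NOUVEAU_DMA_SKIPS, the first NOUVEAU_DMA_SKIPS words of the
 * buffer are expected to be pre-filled with NOPs, e.g.
 *
 *	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
 *		OUT_RING(chan, 0);
 *
 * 32 words * 4 bytes per word matches the 128-byte fetch size above.
 */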

/* Hardcoded object assignments to subchannels (subchannel id). */
enum {
	NvSubM2MF = 0,
	NvSubSw = 1,
	NvSub2D = 2,
	NvSubCtxSurf2D = 2,
	NvSubGdiRect = 3,
	NvSubImageBlit = 4
};

/* Object handles. */
enum {
	NvM2MF = 0x80000001,
	NvDmaFB = 0x80000002,
	NvDmaTT = 0x80000003,
	NvDmaVRAM = 0x80000004,
	NvDmaGART = 0x80000005,
	NvNotify0 = 0x80000006,
	Nv2D = 0x80000007,
	NvCtxSurf2D = 0x80000008,
	NvRop = 0x80000009,
	NvImagePatt = 0x8000000a,
	NvClipRect = 0x8000000b,
	NvGdiRect = 0x8000000c,
	NvImageBlit = 0x8000000d,
	NvSw = 0x8000000e,

	/* G80+ display objects */
	NvEvoVRAM = 0x01000000,
	NvEvoFB16 = 0x01000001,
	NvEvoFB32 = 0x01000002
};
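
/*
 * Illustrative note (an assumption about typical use, not something this
 * header defines): a handle from the enum above is bound to a subchannel
 * by writing it with that subchannel's method 0x0000, e.g. NvM2MF via
 * NV_MEMORY_TO_MEMORY_FORMAT_NAME on NvSubM2MF, after which the class's
 * other methods can be issued on that subchannel.
 */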

#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
#define NV_MEMORY_TO_MEMORY_FORMAT_NAME 0x00000000
#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF 0x00000050
#define NV_MEMORY_TO_MEMORY_FORMAT_NOP 0x00000100
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE 0x00000000
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN 0x00000001
#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY 0x00000180
#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE 0x00000184
#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c

#define NV50_MEMORY_TO_MEMORY_FORMAT 0x00005039
#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200 0x00000200
#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C 0x0000021c
#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH 0x00000238
#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH 0x0000023c

/* Make sure at least "size" words are free in the push buffer, waiting in
 * nouveau_dma_wait() if necessary, and reserve them. */
static __must_check inline int
RING_SPACE(struct nouveau_channel *chan, int size)
{
	if (chan->dma.free < size) {
		int ret;

		ret = nouveau_dma_wait(chan, size);
		if (ret)
			return ret;
	}

	chan->dma.free -= size;
	return 0;
}

/* Emit a single word into the push buffer; nothing is submitted to the
 * hardware until FIRE_RING(). */
static inline void
OUT_RING(struct nouveau_channel *chan, int data)
{
	if (NOUVEAU_DMA_DEBUG) {
		NV_INFO(chan->dev, "Ch%d/0x%08x: 0x%08x\n",
			chan->id, chan->dma.cur << 2, data);
	}

	nouveau_bo_wr32(chan->pushbuf_bo, chan->dma.cur++, data);
}

/* Emit nr_dwords words from data into the push buffer. */
extern void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);

/* Emit a method header: "size" data words follow for method "mthd" on
 * subchannel "subc". */
static inline void
BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
{
	OUT_RING(chan, (subc << 13) | (size << 18) | mthd);
}
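
/*
 * Worked example of the header encoding above, with illustrative values:
 * BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1) emits
 * (0 << 13) | (1 << 18) | 0x0100 = 0x00040100, i.e. one data word for
 * method 0x0100 on subchannel 0.
 */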

/* Expose a new PUT offset (in words) to the hardware: memory barrier, dummy
 * push buffer read-back, then write the byte offset to the channel's user
 * PUT register.  Relies on a local variable named "chan" being in scope. */
#define WRITE_PUT(val) do {                                                    \
	DRM_MEMORYBARRIER();                                                   \
	nouveau_bo_rd32(chan->pushbuf_bo, 0);                                  \
	nvchan_wr32(chan, chan->user_put, ((val) << 2) + chan->pushbuf_base);  \
} while (0)

/* Submit everything emitted since the last FIRE_RING() by advancing the
 * hardware PUT pointer to the current position. */
static inline void
FIRE_RING(struct nouveau_channel *chan)
{
	if (NOUVEAU_DMA_DEBUG) {
		NV_INFO(chan->dev, "Ch%d/0x%08x: PUSH!\n",
			chan->id, chan->dma.cur << 2);
	}

	if (chan->dma.cur == chan->dma.put)
		return;
	chan->accel_done = true;

	WRITE_PUT(chan->dma.cur);
	chan->dma.put = chan->dma.cur;
}

/* Discard everything emitted since the last FIRE_RING() by rewinding the
 * current position back to PUT. */
static inline void
WIND_RING(struct nouveau_channel *chan)
{
	chan->dma.cur = chan->dma.put;
}
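
/*
 * Illustrative usage sketch only; the helper name below is hypothetical and
 * not part of the driver.  Reserve space with RING_SPACE(), emit a method
 * header and its data words with BEGIN_RING()/OUT_RING(), then kick the
 * push buffer with FIRE_RING().  Should something fail after words have been
 * emitted but before FIRE_RING(), WIND_RING() discards them again.
 */
static inline int
example_emit_m2mf_nop(struct nouveau_channel *chan)
{
	int ret;

	ret = RING_SPACE(chan, 2);	/* one header word + one data word */
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
	OUT_RING(chan, 0);
	FIRE_RING(chan);
	return 0;
}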

#endif