Commit | Line | Data |
---|---|---|
6ee73861 | 1 | /* |
ebb945a9 | 2 | * Copyright 2012 Red Hat Inc. |
6ee73861 | 3 | * |
ebb945a9 BS |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
6ee73861 | 10 | * |
ebb945a9 BS |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. | |
6ee73861 | 13 | * |
ebb945a9 BS |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
6ee73861 | 21 | * |
ebb945a9 | 22 | * Authors: Ben Skeggs |
6ee73861 | 23 | */ |
05c7145d | 24 | #include "nv04.h" |
6ee73861 | 25 | |
bbf8906b | 26 | #include <core/client.h> |
9e79a853 | 27 | #include <core/device.h> |
ebb945a9 | 28 | #include <core/engctx.h> |
ebb945a9 | 29 | #include <core/handle.h> |
02a841d4 | 30 | #include <core/ramht.h> |
ebb945a9 BS |
31 | #include <subdev/instmem/nv04.h> |
32 | #include <subdev/timer.h> | |
ebb945a9 | 33 | |
05c7145d BS |
34 | #include <nvif/class.h> |
35 | #include <nvif/unpack.h> | |
ebb945a9 BS |
36 | |
/* Layout of a per-channel RAMFC entry on NV04.  Each row describes one
 * saved context field: { bits, ctxs, ctxp, regs, regp } — field width in
 * bits, bit shift and byte offset within the RAMFC entry, and the bit
 * shift and address of the PFIFO CACHE1 register it shadows (field order
 * per struct ramfc_desc — confirm against nv04.h).  Used by chan_fini to
 * save live register state, and by chan_dtor to scrub the entry. */
static struct ramfc_desc
nv04_ramfc[] = {
	{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{ 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
	{}
};
49 | ||
ebb945a9 BS |
50 | /******************************************************************************* |
51 | * FIFO channel objects | |
52 | ******************************************************************************/ | |
c420b2dc | 53 | |
ebb945a9 | 54 | int |
05c7145d BS |
55 | nv04_fifo_object_attach(struct nvkm_object *parent, |
56 | struct nvkm_object *object, u32 handle) | |
588d7d12 | 57 | { |
ebb945a9 BS |
58 | struct nv04_fifo_priv *priv = (void *)parent->engine; |
59 | struct nv04_fifo_chan *chan = (void *)parent; | |
60 | u32 context, chid = chan->base.chid; | |
61 | int ret; | |
62 | ||
63 | if (nv_iclass(object, NV_GPUOBJ_CLASS)) | |
64 | context = nv_gpuobj(object)->addr >> 4; | |
65 | else | |
66 | context = 0x00000004; /* just non-zero */ | |
67 | ||
68 | switch (nv_engidx(object->engine)) { | |
69 | case NVDEV_ENGINE_DMAOBJ: | |
70 | case NVDEV_ENGINE_SW: | |
71 | context |= 0x00000000; | |
72 | break; | |
73 | case NVDEV_ENGINE_GR: | |
74 | context |= 0x00010000; | |
75 | break; | |
76 | case NVDEV_ENGINE_MPEG: | |
77 | context |= 0x00020000; | |
78 | break; | |
79 | default: | |
80 | return -EINVAL; | |
588d7d12 FJ |
81 | } |
82 | ||
ebb945a9 BS |
83 | context |= 0x80000000; /* valid */ |
84 | context |= chid << 24; | |
85 | ||
86 | mutex_lock(&nv_subdev(priv)->mutex); | |
05c7145d | 87 | ret = nvkm_ramht_insert(priv->ramht, chid, handle, context); |
ebb945a9 BS |
88 | mutex_unlock(&nv_subdev(priv)->mutex); |
89 | return ret; | |
90 | } | |
91 | ||
92 | void | |
05c7145d | 93 | nv04_fifo_object_detach(struct nvkm_object *parent, int cookie) |
ebb945a9 BS |
94 | { |
95 | struct nv04_fifo_priv *priv = (void *)parent->engine; | |
96 | mutex_lock(&nv_subdev(priv)->mutex); | |
05c7145d | 97 | nvkm_ramht_remove(priv->ramht, cookie); |
ebb945a9 | 98 | mutex_unlock(&nv_subdev(priv)->mutex); |
588d7d12 FJ |
99 | } |
100 | ||
4c2d4222 | 101 | int |
05c7145d BS |
102 | nv04_fifo_context_attach(struct nvkm_object *parent, |
103 | struct nvkm_object *object) | |
4c2d4222 | 104 | { |
05c7145d | 105 | nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid; |
4c2d4222 BS |
106 | return 0; |
107 | } | |
108 | ||
/* Construct an NV03_CHANNEL_DMA fifo channel: unpack the creation args,
 * create the base channel object, and fill in the channel's RAMFC entry
 * with the initial pushbuffer state. */
static int
nv04_fifo_chan_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo_priv *priv = (void *)engine;
	struct nv04_fifo_chan *chan;
	int ret;

	nv_ioctl(parent, "create channel dma size %d\n", size);
	/* nvif_unpack() is a macro that assigns 'ret' (and consumes 'size')
	 * in this scope, so the else-path 'return ret' is well-defined —
	 * NOTE(review): confirm against nvif/unpack.h */
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
				 "offset %016llx\n", args->v0.version,
			 args->v0.pushbuf, args->v0.offset);
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
				       0x10000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	/* report the allocated channel id back to the client */
	args->v0.chid = chan->base.chid;

	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
	/* each RAMFC entry is 32 bytes on nv04 */
	chan->ramfc = chan->base.chid * 32;

	/* initial RAMFC state: DMA PUT/GET at the requested offset, the
	 * pushbuf dma object's instance, and the DMA fetch parameters */
	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
	nv_wo32(priv->ramfc, chan->ramfc + 0x10,
			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			     NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	return 0;
}
158 | ||
159 | void | |
05c7145d | 160 | nv04_fifo_chan_dtor(struct nvkm_object *object) |
ebb945a9 BS |
161 | { |
162 | struct nv04_fifo_priv *priv = (void *)object->engine; | |
163 | struct nv04_fifo_chan *chan = (void *)object; | |
164 | struct ramfc_desc *c = priv->ramfc_desc; | |
ff9e5279 | 165 | |
ebb945a9 BS |
166 | do { |
167 | nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000); | |
168 | } while ((++c)->bits); | |
169 | ||
05c7145d | 170 | nvkm_fifo_channel_destroy(&chan->base); |
ebb945a9 | 171 | } |
c420b2dc | 172 | |
ebb945a9 | 173 | int |
05c7145d | 174 | nv04_fifo_chan_init(struct nvkm_object *object) |
ebb945a9 BS |
175 | { |
176 | struct nv04_fifo_priv *priv = (void *)object->engine; | |
177 | struct nv04_fifo_chan *chan = (void *)object; | |
178 | u32 mask = 1 << chan->base.chid; | |
179 | unsigned long flags; | |
180 | int ret; | |
181 | ||
05c7145d | 182 | ret = nvkm_fifo_channel_init(&chan->base); |
c420b2dc | 183 | if (ret) |
ebb945a9 BS |
184 | return ret; |
185 | ||
186 | spin_lock_irqsave(&priv->base.lock, flags); | |
187 | nv_mask(priv, NV04_PFIFO_MODE, mask, mask); | |
188 | spin_unlock_irqrestore(&priv->base.lock, flags); | |
189 | return 0; | |
6ee73861 BS |
190 | } |
191 | ||
/* Take a channel offline.  If it is the channel currently loaded into
 * CACHE1, its live register state is saved back into its RAMFC entry and
 * the hardware is reset to an idle/null context.  Register write order
 * here follows the hardware's expected context-switch sequence. */
int
nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv04_fifo_priv *priv = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_gpuobj *fctx = priv->ramfc;
	struct ramfc_desc *c;
	unsigned long flags;
	u32 data = chan->ramfc;
	u32 chid;

	/* prevent fifo context switches */
	spin_lock_irqsave(&priv->base.lock, flags);
	nv_wr32(priv, NV03_PFIFO_CACHES, 0);

	/* if this channel is active, replace it with a null context */
	chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
	if (chid == chan->base.chid) {
		/* stop the pusher and puller before touching state */
		nv_mask(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 0);
		nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

		/* save each live CACHE1 register field into the matching
		 * bitfield of the channel's RAMFC entry */
		c = priv->ramfc_desc;
		do {
			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
			u32 rv = (nv_rd32(priv, c->regp) & rm) >> c->regs;
			u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
			nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
		} while ((++c)->bits);

		/* then clear the hardware copies */
		c = priv->ramfc_desc;
		do {
			nv_wr32(priv, c->regp, 0x00000000);
		} while ((++c)->bits);

		/* park CACHE1 on the (invalid) max channel id */
		nv_wr32(priv, NV03_PFIFO_CACHE1_GET, 0);
		nv_wr32(priv, NV03_PFIFO_CACHE1_PUT, 0);
		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
		nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	/* restore normal operation, after disabling dma mode */
	nv_mask(priv, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&priv->base.lock, flags);

	return nvkm_fifo_channel_fini(&chan->base, suspend);
}
242 | ||
/* Object functions for NV03_CHANNEL_DMA channel objects; map/rd32/wr32/
 * ntfy fall through to the generic fifo-channel implementations. */
static struct nvkm_ofuncs
nv04_fifo_ofuncs = {
	.ctor = nv04_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.map = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};
254 | ||
/* Classes this fifo exposes to clients: just the NV03 DMA channel. */
static struct nvkm_oclass
nv04_fifo_sclass[] = {
	{ NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
	{}
};
260 | ||
261 | /******************************************************************************* | |
262 | * FIFO context - basically just the instmem reserved for the channel | |
263 | ******************************************************************************/ | |
264 | ||
6ee73861 | 265 | int |
05c7145d BS |
266 | nv04_fifo_context_ctor(struct nvkm_object *parent, |
267 | struct nvkm_object *engine, | |
268 | struct nvkm_oclass *oclass, void *data, u32 size, | |
269 | struct nvkm_object **pobject) | |
6ee73861 | 270 | { |
ebb945a9 BS |
271 | struct nv04_fifo_base *base; |
272 | int ret; | |
6ee73861 | 273 | |
05c7145d BS |
274 | ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000, |
275 | 0x1000, NVOBJ_FLAG_HEAP, &base); | |
ebb945a9 BS |
276 | *pobject = nv_object(base); |
277 | if (ret) | |
278 | return ret; | |
6ee73861 | 279 | |
ebb945a9 BS |
280 | return 0; |
281 | } | |
6ee73861 | 282 | |
/* Engine-context class: only the constructor is NV04-specific, the rest
 * are the generic fifo-context implementations. */
static struct nvkm_oclass
nv04_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_context_ctor,
		.dtor = _nvkm_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
},
};
6ee73861 | 295 | |
ebb945a9 BS |
296 | /******************************************************************************* |
297 | * PFIFO engine | |
298 | ******************************************************************************/ | |
6ee73861 | 299 | |
/* Halt PFIFO processing and take the fifo lock; the saved irq flags are
 * handed back via *pflags and released by nv04_fifo_start(). */
void
nv04_fifo_pause(struct nvkm_fifo *pfifo, unsigned long *pflags)
__acquires(priv->base.lock)
{
	struct nv04_fifo_priv *priv = (void *)pfifo;
	unsigned long flags;

	spin_lock_irqsave(&priv->base.lock, flags);
	*pflags = flags;

	/* disable context switches and stop the puller */
	nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000000);
	nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);

	/* in some cases the puller may be left in an inconsistent state
	 * if you try to stop it while it's busy translating handles.
	 * sometimes you get a CACHE_ERROR, sometimes it just fails
	 * silently; sending incorrect instance offsets to PGRAPH after
	 * it's started up again.
	 *
	 * to avoid this, we invalidate the most recently calculated
	 * instance.
	 */
	if (!nv_wait(priv, NV04_PFIFO_CACHE1_PULL0,
		     NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000))
		nv_warn(priv, "timeout idling puller\n");

	if (nv_rd32(priv, NV04_PFIFO_CACHE1_PULL0) &
		    NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
		nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR)
		;

	nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}
6ee73861 | 332 | |
/* Resume PFIFO processing and release the lock taken (and the irq flags
 * saved) by nv04_fifo_pause(). */
void
nv04_fifo_start(struct nvkm_fifo *pfifo, unsigned long *pflags)
__releases(priv->base.lock)
{
	struct nv04_fifo_priv *priv = (void *)pfifo;
	unsigned long flags = *pflags;

	/* re-enable the puller and context switching */
	nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
	nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000001);

	spin_unlock_irqrestore(&priv->base.lock, flags);
}
345 | ||
ebb945a9 BS |
346 | static const char * |
347 | nv_dma_state_err(u32 state) | |
5178d40d | 348 | { |
ebb945a9 BS |
349 | static const char * const desc[] = { |
350 | "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE", | |
351 | "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK" | |
352 | }; | |
353 | return desc[(state >> 29) & 0x7]; | |
5178d40d BS |
354 | } |
355 | ||
/* Attempt to handle a method as a software method.  mthd 0x0000 binds an
 * object to a subchannel; if the object belongs to the SW engine the
 * binding is recorded and the hardware ENGINE field cleared.  Any other
 * method on a SW-bound subchannel is dispatched to the bound object via
 * nv_call().  Returns true if the method was consumed. */
static bool
nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data)
{
	struct nv04_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;
	bool handled = false;
	unsigned long flags;
	u32 engine;

	/* lock guards both the channel table lookup and chan->subc[] */
	spin_lock_irqsave(&priv->base.lock, flags);
	if (likely(chid >= priv->base.min && chid <= priv->base.max))
		chan = (void *)priv->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	switch (mthd) {
	case 0x0000:
		/* bind object 'data' to subchannel 'subc' */
		bind = nvkm_namedb_get(nv_namedb(chan), data);
		if (unlikely(!bind))
			break;

		if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
			engine = 0x0000000f << (subc * 4);
			chan->subc[subc] = data;
			handled = true;

			nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
		}

		nvkm_namedb_put(bind);
		break;
	default:
		/* only subchannels with ENGINE field 0 (SW) are eligible */
		engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE);
		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
			break;

		bind = nvkm_namedb_get(nv_namedb(chan), chan->subc[subc]);
		if (likely(bind)) {
			if (!nv_call(bind->object, mthd, data))
				handled = true;
			nvkm_namedb_put(bind);
		}
		break;
	}

out:
	spin_unlock_irqrestore(&priv->base.lock, flags);
	return handled;
}
407 | ||
/* Service a CACHE_ERROR interrupt: fetch the faulting method/data pair
 * from CACHE1, try to handle it as a software method, otherwise log it,
 * then ack the interrupt and step CACHE1 past the bad entry. */
static void
nv04_fifo_cache_error(struct nvkm_device *device,
		      struct nv04_fifo_priv *priv, u32 chid, u32 get)
{
	u32 mthd, data;
	int ptr;

	/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
	 * G80 chips, but CACHE1 isn't big enough for this much data.. Tests
	 * show that it wraps around to the start at GET=0x800.. No clue as to
	 * why..
	 */
	ptr = (get & 0x7ff) >> 2;

	/* the CACHE1 method/data register layout moved at NV40 */
	if (device->card_type < NV_40) {
		mthd = nv_rd32(priv, NV04_PFIFO_CACHE1_METHOD(ptr));
		data = nv_rd32(priv, NV04_PFIFO_CACHE1_DATA(ptr));
	} else {
		mthd = nv_rd32(priv, NV40_PFIFO_CACHE1_METHOD(ptr));
		data = nv_rd32(priv, NV40_PFIFO_CACHE1_DATA(ptr));
	}

	if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
		const char *client_name =
			nvkm_client_name_for_fifo_chid(&priv->base, chid);
		nv_error(priv,
			 "CACHE_ERROR - ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
			 chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
			 data);
	}

	/* ack, then bump GET past the faulting entry with the pusher
	 * paused, and restart the cache */
	nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
	nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
		nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
	nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
		nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
	nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);

	nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
		nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
}
453 | ||
/* Service a DMA_PUSHER interrupt: log the pusher state, then recover by
 * skipping to PUT (and, on NV50's indirect buffer, to IB_PUT) so the
 * channel can make progress again. */
static void
nv04_fifo_dma_pusher(struct nvkm_device *device,
		     struct nv04_fifo_priv *priv, u32 chid)
{
	const char *client_name;
	u32 dma_get = nv_rd32(priv, 0x003244);
	u32 dma_put = nv_rd32(priv, 0x003240);
	u32 push = nv_rd32(priv, 0x003220);
	u32 state = nv_rd32(priv, 0x003228);

	client_name = nvkm_client_name_for_fifo_chid(&priv->base, chid);

	if (device->card_type == NV_50) {
		/* NV50 has 40-bit get/put (high bits in separate regs)
		 * plus an indirect buffer get/put pair */
		u32 ho_get = nv_rd32(priv, 0x003328);
		u32 ho_put = nv_rd32(priv, 0x003320);
		u32 ib_get = nv_rd32(priv, 0x003334);
		u32 ib_put = nv_rd32(priv, 0x003330);

		nv_error(priv,
			 "DMA_PUSHER - ch %d [%s] get 0x%02x%08x put 0x%02x%08x ib_get 0x%08x ib_put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
			 chid, client_name, ho_get, dma_get, ho_put, dma_put,
			 ib_get, ib_put, state, nv_dma_state_err(state), push);

		/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
		nv_wr32(priv, 0x003364, 0x00000000);
		if (dma_get != dma_put || ho_get != ho_put) {
			nv_wr32(priv, 0x003244, dma_put);
			nv_wr32(priv, 0x003328, ho_put);
		} else
		if (ib_get != ib_put)
			nv_wr32(priv, 0x003334, ib_put);
	} else {
		nv_error(priv,
			 "DMA_PUSHER - ch %d [%s] get 0x%08x put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
			 chid, client_name, dma_get, dma_put, state,
			 nv_dma_state_err(state), push);

		if (dma_get != dma_put)
			nv_wr32(priv, 0x003244, dma_put);
	}

	/* clear pusher state, re-enable it, and ack the interrupt */
	nv_wr32(priv, 0x003228, 0x00000000);
	nv_wr32(priv, 0x003220, 0x00000001);
	nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}
499 | ||
/* Top-level PFIFO interrupt handler, shared by several generations (the
 * NV50-only bits are gated on card_type below). */
void
nv04_fifo_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_device *device = nv_device(subdev);
	struct nv04_fifo_priv *priv = (void *)subdev;
	u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0);
	u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask;
	u32 reassign, chid, get, sem;

	/* disable context switches while servicing; restored at the end */
	reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
	nv_wr32(priv, NV03_PFIFO_CACHES, 0);

	chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
	get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);

	if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
		nv04_fifo_cache_error(device, priv, chid, get);
		stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
	}

	if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
		nv04_fifo_dma_pusher(device, priv, chid);
		stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
	}

	if (stat & NV_PFIFO_INTR_SEMAPHORE) {
		stat &= ~NV_PFIFO_INTR_SEMAPHORE;
		nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);

		/* release the semaphore and skip past the waiting method */
		sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
		nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

		nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
		nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	if (device->card_type == NV_50) {
		if (stat & 0x00000010) {
			stat &= ~0x00000010;
			nv_wr32(priv, 0x002100, 0x00000010);
		}

		/* 0x40000000 signals fifo events to listeners */
		if (stat & 0x40000000) {
			nv_wr32(priv, 0x002100, 0x40000000);
			nvkm_fifo_uevent(&priv->base);
			stat &= ~0x40000000;
		}
	}

	if (stat) {
		/* unknown bits: mask them off so they can't storm */
		nv_warn(priv, "unknown intr 0x%08x\n", stat);
		nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
		nv_wr32(priv, NV03_PFIFO_INTR_0, stat);
	}

	nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
}
c420b2dc | 557 | |
ebb945a9 | 558 | static int |
05c7145d BS |
559 | nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, |
560 | struct nvkm_oclass *oclass, void *data, u32 size, | |
561 | struct nvkm_object **pobject) | |
c420b2dc | 562 | { |
ebb945a9 BS |
563 | struct nv04_instmem_priv *imem = nv04_instmem(parent); |
564 | struct nv04_fifo_priv *priv; | |
565 | int ret; | |
c420b2dc | 566 | |
05c7145d | 567 | ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &priv); |
ebb945a9 BS |
568 | *pobject = nv_object(priv); |
569 | if (ret) | |
570 | return ret; | |
571 | ||
05c7145d BS |
572 | nvkm_ramht_ref(imem->ramht, &priv->ramht); |
573 | nvkm_gpuobj_ref(imem->ramro, &priv->ramro); | |
574 | nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc); | |
ebb945a9 BS |
575 | |
576 | nv_subdev(priv)->unit = 0x00000100; | |
577 | nv_subdev(priv)->intr = nv04_fifo_intr; | |
578 | nv_engine(priv)->cclass = &nv04_fifo_cclass; | |
579 | nv_engine(priv)->sclass = nv04_fifo_sclass; | |
580 | priv->base.pause = nv04_fifo_pause; | |
581 | priv->base.start = nv04_fifo_start; | |
582 | priv->ramfc_desc = nv04_ramfc; | |
583 | return 0; | |
584 | } | |
c420b2dc | 585 | |
ebb945a9 | 586 | void |
05c7145d | 587 | nv04_fifo_dtor(struct nvkm_object *object) |
ebb945a9 BS |
588 | { |
589 | struct nv04_fifo_priv *priv = (void *)object; | |
05c7145d BS |
590 | nvkm_gpuobj_ref(NULL, &priv->ramfc); |
591 | nvkm_gpuobj_ref(NULL, &priv->ramro); | |
592 | nvkm_ramht_ref(NULL, &priv->ramht); | |
593 | nvkm_fifo_destroy(&priv->base); | |
c420b2dc BS |
594 | } |
595 | ||
/* Initialise the PFIFO hardware: program timing, point it at the
 * RAMHT/RAMRO/RAMFC instmem areas, park CACHE1 on the invalid max
 * channel id, then enable interrupts and processing. */
int
nv04_fifo_init(struct nvkm_object *object)
{
	struct nv04_fifo_priv *priv = (void *)object;
	int ret;

	ret = nvkm_fifo_init(&priv->base);
	if (ret)
		return ret;

	nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
	nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((priv->ramht->bits - 9) << 16) |
					(priv->ramht->gpuobj.addr >> 8));
	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
	nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);

	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);

	/* clear any stale interrupts, then enable them all */
	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
	return 0;
}
ebb945a9 | 625 | |
05c7145d BS |
/* Engine class registered with the core for NV04 PFIFO. */
struct nvkm_oclass *
nv04_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_ctor,
		.dtor = nv04_fifo_dtor,
		.init = nv04_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};