2 * Copyright 2012 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include "changf100.h"
26 #include <core/client.h>
27 #include <subdev/fb.h>
28 #include <subdev/timer.h>
30 #include <nvif/class.h>
31 #include <nvif/unpack.h>
/* Detach an engine context from a GF100 FIFO channel: kick the channel off
 * the hardware, then clear the engine's context pointer in the channel's
 * instance block.
 *
 * NOTE(review): this extract is truncated — the function's opening brace,
 * the switch's default case, the kick-timeout error return, and the closing
 * brace are missing from the visible source.
 */
34 gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
35 struct nvkm_object *object)
37 struct gf100_fifo *fifo = (void *)parent->engine;
38 struct gf100_fifo_base *base = (void *)parent->parent;
39 struct gf100_fifo_chan *chan = (void *)parent;
/* engn: the channel's instance block, holding per-engine context pointers */
40 struct nvkm_gpuobj *engn = &base->base.gpuobj;
41 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
42 struct nvkm_device *device = subdev->device;
/* Map the engine index to its context-pointer offset within the instance
 * block.  SW contexts have no hardware state, so there is nothing to detach. */
45 switch (nv_engidx(object->engine)) {
46 case NVDEV_ENGINE_SW : return 0;
47 case NVDEV_ENGINE_GR : addr = 0x0210; break;
48 case NVDEV_ENGINE_CE0 : addr = 0x0230; break;
49 case NVDEV_ENGINE_CE1 : addr = 0x0240; break;
50 case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
51 case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
52 case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
/* Kick the channel: write its id to 0x002634 and poll (up to 2ms) until the
 * hardware echoes the id back, indicating the channel has been unloaded. */
57 nvkm_wr32(device, 0x002634, chan->base.chid);
58 if (nvkm_msec(device, 2000,
59 if (nvkm_rd32(device, 0x002634) == chan->base.chid)
62 nvkm_error(subdev, "channel %d [%s] kick timeout\n",
63 chan->base.chid, nvkm_client_name(chan));
/* Zero the 64-bit context pointer for this engine in the instance block. */
69 nvkm_wo32(engn, addr + 0x00, 0x00000000);
70 nvkm_wo32(engn, addr + 0x04, 0x00000000);
/* Attach an engine context to a GF100 FIFO channel: map the context object
 * into the channel's VM (if not already mapped) and write its address into
 * the engine's context-pointer slot in the channel's instance block.
 *
 * NOTE(review): this extract is truncated — the opening brace, the switch's
 * default case, error checks on the map call, and the trailing return are
 * missing from the visible source.
 */
76 gf100_fifo_context_attach(struct nvkm_object *parent,
77 struct nvkm_object *object)
78 {
79 struct gf100_fifo_base *base = (void *)parent->parent;
80 struct nvkm_gpuobj *engn = &base->base.gpuobj;
81 struct nvkm_engctx *ectx = (void *)object;
/* Same engine-index -> instance-block-offset mapping as the detach path;
 * the two tables must stay in sync. */
85 switch (nv_engidx(object->engine)) {
86 case NVDEV_ENGINE_SW : return 0;
87 case NVDEV_ENGINE_GR : addr = 0x0210; break;
88 case NVDEV_ENGINE_CE0 : addr = 0x0230; break;
89 case NVDEV_ENGINE_CE1 : addr = 0x0240; break;
90 case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
91 case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
92 case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
/* First attach of this context to the channel: map it RW into the
 * channel's per-channel virtual address space. */
97 if (!ectx->vma.node) {
98 ret = nvkm_gpuobj_map(nv_gpuobj(ectx), base->vm,
99 NV_MEM_ACCESS_RW, &ectx->vma);
103 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
/* Publish the context's VM address; low word bit 2 (|4) presumably marks
 * the entry valid — TODO confirm against hardware documentation. */
107 nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
108 nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
/* Suspend/teardown a GF100 FIFO channel: pull it out of the runlist,
 * clear its channel-control register, and chain to the base fini.
 *
 * NOTE(review): this extract is truncated — the opening/closing braces of
 * the if-body are missing from the visible source.
 */
114 gf100_fifo_chan_fini(struct nvkm_object *object, bool suspend)
116 struct gf100_fifo *fifo = (void *)object->engine;
117 struct gf100_fifo_chan *chan = (void *)object;
118 struct nvkm_device *device = fifo->base.engine.subdev.device;
119 u32 chid = chan->base.chid;
/* Deliberate assignment-in-condition idiom: test for RUNNING and, in the
 * same expression, transition the state to STOPPED (the "== STOPPED"
 * comparison is always true; it only makes the assignment a valid operand). */
121 if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
/* Clear the channel-enable bit, then rebuild the runlist without it. */
122 nvkm_mask(device, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
123 gf100_fifo_runlist_update(fifo);
126 gf100_fifo_intr_engine(fifo);
/* Zero the channel's instance pointer register. */
128 nvkm_wr32(device, 0x003000 + (chid * 8), 0x00000000);
129 return nvkm_fifo_channel_fini(&chan->base, suspend);
133 gf100_fifo_chan_init(struct nvkm_object *object)
135 struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
136 struct gf100_fifo *fifo = (void *)object->engine;
137 struct gf100_fifo_chan *chan = (void *)object;
138 struct nvkm_device *device = fifo->base.engine.subdev.device;
139 u32 chid = chan->base.chid;
142 ret = nvkm_fifo_channel_init(&chan->base);
146 nvkm_wr32(device, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
148 if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
149 nvkm_wr32(device, 0x003004 + (chid * 8), 0x001f0001);
150 gf100_fifo_runlist_update(fifo);
/* Construct a GF100 GPFIFO channel (FERMI_CHANNEL_GPFIFO class): unpack the
 * user's ioctl args, create the base channel object, zero the channel's
 * per-channel USERD page, and fill in the RAMFC (channel state) in the
 * instance block.
 *
 * NOTE(review): this extract is truncated — the opening brace, the args
 * declaration around the v0 union member, several error-path checks
 * (unpack failure, channel-create failure), and the trailing return/brace
 * are missing from the visible source.
 */
157 gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
158 struct nvkm_oclass *oclass, void *data, u32 size,
159 struct nvkm_object **pobject)
162 struct fermi_channel_gpfifo_v0 v0;
164 struct gf100_fifo *fifo = (void *)engine;
165 struct gf100_fifo_base *base = (void *)parent;
166 struct gf100_fifo_chan *chan;
/* ramfc: channel state lives directly in the channel's instance block. */
167 struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
168 u64 usermem, ioffset, ilength;
/* Validate and log the version-0 ioctl arguments from userspace. */
171 nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
172 if (nvif_unpack(args->v0, 0, 0, false)) {
173 nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx"
174 "ioffset %016llx ilength %08x\n",
175 args->v0.version, args->v0.vm, args->v0.ioffset,
/* Create the base channel, advertising every engine this channel may
 * attach contexts for (must match the attach/detach switch tables). */
182 ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
183 fifo->user.bar.offset, 0x1000, 0,
184 (1ULL << NVDEV_ENGINE_SW) |
185 (1ULL << NVDEV_ENGINE_GR) |
186 (1ULL << NVDEV_ENGINE_CE0) |
187 (1ULL << NVDEV_ENGINE_CE1) |
188 (1ULL << NVDEV_ENGINE_MSVLD) |
189 (1ULL << NVDEV_ENGINE_MSPDEC) |
190 (1ULL << NVDEV_ENGINE_MSPPP), &chan);
191 *pobject = nv_object(chan);
195 chan->base.inst = base->base.gpuobj.addr;
/* Report the allocated channel id back to userspace. */
196 args->v0.chid = chan->base.chid;
198 nv_parent(chan)->context_attach = gf100_fifo_context_attach;
199 nv_parent(chan)->context_detach = gf100_fifo_context_detach;
/* Each channel owns one 4KiB page of USERD, indexed by channel id. */
201 usermem = chan->base.chid * 0x1000;
202 ioffset = args->v0.ioffset;
/* ilength is stored as log2 of the number of GPFIFO entries (8 bytes each). */
203 ilength = order_base_2(args->v0.ilength / 8);
/* Zero the channel's USERD page. */
205 nvkm_kmap(fifo->user.mem);
206 for (i = 0; i < 0x1000; i += 4)
207 nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
208 nvkm_done(fifo->user.mem);
/* Convert the page offset into an absolute address for RAMFC. */
209 usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
/* RAMFC setup: USERD address, GPFIFO base/size, and fixed control values.
 * The magic constants are hardware register images; the trailing comments
 * on 0xf8/0xfc name the PFIFO registers they shadow. */
212 nvkm_wo32(ramfc, 0x08, lower_32_bits(usermem));
213 nvkm_wo32(ramfc, 0x0c, upper_32_bits(usermem));
214 nvkm_wo32(ramfc, 0x10, 0x0000face);
215 nvkm_wo32(ramfc, 0x30, 0xfffff902);
216 nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
217 nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
218 nvkm_wo32(ramfc, 0x54, 0x00000002);
219 nvkm_wo32(ramfc, 0x84, 0x20400000);
220 nvkm_wo32(ramfc, 0x94, 0x30000001);
221 nvkm_wo32(ramfc, 0x9c, 0x00000100);
222 nvkm_wo32(ramfc, 0xa4, 0x1f1f1f1f);
223 nvkm_wo32(ramfc, 0xa8, 0x1f1f1f1f);
224 nvkm_wo32(ramfc, 0xac, 0x0000001f);
225 nvkm_wo32(ramfc, 0xb8, 0xf8000000);
226 nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
227 nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
/* Object functions for GF100 GPFIFO channel objects; only the ctor, init
 * and fini are GF100-specific, the rest delegate to the generic fifo
 * channel implementations.
 *
 * NOTE(review): this extract is truncated — the closing "};" of both
 * structures and the sclass array terminator are missing from the visible
 * source.
 */
232 static struct nvkm_ofuncs
233 gf100_fifo_ofuncs = {
234 .ctor = gf100_fifo_chan_ctor,
235 .dtor = _nvkm_fifo_channel_dtor,
236 .init = gf100_fifo_chan_init,
237 .fini = gf100_fifo_chan_fini,
238 .map = _nvkm_fifo_channel_map,
239 .rd32 = _nvkm_fifo_channel_rd32,
240 .wr32 = _nvkm_fifo_channel_wr32,
241 .ntfy = _nvkm_fifo_channel_ntfy
/* Channel classes exposed to userspace by this fifo implementation. */
245 gf100_fifo_sclass[] = {
246 { FERMI_CHANNEL_GPFIFO, &gf100_fifo_ofuncs },
/* Construct a GF100 FIFO context (the per-channel instance block): allocate
 * the zeroed instance block, create a page directory, write the PD address
 * and VM limit into the instance block, and bind the client's VM to it.
 *
 * NOTE(review): this extract is truncated — the opening brace, the error
 * checks after each allocation, and the trailing return/brace are missing
 * from the visible source.
 */
251 gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
252 struct nvkm_oclass *oclass, void *data, u32 size,
253 struct nvkm_object **pobject)
255 struct nvkm_device *device = nv_engine(engine)->subdev.device;
256 struct gf100_fifo_base *base;
/* 4KiB instance block, zero-filled, with a private heap for sub-allocations. */
259 ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
260 0x1000, NVOBJ_FLAG_ZERO_ALLOC |
261 NVOBJ_FLAG_HEAP, &base);
262 *pobject = nv_object(base);
/* Page directory for the channel's virtual address space. */
266 ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &base->pgd);
/* Instance block header: PD address at 0x200/0x204, VM limit (low/high
 * words) at 0x208/0x20c. */
270 nvkm_kmap(&base->base.gpuobj);
271 nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
272 nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
273 nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
274 nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
275 nvkm_done(&base->base.gpuobj);
/* Reference the client's VM against this channel's page directory. */
277 ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
/* Destroy a GF100 FIFO context: release resources in reverse order of
 * construction — drop the VM reference, delete the page directory, then
 * destroy the base context.
 *
 * NOTE(review): this extract is truncated — the opening and closing braces
 * are missing from the visible source.
 */
285 gf100_fifo_context_dtor(struct nvkm_object *object)
287 struct gf100_fifo_base *base = (void *)object;
288 nvkm_vm_ref(NULL, &base->vm, base->pgd);
289 nvkm_gpuobj_del(&base->pgd);
290 nvkm_fifo_context_destroy(&base->base);
/* Context class descriptor binding the GF100 fifo context ctor/dtor with
 * the generic context init/fini and rd32/wr32 accessors.
 *
 * NOTE(review): this extract is truncated — the declaration line above
 * ("struct nvkm_oclass") and the closing braces are missing from the
 * visible source.
 */
294 gf100_fifo_cclass = {
295 .handle = NV_ENGCTX(FIFO, 0xc0),
296 .ofuncs = &(struct nvkm_ofuncs) {
297 .ctor = gf100_fifo_context_ctor,
298 .dtor = gf100_fifo_context_dtor,
299 .init = _nvkm_fifo_context_init,
300 .fini = _nvkm_fifo_context_fini,
301 .rd32 = _nvkm_fifo_context_rd32,
302 .wr32 = _nvkm_fifo_context_wr32,