/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gk104.h"
#include "cgrp.h"
#include "chan.h"
#include "runl.h"
#include "runq.h"

#include "changk104.h"
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <subdev/mc.h>
#include <subdev/top.h>

#include <nvif/class.h>
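/* Per-channel control (what nvgpu calls NV_PCCSR): an 8-byte register pair
 * per channel at 0x800000 + chid * 8, holding the instance-block pointer and
 * valid bit at +0, and runlist selection plus enable set/clear bits at +4.
 * Register naming here is an informed reading of the hardware, not
 * documented interface names.
 */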
void
gk104_chan_stop(struct nvkm_chan *chan)
{
	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;

	nvkm_mask(device, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
}
void
gk104_chan_start(struct nvkm_chan *chan)
{
	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;

	nvkm_mask(device, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
}
void
gk104_chan_unbind(struct nvkm_chan *chan)
{
	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;

	nvkm_wr32(device, 0x800000 + (chan->id * 8), 0x00000000);
}
void
gk104_chan_bind_inst(struct nvkm_chan *chan)
{
	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;

	nvkm_wr32(device, 0x800000 + (chan->id * 8), 0x80000000 | chan->inst->addr >> 12);
}
void
gk104_chan_bind(struct nvkm_chan *chan)
{
	struct nvkm_runl *runl = chan->cgrp->runl;
	struct nvkm_device *device = runl->fifo->engine.subdev.device;

	nvkm_mask(device, 0x800004 + (chan->id * 8), 0x000f0000, runl->id << 16);
	gk104_chan_bind_inst(chan);
}
static const struct nvkm_chan_func
gk104_chan = {
	.bind = gk104_chan_bind,
	.unbind = gk104_chan_unbind,
	.start = gk104_chan_start,
	.stop = gk104_chan_stop,
	.preempt = gf100_chan_preempt,
};
/*TODO: clean this up */
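/* Decoded view of the per-engine status register (0x002640 + engn * 8):
 * busy/faulted are self-explanatory, chsw indicates a context switch in
 * progress, save/load its direction, and prev/next the outgoing/incoming
 * channel (or TSG); chan points at whichever of those is current.
 */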
struct gk104_engn_status {
	bool busy;
	bool faulted;
	bool chsw;
	bool save;
	bool load;
	struct {
		bool tsg;
		u32 id;
	} prev, next, *chan;
};
static void
gk104_engn_status(struct nvkm_engn *engn, struct gk104_engn_status *status)
{
	u32 stat = nvkm_rd32(engn->runl->fifo->engine.subdev.device, 0x002640 + (engn->id * 0x08));

	status->busy     = !!(stat & 0x80000000);
	status->faulted  = !!(stat & 0x40000000);
	status->next.tsg = !!(stat & 0x10000000);
	status->next.id  =   (stat & 0x0fff0000) >> 16;
	status->chsw     = !!(stat & 0x00008000);
	status->save     = !!(stat & 0x00004000);
	status->load     = !!(stat & 0x00002000);
	status->prev.tsg = !!(stat & 0x00001000);
	status->prev.id  =   (stat & 0x00000fff);
	status->chan     = NULL;
	if (status->busy && status->chsw) {
		if (status->load && status->save) {
			if (nvkm_engine_chsw_load(engn->engine))
				status->chan = &status->next;
			else
				status->chan = &status->prev;
		} else
		if (status->load) {
			status->chan = &status->next;
		} else {
			status->chan = &status->prev;
		}
	} else
	if (status->load) {
		status->chan = &status->prev;
	}
	ENGN_DEBUG(engn, "%08x: busy %d faulted %d chsw %d save %d load %d %sid %d%s-> %sid %d%s",
		   stat, status->busy, status->faulted, status->chsw, status->save, status->load,
		   status->prev.tsg ? "tsg" : "ch", status->prev.id,
		   status->chan == &status->prev ? "*" : " ",
		   status->next.tsg ? "tsg" : "ch", status->next.id,
		   status->chan == &status->next ? "*" : " ");
}
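/* Report which channel (or channel group, i.e. TSG) an engine is currently
 * executing, primarily for fault recovery; -ENODEV when no channel is
 * resident on the engine.
 */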
int
gk104_engn_cxid(struct nvkm_engn *engn, bool *cgid)
{
	struct gk104_engn_status status;

	gk104_engn_status(engn, &status);
	if (status.chan) {
		*cgid = status.chan->tsg;
		return status.chan->id;
	}

	return -ENODEV;
}
bool
gk104_engn_chsw(struct nvkm_engn *engn)
{
	struct gk104_engn_status status;

	gk104_engn_status(engn, &status);
	if (status.busy && status.chsw)
		return true;

	return false;
}
const struct nvkm_engn_func
gk104_engn = {
	.chsw = gk104_engn_chsw,
	.cxid = gk104_engn_cxid,
	.mmu_fault_trigger = gf100_engn_mmu_fault_trigger,
	.mmu_fault_triggered = gf100_engn_mmu_fault_triggered,
};
const struct nvkm_engn_func
gk104_engn_ce = {
	.chsw = gk104_engn_chsw,
	.cxid = gk104_engn_cxid,
	.mmu_fault_trigger = gf100_engn_mmu_fault_trigger,
	.mmu_fault_triggered = gf100_engn_mmu_fault_triggered,
};
bool
gk104_runq_idle(struct nvkm_runq *runq)
{
	struct nvkm_device *device = runq->fifo->engine.subdev.device;

	return !(nvkm_rd32(device, 0x003080 + (runq->id * 4)) & 0x0000e000);
}
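/* PBDMA (runq) interrupts: the 0x040148/0x04014c pair (0x2000 stride per
 * unit) acts as status/enable for the "HCE" errors named below, on top of
 * the primary status register handled by gf100_runq_intr().
 */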
static const struct nvkm_bitfield
gk104_runq_intr_1_names[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};
static bool
gk104_runq_intr_1(struct nvkm_runq *runq)
{
	struct nvkm_subdev *subdev = &runq->fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (runq->id * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (runq->id * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (runq->id * 0x2000)) & 0xfff;
	char msg[128];

	if (stat & 0x80000000) {
		if (runq->func->intr_1_ctxnotvalid &&
		    runq->func->intr_1_ctxnotvalid(runq, chid))
			stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_runq_intr_1_names, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   runq->id, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (runq->id * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (runq->id * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (runq->id * 0x2000), stat);
	return true;
}
const struct nvkm_bitfield
gk104_runq_intr_0_names[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};
bool
gk104_runq_intr(struct nvkm_runq *runq, struct nvkm_runl *null)
{
	bool intr0 = gf100_runq_intr(runq, NULL);
	bool intr1 = gk104_runq_intr_1(runq);

	return intr0 || intr1;
}
void
gk104_runq_init(struct nvkm_runq *runq)
{
	struct nvkm_device *device = runq->fifo->engine.subdev.device;

	gf100_runq_init(runq);

	nvkm_wr32(device, 0x040148 + (runq->id * 0x2000), 0xffffffff); /* HCE.INTR */
	nvkm_wr32(device, 0x04014c + (runq->id * 0x2000), 0xffffffff); /* HCE.INTREN */
}
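/* Per-PBDMA bitmask of the runlists the unit serves (0x002390 + id * 4). */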
static u32
gk104_runq_runm(struct nvkm_runq *runq)
{
	return nvkm_rd32(runq->fifo->engine.subdev.device, 0x002390 + (runq->id * 0x04));
}
const struct nvkm_runq_func
gk104_runq = {
	.init = gk104_runq_init,
	.intr = gk104_runq_intr,
	.intr_0_names = gk104_runq_intr_0_names,
	.idle = gk104_runq_idle,
};
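/* Runlist control: 0x002630 blocks/allows scheduling per runlist, 0x00262c
 * acks engine faults, and 0x002284 + id * 8 reports whether a submitted
 * runlist update is still pending.
 */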
void
gk104_runl_fault_clear(struct nvkm_runl *runl)
{
	nvkm_wr32(runl->fifo->engine.subdev.device, 0x00262c, BIT(runl->id));
}
void
gk104_runl_allow(struct nvkm_runl *runl, u32 engm)
{
	nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), 0x00000000);
}
void
gk104_runl_block(struct nvkm_runl *runl, u32 engm)
{
	nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), BIT(runl->id));
}
bool
gk104_runl_pending(struct nvkm_runl *runl)
{
	struct nvkm_device *device = runl->fifo->engine.subdev.device;

	return nvkm_rd32(device, 0x002284 + (runl->id * 0x08)) & 0x00100000;
}
void
gk104_runl_commit(struct nvkm_runl *runl, struct nvkm_memory *memory, u32 start, int count)
{
	struct nvkm_fifo *fifo = runl->fifo;
	struct nvkm_device *device = fifo->engine.subdev.device;
	u64 addr = nvkm_memory_addr(memory) + start;
	int target;

	switch (nvkm_memory_target(memory)) {
	case NVKM_MEM_TARGET_VRAM: target = 0; break;
	case NVKM_MEM_TARGET_NCOH: target = 3; break;
	default:
		WARN_ON(1);
		return;
	}

	spin_lock_irq(&fifo->lock);
	nvkm_wr32(device, 0x002270, (target << 28) | (addr >> 12));
	nvkm_wr32(device, 0x002274, (runl->id << 20) | count);
	spin_unlock_irq(&fifo->lock);
}
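/* Kepler runlist entries are two words: the channel ID, then zero. */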
void
gk104_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
{
	nvkm_wo32(memory, offset + 0, chan->id);
	nvkm_wo32(memory, offset + 4, 0x00000000);
}
static const struct nvkm_runl_func
gk104_runl = {
	.size = 8,
	.update = nv50_runl_update,
	.insert_chan = gk104_runl_insert_chan,
	.commit = gk104_runl_commit,
	.wait = nv50_runl_wait,
	.pending = gk104_runl_pending,
	.block = gk104_runl_block,
	.allow = gk104_runl_allow,
	.fault_clear = gk104_runl_fault_clear,
	.preempt_pending = gf100_runl_preempt_pending,
};
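/* Map an nvkm_engine back to this FIFO's engine index; SW is special-cased
 * because it has no entry in the topology table.
 */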
int
gk104_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	int engn;

	if (engine->subdev.type == NVKM_ENGINE_SW)
		return GK104_FIFO_ENGN_SW;

	for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
		if (fifo->engine[engn].engine == engine)
			return engn;
	}

	WARN_ON(1);
	return -1;
}
static const struct nvkm_enum
gk104_fifo_mmu_fault_engine[] = {
	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
	{ 0x01, "DISPLAY" },
	{ 0x02, "CAPTURE" },
	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x06, "SCHED" },
	{ 0x07, "HOST0" },
	{ 0x08, "HOST1" },
	{ 0x09, "HOST2" },
	{ 0x0a, "HOST3" },
	{ 0x0b, "HOST4" },
	{ 0x0c, "HOST5" },
	{ 0x0d, "HOST6" },
	{ 0x0e, "HOST7" },
	{ 0x0f, "HOSTSR" },
	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE, 0 },
	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE, 1 },
	{ 0x17, "PMU" },
	{ 0x18, "PTP" },
	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE, 2 },
	{}
};
const struct nvkm_enum
gk104_fifo_mmu_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};
const struct nvkm_enum
gk104_fifo_mmu_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};
const struct nvkm_enum
gk104_fifo_mmu_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};
const struct nvkm_fifo_func_mmu_fault
gk104_fifo_mmu_fault = {
	.recover = gf100_fifo_mmu_fault_recover,
	.access = gf100_fifo_mmu_fault_access,
	.engine = gk104_fifo_mmu_fault_engine,
	.reason = gk104_fifo_mmu_fault_reason,
	.hubclient = gk104_fifo_mmu_fault_hubclient,
	.gpcclient = gk104_fifo_mmu_fault_gpcclient,
};
static const struct nvkm_enum
gk104_fifo_intr_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};
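/* BIND_ERROR: the reason code in 0x00252c is decoded via the table above. */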
void
gk104_fifo_intr_bind(struct nvkm_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	u32 intr = nvkm_rd32(subdev->device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en = nvkm_enum_find(gk104_fifo_intr_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}
void
gk104_fifo_intr_chsw(struct nvkm_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00256c);

	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
	nvkm_wr32(device, 0x00256c, stat);
}
void
gk104_fifo_intr_dropped_fault(struct nvkm_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	u32 stat = nvkm_rd32(subdev->device, 0x00259c);

	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}
void
gk104_fifo_intr_runlist(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_runl *runl;
	u32 mask = nvkm_rd32(device, 0x002a00);

	nvkm_runl_foreach_cond(runl, fifo, mask & BIT(runl->id)) {
		nvkm_wr32(device, 0x002a00, BIT(runl->id));
	}
}
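/* Top-level FIFO interrupt handler: 0x002100 is status, 0x002140 the enable
 * mask.  Each recognised source is acked by writing its bit back to status;
 * anything left over is masked off to prevent an interrupt storm.
 */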
irqreturn_t
gk104_fifo_intr(struct nvkm_inth *inth)
{
	struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth);
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		gf100_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		gf100_fifo_intr_mmu_fault(fifo);
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		if (gf100_fifo_intr_pbdma(fifo))
			stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nvkm_wr32(device, 0x002100, 0x80000000);
		nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		spin_lock(&fifo->lock);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		spin_unlock(&fifo->lock);
		nvkm_wr32(device, 0x002100, stat);
	}

	return IRQ_HANDLED;
}
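/* Enable the requested PBDMA units (0x000204) and, presumably, their
 * interrupt/engine routing via 0x002a04; the individual bit meanings of
 * the latter are not documented, so all implemented bits are simply set.
 */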
void
gk104_fifo_init_pbdmas(struct nvkm_fifo *fifo, u32 mask)
{
	struct nvkm_device *device = fifo->engine.subdev.device;

	nvkm_wr32(device, 0x000204, mask);
	nvkm_mask(device, 0x002a04, 0xbfffffff, 0xbfffffff);
}
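/* Point the USERD aperture (0x002254) at the BAR1 mapping created during
 * oneinit, ack any stale interrupts, and unmask everything except the
 * non-stall event bit (31), which is enabled on demand.
 */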
void
gk104_fifo_init(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;

	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
}
int
gk104_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_top_device *tdev;
	struct nvkm_runl *runl;
	struct nvkm_runq *runq;
	const struct nvkm_engn_func *func;

	nvkm_list_foreach(tdev, &device->top->device, head, tdev->runlist >= 0) {
		runl = nvkm_runl_get(fifo, tdev->runlist, tdev->runlist);
		if (!runl) {
			runl = nvkm_runl_new(fifo, tdev->runlist, tdev->runlist, 0);
			if (IS_ERR(runl))
				return PTR_ERR(runl);

			nvkm_runq_foreach_cond(runq, fifo, gk104_runq_runm(runq) & BIT(runl->id)) {
				if (WARN_ON(runl->runq_nr == ARRAY_SIZE(runl->runq)))
					return -ENOMEM;

				runl->runq[runl->runq_nr++] = runq;
			}
		}

		if (tdev->engine < 0)
			continue;

		switch (tdev->type) {
		case NVKM_ENGINE_CE:
			func = fifo->func->engn_ce;
			break;
		case NVKM_ENGINE_GR:
			nvkm_runl_add(runl, 15, &gf100_engn_sw, NVKM_ENGINE_SW, 0);
			fallthrough;
		default:
			func = fifo->func->engn;
			break;
		}

		nvkm_runl_add(runl, tdev->engine, func, tdev->type, tdev->inst);
	}

	return 0;
}
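/* Kepler host exposes 4096 channel IDs. */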
int
gk104_fifo_chid_nr(struct nvkm_fifo *fifo)
{
	return 4096;
}
int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
	struct nvkm_top_device *tdev;
	int ret;

	/* Determine runlist configuration from topology device info. */
	list_for_each_entry(tdev, &device->top->device, head) {
		const int engn = tdev->engine;

		if (engn < 0)
			continue;

		fifo->engine[engn].engine = nvkm_device_engine(device, tdev->type, tdev->inst);
		fifo->engine_nr = max(fifo->engine_nr, engn + 1);
		fifo->runlist[tdev->runlist].engm |= BIT(engn);
		fifo->runlist[tdev->runlist].engm_sw |= BIT(engn);
		if (tdev->type == NVKM_ENGINE_GR)
			fifo->runlist[tdev->runlist].engm_sw |= BIT(GK104_FIFO_ENGN_SW);
		fifo->runlist_nr = max(fifo->runlist_nr, tdev->runlist + 1);
	}

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      fifo->base.nr * 0x200, 0x1000, true,
			      &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
			   &fifo->user.bar);
	if (ret)
		return ret;

	return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}
void
gk104_fifo_dtor(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;

	nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
	nvkm_memory_unref(&fifo->user.mem);
}
int
gk104_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
		enum nvkm_subdev_type type, int inst, int nr, struct nvkm_fifo **pfifo)
{
	struct gk104_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	*pfifo = &fifo->base;

	return nvkm_fifo_ctor(func, device, type, inst, &fifo->base);
}
static const struct nvkm_fifo_func
gk104_fifo = {
	.dtor = gk104_fifo_dtor,
	.oneinit = gk104_fifo_oneinit,
	.chid_nr = gk104_fifo_chid_nr,
	.chid_ctor = gf100_fifo_chid_ctor,
	.runq_nr = gf100_fifo_runq_nr,
	.runl_ctor = gk104_fifo_runl_ctor,
	.init = gk104_fifo_init,
	.init_pbdmas = gk104_fifo_init_pbdmas,
	.intr = gk104_fifo_intr,
	.intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
	.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
	.mmu_fault = &gk104_fifo_mmu_fault,
	.engine_id = gk104_fifo_engine_id,
	.nonstall = &gf100_fifo_nonstall,
	.runl = &gk104_runl,
	.runq = &gk104_runq,
	.engn = &gk104_engn,
	.engn_ce = &gk104_engn_ce,
	.cgrp = {{ }, &nv04_cgrp },
	.chan = {{ 0, 0, KEPLER_CHANNEL_GPFIFO_A }, &gk104_chan, .ctor = &gk104_fifo_gpfifo_new },
};
int
gk104_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	       struct nvkm_fifo **pfifo)
{
	return gk104_fifo_new_(&gk104_fifo, device, type, inst, 0, pfifo);
}