2 * Copyright 2012 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include <core/object.h>
26 #include <core/device.h>
27 #include <core/client.h>
28 #include <core/option.h>
29 #include <nvif/unpack.h>
30 #include <nvif/class.h>
32 #include <subdev/bios.h>
33 #include <subdev/fb.h>
34 #include <subdev/instmem.h>
/* Global registry of all known devices; every access is guarded by
 * nv_devices_mutex (find/list/create/destroy all take it).
 */
static DEFINE_MUTEX(nv_devices_mutex);
static LIST_HEAD(nv_devices);
42 struct nouveau_device *
43 nouveau_device_find(u64 name)
45 struct nouveau_device *device, *match = NULL;
46 mutex_lock(&nv_devices_mutex);
47 list_for_each_entry(device, &nv_devices, head) {
48 if (device->handle == name) {
53 mutex_unlock(&nv_devices_mutex);
58 nouveau_device_list(u64 *name, int size)
60 struct nouveau_device *device;
62 mutex_lock(&nv_devices_mutex);
63 list_for_each_entry(device, &nv_devices, head) {
65 name[nr - 1] = device->handle;
67 mutex_unlock(&nv_devices_mutex);
71 /******************************************************************************
72 * nouveau_devobj (0x0080): class implementation
73 *****************************************************************************/
75 struct nouveau_devobj {
76 struct nouveau_parent base;
77 struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
/*
 * NV_DEVICE_V0_INFO method handler: fills a nv_device_info_v0 reply with
 * the bus/platform type, chip family, chipset id/revision and RAM sizes.
 * NOTE(review): this listing is a sampled extract — braces, several case
 * labels and return statements are elided, and the leading numbers on each
 * line are listing artifacts, not code.
 */
81 nouveau_devobj_info(struct nouveau_object *object, void *data, u32 size)
83 struct nouveau_device *device = nv_device(object);
84 struct nouveau_fb *pfb = nouveau_fb(device);
85 struct nouveau_instmem *imem = nouveau_instmem(device);
87 struct nv_device_info_v0 v0;
/* Validate/unpack the client-supplied argument structure. */
91 nv_ioctl(object, "device info size %d\n", size);
92 if (nvif_unpack(args->v0, 0, 0, false)) {
93 nv_ioctl(object, "device info vers %d\n", args->v0.version);
/* Classify the bus: IGP chipsets first (case labels elided here),
 * otherwise probe AGP/PCIe/PCI capability, falling back to SoC when
 * there is no PCI device. */
97 switch (device->chipset) {
108 args->v0.platform = NV_DEVICE_INFO_V0_IGP;
112 if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP))
113 args->v0.platform = NV_DEVICE_INFO_V0_AGP;
115 if (pci_is_pcie(device->pdev))
116 args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
118 args->v0.platform = NV_DEVICE_INFO_V0_PCI;
120 args->v0.platform = NV_DEVICE_INFO_V0_SOC;
/* Translate the internal card_type into the public family enum. */
125 switch (device->card_type) {
126 case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break;
128 case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break;
129 case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break;
130 case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break;
131 case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break;
132 case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break;
133 case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
134 case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
135 case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
/* Report chipset/revision; ram_user is ram_size minus the instmem
 * reservation when an instmem subdev exists. */
141 args->v0.chipset = device->chipset;
142 args->v0.revision = device->chiprev;
143 if (pfb) args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
144 else args->v0.ram_size = args->v0.ram_user = 0;
145 if (imem) args->v0.ram_user = args->v0.ram_user - imem->reserved;
150 nouveau_devobj_mthd(struct nouveau_object *object, u32 mthd,
151 void *data, u32 size)
154 case NV_DEVICE_V0_INFO:
155 return nouveau_devobj_info(object, data, size);
163 nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
165 return nv_rd08(object->engine, addr);
169 nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
171 return nv_rd16(object->engine, addr);
175 nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
177 return nv_rd32(object->engine, addr);
181 nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
183 nv_wr08(object->engine, addr, data);
187 nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
189 nv_wr16(object->engine, addr, data);
193 nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
195 nv_wr32(object->engine, addr, data);
199 nouveau_devobj_map(struct nouveau_object *object, u64 *addr, u32 *size)
201 struct nouveau_device *device = nv_device(object);
202 *addr = nv_device_resource_start(device, 0);
203 *size = nv_device_resource_len(device, 0);
207 static const u64 disable_map[] = {
208 [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_V0_DISABLE_VBIOS,
209 [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_V0_DISABLE_CORE,
210 [NVDEV_SUBDEV_GPIO] = NV_DEVICE_V0_DISABLE_CORE,
211 [NVDEV_SUBDEV_I2C] = NV_DEVICE_V0_DISABLE_CORE,
212 [NVDEV_SUBDEV_CLK ] = NV_DEVICE_V0_DISABLE_CORE,
213 [NVDEV_SUBDEV_MXM] = NV_DEVICE_V0_DISABLE_CORE,
214 [NVDEV_SUBDEV_MC] = NV_DEVICE_V0_DISABLE_CORE,
215 [NVDEV_SUBDEV_BUS] = NV_DEVICE_V0_DISABLE_CORE,
216 [NVDEV_SUBDEV_TIMER] = NV_DEVICE_V0_DISABLE_CORE,
217 [NVDEV_SUBDEV_FB] = NV_DEVICE_V0_DISABLE_CORE,
218 [NVDEV_SUBDEV_LTC] = NV_DEVICE_V0_DISABLE_CORE,
219 [NVDEV_SUBDEV_IBUS] = NV_DEVICE_V0_DISABLE_CORE,
220 [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_V0_DISABLE_CORE,
221 [NVDEV_SUBDEV_MMU] = NV_DEVICE_V0_DISABLE_CORE,
222 [NVDEV_SUBDEV_BAR] = NV_DEVICE_V0_DISABLE_CORE,
223 [NVDEV_SUBDEV_VOLT] = NV_DEVICE_V0_DISABLE_CORE,
224 [NVDEV_SUBDEV_THERM] = NV_DEVICE_V0_DISABLE_CORE,
225 [NVDEV_SUBDEV_PMU] = NV_DEVICE_V0_DISABLE_CORE,
226 [NVDEV_SUBDEV_FUSE] = NV_DEVICE_V0_DISABLE_CORE,
227 [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_V0_DISABLE_CORE,
228 [NVDEV_ENGINE_PERFMON] = NV_DEVICE_V0_DISABLE_CORE,
229 [NVDEV_ENGINE_FIFO] = NV_DEVICE_V0_DISABLE_FIFO,
230 [NVDEV_ENGINE_SW] = NV_DEVICE_V0_DISABLE_FIFO,
231 [NVDEV_ENGINE_GR] = NV_DEVICE_V0_DISABLE_GR,
232 [NVDEV_ENGINE_MPEG] = NV_DEVICE_V0_DISABLE_MPEG,
233 [NVDEV_ENGINE_ME] = NV_DEVICE_V0_DISABLE_ME,
234 [NVDEV_ENGINE_VP] = NV_DEVICE_V0_DISABLE_VP,
235 [NVDEV_ENGINE_CIPHER] = NV_DEVICE_V0_DISABLE_CIPHER,
236 [NVDEV_ENGINE_BSP] = NV_DEVICE_V0_DISABLE_BSP,
237 [NVDEV_ENGINE_PPP] = NV_DEVICE_V0_DISABLE_PPP,
238 [NVDEV_ENGINE_CE0] = NV_DEVICE_V0_DISABLE_CE0,
239 [NVDEV_ENGINE_CE1] = NV_DEVICE_V0_DISABLE_CE1,
240 [NVDEV_ENGINE_CE2] = NV_DEVICE_V0_DISABLE_CE2,
241 [NVDEV_ENGINE_VIC] = NV_DEVICE_V0_DISABLE_VIC,
242 [NVDEV_ENGINE_VENC] = NV_DEVICE_V0_DISABLE_VENC,
243 [NVDEV_ENGINE_DISP] = NV_DEVICE_V0_DISABLE_DISP,
244 [NVDEV_ENGINE_MSVLD] = NV_DEVICE_V0_DISABLE_MSVLD,
245 [NVDEV_ENGINE_SEC] = NV_DEVICE_V0_DISABLE_SEC,
246 [NVDEV_SUBDEV_NR] = 0,
250 nouveau_devobj_dtor(struct nouveau_object *object)
252 struct nouveau_devobj *devobj = (void *)object;
255 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
256 nouveau_object_ref(NULL, &devobj->subdev[i]);
258 nouveau_parent_destroy(&devobj->base);
261 static struct nouveau_oclass
262 nouveau_devobj_oclass_super = {
264 .ofuncs = &(struct nouveau_ofuncs) {
265 .dtor = nouveau_devobj_dtor,
266 .init = _nouveau_parent_init,
267 .fini = _nouveau_parent_fini,
268 .mthd = nouveau_devobj_mthd,
269 .map = nouveau_devobj_map,
270 .rd08 = nouveau_devobj_rd08,
271 .rd16 = nouveau_devobj_rd16,
272 .rd32 = nouveau_devobj_rd32,
273 .wr08 = nouveau_devobj_wr08,
274 .wr16 = nouveau_devobj_wr16,
275 .wr32 = nouveau_devobj_wr32,
/*
 * Constructor for a client's device object (class 0x0080).  Unpacks the
 * nv_device_v0 args, locates the requested device, optionally performs
 * first-time chipset identification via a temporary MMIO mapping, then
 * instantiates every enabled subdev/engine for the client.
 * NOTE(review): this listing is a sampled extract — braces, error paths,
 * case labels and several statements are elided; the leading numbers on
 * each line are listing artifacts, not code.
 */
280 nouveau_devobj_ctor(struct nouveau_object *parent,
281 struct nouveau_object *engine,
282 struct nouveau_oclass *oclass, void *data, u32 size,
283 struct nouveau_object **pobject)
286 struct nv_device_v0 v0;
288 struct nouveau_client *client = nv_client(parent);
289 struct nouveau_device *device;
290 struct nouveau_devobj *devobj;
292 u64 disable, mmio_base, mmio_size;
/* Unpack and trace the client-supplied creation arguments. */
296 nv_ioctl(parent, "create device size %d\n", size);
297 if (nvif_unpack(args->v0, 0, 0, false)) {
298 nv_ioctl(parent, "create device v%d device %016llx "
299 "disable %016llx debug0 %016llx\n",
300 args->v0.version, args->v0.device,
301 args->v0.disable, args->v0.debug0);
305 /* give priviledged clients register access */
307 oclass = &nouveau_devobj_oclass_super;
309 /* find the device subdev that matches what the client requested */
310 device = nv_device(client->device);
311 if (args->v0.device != ~0) {
312 device = nouveau_device_find(args->v0.device);
/* Create the parent object; these engine bits define which object
 * classes a child of the devobj may be created under. */
317 ret = nouveau_parent_create(parent, nv_object(device), oclass, 0,
318 nouveau_control_oclass,
319 (1ULL << NVDEV_ENGINE_DMAOBJ) |
320 (1ULL << NVDEV_ENGINE_FIFO) |
321 (1ULL << NVDEV_ENGINE_DISP) |
322 (1ULL << NVDEV_ENGINE_PERFMON), &devobj);
323 *pobject = nv_object(devobj);
327 mmio_base = nv_device_resource_start(device, 0);
328 mmio_size = nv_device_resource_len(device, 0);
330 /* translate api disable mask into internal mapping */
331 disable = args->v0.debug0;
332 for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
333 if (args->v0.disable & disable_map[i])
334 disable |= (1ULL << i);
337 /* identify the chipset, and determine classes of subdev/engines */
338 if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY) &&
339 !device->card_type) {
/* Temporary mapping of the low register window, used only for
 * identification before the full mmio mapping exists. */
340 map = ioremap(mmio_base, 0x102000);
344 /* switch mmio to cpu's native endianness */
346 if (ioread32_native(map + 0x000004) != 0x00000000)
348 if (ioread32_native(map + 0x000004) == 0x00000000)
350 iowrite32_native(0x01000001, map + 0x000004);
352 /* read boot0 and strapping information */
353 boot0 = ioread32_native(map + 0x000000);
354 strap = ioread32_native(map + 0x101000);
357 /* determine chipset and derive architecture from it */
358 if ((boot0 & 0x1f000000) > 0) {
359 device->chipset = (boot0 & 0x1ff00000) >> 20;
360 device->chiprev = (boot0 & 0x000000ff);
361 switch (device->chipset & 0x1f0) {
/* 0x461 is a bitmask of NV1x chipset low nibbles that belong to
 * the NV_10 (Celsius) family rather than NV_11. */
363 if (0x461 & (1 << (device->chipset & 0xf)))
364 device->card_type = NV_10;
366 device->card_type = NV_11;
367 device->chiprev = 0x00;
370 case 0x020: device->card_type = NV_20; break;
371 case 0x030: device->card_type = NV_30; break;
373 case 0x060: device->card_type = NV_40; break;
377 case 0x0a0: device->card_type = NV_50; break;
379 case 0x0d0: device->card_type = NV_C0; break;
382 case 0x100: device->card_type = NV_E0; break;
384 case 0x120: device->card_type = GM100; break;
/* Pre-NV10 boards encode chipset differently in boot0. */
389 if ((boot0 & 0xff00fff0) == 0x20004000) {
390 if (boot0 & 0x00f00000)
391 device->chipset = 0x05;
393 device->chipset = 0x04;
394 device->card_type = NV_04;
/* Per-generation identify hooks populate device->oclass[]. */
397 switch (device->card_type) {
398 case NV_04: ret = nv04_identify(device); break;
400 case NV_11: ret = nv10_identify(device); break;
401 case NV_20: ret = nv20_identify(device); break;
402 case NV_30: ret = nv30_identify(device); break;
403 case NV_40: ret = nv40_identify(device); break;
404 case NV_50: ret = nv50_identify(device); break;
405 case NV_C0: ret = nvc0_identify(device); break;
406 case NV_E0: ret = nve0_identify(device); break;
407 case GM100: ret = gm100_identify(device); break;
414 nv_error(device, "unknown chipset, 0x%08x\n", boot0);
418 nv_info(device, "BOOT0 : 0x%08x\n", boot0);
419 nv_info(device, "Chipset: %s (NV%02X)\n",
420 device->cname, device->chipset);
421 nv_info(device, "Family : NV%02X\n", device->card_type);
423 /* determine frequency of timing crystal */
424 if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
425 (device->chipset >= 0x20 && device->chipset < 0x25))
/* Crystal frequency decoded from strap bits (kHz). */
431 case 0x00000000: device->crystal = 13500; break;
432 case 0x00000040: device->crystal = 14318; break;
433 case 0x00400000: device->crystal = 27000; break;
434 case 0x00400040: device->crystal = 25000; break;
437 nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
/* Identification disabled: stub out everything except the vbios. */
439 if ( (args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY)) {
440 device->cname = "NULL";
441 device->oclass[NVDEV_SUBDEV_VBIOS] = &nouveau_bios_oclass;
/* Establish the persistent register mapping, if allowed/needed. */
444 if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_MMIO) &&
445 !nv_subdev(device)->mmio) {
446 nv_subdev(device)->mmio = ioremap(mmio_base, mmio_size);
447 if (!nv_subdev(device)->mmio) {
448 nv_error(device, "unable to map device registers\n");
453 /* ensure requested subsystems are available for use */
454 for (i = 1, c = 1; i < NVDEV_SUBDEV_NR; i++) {
455 if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
/* Subdev already exists (created by another client): share it. */
458 if (device->subdev[i]) {
459 nouveau_object_ref(device->subdev[i],
464 ret = nouveau_object_ctor(nv_object(device), NULL,
472 device->subdev[i] = devobj->subdev[i];
474 /* note: can't init *any* subdevs until devinit has been run
475 * due to not knowing exactly what the vbios init tables will
476 * mess with. devinit also can't be run until all of its
477 * dependencies have been created.
479 * this code delays init of any subdev until all of devinit's
480 * dependencies have been created, and then initialises each
481 * subdev in turn as they're created.
483 while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) {
484 struct nouveau_object *subdev = devobj->subdev[c++];
485 if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) {
486 ret = nouveau_object_inc(subdev);
489 atomic_dec(&nv_object(device)->usecount);
492 nouveau_subdev_reset(subdev);
500 static struct nouveau_ofuncs
501 nouveau_devobj_ofuncs = {
502 .ctor = nouveau_devobj_ctor,
503 .dtor = nouveau_devobj_dtor,
504 .init = _nouveau_parent_init,
505 .fini = _nouveau_parent_fini,
506 .mthd = nouveau_devobj_mthd,
509 /******************************************************************************
510 * nouveau_device: engine functions
511 *****************************************************************************/
513 struct nouveau_device *
516 struct nouveau_object *device = nv_object(obj);
517 if (device->engine == NULL) {
518 while (device && device->parent)
519 device = device->parent;
521 device = &nv_object(obj)->engine->subdev.object;
522 if (device && device->parent)
523 device = device->parent;
525 #if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
526 if (unlikely(!device))
527 nv_assert("BAD CAST -> NvDevice, 0x%08x\n", nv_hclass(obj));
529 return (void *)device;
532 static struct nouveau_oclass
533 nouveau_device_sclass[] = {
534 { 0x0080, &nouveau_devobj_ofuncs },
539 nouveau_device_event_ctor(struct nouveau_object *object, void *data, u32 size,
540 struct nvkm_notify *notify)
542 if (!WARN_ON(size != 0)) {
551 static const struct nvkm_event_func
552 nouveau_device_event_func = {
553 .ctor = nouveau_device_event_ctor,
/*
 * Suspend/teardown path for the device engine: decrement every non-engine
 * subdev in reverse order, then run ACPI fini.  On failure the second loop
 * re-increments the already-stopped subdevs to roll back.
 * NOTE(review): sampled extract — braces, error labels and the final
 * return are elided; leading numbers are listing artifacts.
 */
557 nouveau_device_fini(struct nouveau_object *object, bool suspend)
559 struct nouveau_device *device = (void *)object;
560 struct nouveau_object *subdev;
/* Stop subdevs in reverse creation order; engines are skipped here. */
563 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
564 if ((subdev = device->subdev[i])) {
565 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
566 ret = nouveau_object_dec(subdev, suspend);
573 ret = nvkm_acpi_fini(device, suspend);
/* Error rollback: restart everything stopped so far. */
575 for (; ret && i < NVDEV_SUBDEV_NR; i++) {
576 if ((subdev = device->subdev[i])) {
577 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
578 ret = nouveau_object_inc(subdev);
/*
 * Init path for the device engine: run ACPI init, then bring up and reset
 * every non-engine subdev in creation order.  On failure the second loop
 * walks back down, decrementing the subdevs already started, and ACPI is
 * torn down again.
 * NOTE(review): sampled extract — braces, error labels and the final
 * return are elided; leading numbers are listing artifacts.
 */
590 nouveau_device_init(struct nouveau_object *object)
592 struct nouveau_device *device = (void *)object;
593 struct nouveau_object *subdev;
596 ret = nvkm_acpi_init(device);
/* Start subdevs in creation order; engines are brought up elsewhere. */
600 for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
601 if ((subdev = device->subdev[i])) {
602 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
603 ret = nouveau_object_inc(subdev);
607 nouveau_subdev_reset(subdev);
/* Error rollback: stop everything started so far. */
614 for (--i; ret && i >= 0; i--) {
615 if ((subdev = device->subdev[i])) {
616 if (!nv_iclass(subdev, NV_ENGINE_CLASS))
617 nouveau_object_dec(subdev, false);
622 nvkm_acpi_fini(device, false);
627 nouveau_device_dtor(struct nouveau_object *object)
629 struct nouveau_device *device = (void *)object;
631 nvkm_event_fini(&device->event);
633 mutex_lock(&nv_devices_mutex);
634 list_del(&device->head);
635 mutex_unlock(&nv_devices_mutex);
637 if (nv_subdev(device)->mmio)
638 iounmap(nv_subdev(device)->mmio);
640 nouveau_engine_destroy(&device->engine);
644 nv_device_resource_start(struct nouveau_device *device, unsigned int bar)
646 if (nv_device_is_pci(device)) {
647 return pci_resource_start(device->pdev, bar);
649 struct resource *res;
650 res = platform_get_resource(device->platformdev,
651 IORESOURCE_MEM, bar);
659 nv_device_resource_len(struct nouveau_device *device, unsigned int bar)
661 if (nv_device_is_pci(device)) {
662 return pci_resource_len(device->pdev, bar);
664 struct resource *res;
665 res = platform_get_resource(device->platformdev,
666 IORESOURCE_MEM, bar);
669 return resource_size(res);
674 nv_device_get_irq(struct nouveau_device *device, bool stall)
676 if (nv_device_is_pci(device)) {
677 return device->pdev->irq;
679 return platform_get_irq_byname(device->platformdev,
680 stall ? "stall" : "nonstall");
684 static struct nouveau_oclass
685 nouveau_device_oclass = {
686 .handle = NV_ENGINE(DEVICE, 0x00),
687 .ofuncs = &(struct nouveau_ofuncs) {
688 .dtor = nouveau_device_dtor,
689 .init = nouveau_device_init,
690 .fini = nouveau_device_fini,
/*
 * Allocate and register a new device engine: rejects duplicate handles,
 * creates the engine object, records bus type and naming/option strings,
 * adds the device to the global list, and sets up its event source.
 * NOTE(review): sampled extract — the function's tail (error handling and
 * return) lies beyond the visible lines; braces and several statements
 * are elided; leading numbers are listing artifacts.
 */
695 nouveau_device_create_(void *dev, enum nv_bus_type type, u64 name,
696 const char *sname, const char *cfg, const char *dbg,
697 int length, void **pobject)
699 struct nouveau_device *device;
/* Refuse to create a second device with the same handle. */
702 mutex_lock(&nv_devices_mutex);
703 list_for_each_entry(device, &nv_devices, head) {
704 if (device->handle == name)
708 ret = nouveau_engine_create_(NULL, NULL, &nouveau_device_oclass, true,
709 "DEVICE", "device", length, pobject);
/* Record the bus-specific device pointer. */
715 case NOUVEAU_BUS_PCI:
718 case NOUVEAU_BUS_PLATFORM:
719 device->platformdev = dev;
722 device->handle = name;
723 device->cfgopt = cfg;
724 device->dbgopt = dbg;
725 device->name = sname;
/* Debug level comes from the "DEVICE" key of the dbg option string. */
727 nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE");
728 nv_engine(device)->sclass = nouveau_device_sclass;
729 list_add(&device->head, &nv_devices);
731 ret = nvkm_event_init(&nouveau_device_event_func, 1, 1,
734 mutex_unlock(&nv_devices_mutex);