2 * Copyright 2012 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include <core/object.h>
26 #include <core/device.h>
27 #include <core/client.h>
28 #include <core/device.h>
29 #include <core/option.h>
31 #include <core/class.h>
33 #include <subdev/device.h>
/* Global registry of known GPU devices; additions, removals and lookups
 * are serialised by nv_devices_mutex. */
35 static DEFINE_MUTEX(nv_devices_mutex);
36 static LIST_HEAD(nv_devices);
/* Look up a registered nouveau_device by its unique handle (@name).
 * Walks the global nv_devices list under nv_devices_mutex.
 * NOTE(review): this listing is missing lines (opening brace, the match
 * assignment/break and the return of @match) — presumably it returns the
 * matching device or NULL; confirm against the full source. */
38 struct nouveau_device *
39 nouveau_device_find(u64 name)
41 struct nouveau_device *device, *match = NULL;
/* hold the mutex so a device can't be unregistered mid-walk */
42 mutex_lock(&nv_devices_mutex);
43 list_for_each_entry(device, &nv_devices, head) {
44 if (device->handle == name) {
49 mutex_unlock(&nv_devices_mutex);
53 /******************************************************************************
54 * nouveau_devobj (0x0080): class implementation
55 *****************************************************************************/
/* Per-client device object (class 0x0080).  Extends nouveau_parent and
 * keeps one reference per subdev/engine instantiated for this client. */
56 struct nouveau_devobj {
57 struct nouveau_parent base;
/* references into the device's subdev/engine objects, indexed by NVDEV_* */
58 struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
/* Maps each internal subdev/engine index to the NV_DEVICE_DISABLE_* API
 * bit a client can set to prevent that unit from being created.  Most
 * essential subdevs share the single DISABLE_CORE bit; engines get
 * individual bits. */
62 static const u64 disable_map[] = {
63 [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_DISABLE_VBIOS,
64 [NVDEV_SUBDEV_GPIO] = NV_DEVICE_DISABLE_CORE,
65 [NVDEV_SUBDEV_I2C] = NV_DEVICE_DISABLE_CORE,
66 [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE,
67 [NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE,
68 [NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE,
69 [NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE,
70 [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE,
71 [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE,
72 [NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE,
73 [NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE,
74 [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
75 [NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE,
76 [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE,
77 [NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH,
78 [NVDEV_ENGINE_MPEG] = NV_DEVICE_DISABLE_MPEG,
79 [NVDEV_ENGINE_ME] = NV_DEVICE_DISABLE_ME,
80 [NVDEV_ENGINE_VP] = NV_DEVICE_DISABLE_VP,
81 [NVDEV_ENGINE_CRYPT] = NV_DEVICE_DISABLE_CRYPT,
82 [NVDEV_ENGINE_BSP] = NV_DEVICE_DISABLE_BSP,
83 [NVDEV_ENGINE_PPP] = NV_DEVICE_DISABLE_PPP,
84 [NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0,
85 [NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1,
86 [NVDEV_ENGINE_UNK1C1] = NV_DEVICE_DISABLE_UNK1C1,
87 [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO,
88 [NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP,
/* sentinel: index one past the last valid subdev */
89 [NVDEV_SUBDEV_NR] = 0,
/* Constructor for the 0x0080 device class.  Resolves the device the
 * client asked for, identifies the chipset on first use (reading BOOT0
 * and the straps via a temporary BAR0 mapping), maps the register
 * aperture, then creates/initialises the subdev and engine objects not
 * masked off by the client's disable bits.
 *
 * NOTE(review): this listing is missing many lines (braces, error-path
 * returns, `default:` cases, the trailing `return 0;`) — comments below
 * describe only the code that is visible. */
93 nouveau_devobj_ctor(struct nouveau_object *parent,
94 struct nouveau_object *engine,
95 struct nouveau_oclass *oclass, void *data, u32 size,
96 struct nouveau_object **pobject)
98 struct nouveau_client *client = nv_client(parent);
99 struct nouveau_device *device;
100 struct nouveau_devobj *devobj;
101 struct nv_device_class *args = data;
102 u64 disable, boot0, strap;
103 u64 mmio_base, mmio_size;
/* reject requests with a short argument struct */
107 if (size < sizeof(struct nv_device_class))
110 /* find the device subdev that matches what the client requested */
111 device = nv_device(client->device);
/* ~0 means "the client's own device"; anything else is a lookup */
112 if (args->device != ~0) {
113 device = nouveau_device_find(args->device);
118 ret = nouveau_parent_create(parent, nv_object(device), oclass, 0, NULL,
119 (1ULL << NVDEV_ENGINE_DMAOBJ) |
120 (1ULL << NVDEV_ENGINE_FIFO) |
121 (1ULL << NVDEV_ENGINE_DISP), &devobj);
122 *pobject = nv_object(devobj);
/* BAR0: the GPU's register aperture */
126 mmio_base = pci_resource_start(device->pdev, 0);
127 mmio_size = pci_resource_len(device->pdev, 0);
129 /* translate api disable mask into internal mapping */
130 disable = args->debug0;
131 for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
132 if (args->disable & disable_map[i])
133 disable |= (1ULL << i);
136 /* identify the chipset, and determine classes of subdev/engines */
137 if (!(args->disable & NV_DEVICE_DISABLE_IDENTIFY) &&
138 !device->card_type) {
/* temporary mapping, just enough to read BOOT0 + straps */
139 map = ioremap(mmio_base, 0x102000);
143 /* switch mmio to cpu's native endianness */
145 if (ioread32_native(map + 0x000004) != 0x00000000)
147 if (ioread32_native(map + 0x000004) == 0x00000000)
149 iowrite32_native(0x01000001, map + 0x000004);
151 /* read boot0 and strapping information */
152 boot0 = ioread32_native(map + 0x000000);
153 strap = ioread32_native(map + 0x101000);
156 /* determine chipset and derive architecture from it */
157 if ((boot0 & 0x0f000000) > 0) {
158 device->chipset = (boot0 & 0xff00000) >> 20;
159 switch (device->chipset & 0xf0) {
160 case 0x10: device->card_type = NV_10; break;
161 case 0x20: device->card_type = NV_20; break;
162 case 0x30: device->card_type = NV_30; break;
/* 0x60 family is NV40-architecture silicon */
164 case 0x60: device->card_type = NV_40; break;
168 case 0xa0: device->card_type = NV_50; break;
169 case 0xc0: device->card_type = NV_C0; break;
170 case 0xd0: device->card_type = NV_D0; break;
171 case 0xe0: device->card_type = NV_E0; break;
/* pre-NV10 boards: distinguish NV04 vs NV05 by a BOOT0 sub-field */
176 if ((boot0 & 0xff00fff0) == 0x20004000) {
177 if (boot0 & 0x00f00000)
178 device->chipset = 0x05;
180 device->chipset = 0x04;
181 device->card_type = NV_04;
/* dispatch to the per-family identify routine, which fills in
 * device->oclass[] and device->cname */
184 switch (device->card_type) {
185 case NV_04: ret = nv04_identify(device); break;
186 case NV_10: ret = nv10_identify(device); break;
187 case NV_20: ret = nv20_identify(device); break;
188 case NV_30: ret = nv30_identify(device); break;
189 case NV_40: ret = nv40_identify(device); break;
190 case NV_50: ret = nv50_identify(device); break;
/* NV_D0 shares the Fermi (nvc0) identify path */
192 case NV_D0: ret = nvc0_identify(device); break;
193 case NV_E0: ret = nve0_identify(device); break;
200 nv_error(device, "unknown chipset, 0x%08x\n", boot0);
204 nv_info(device, "BOOT0 : 0x%08x\n", boot0);
205 nv_info(device, "Chipset: %s (NV%02X)\n",
206 device->cname, device->chipset);
207 nv_info(device, "Family : NV%02X\n", device->card_type);
209 /* determine frequency of timing crystal */
210 if ( device->chipset < 0x17 ||
211 (device->chipset >= 0x20 && device->chipset <= 0x25))
/* decode crystal frequency (kHz) from the strap bits */
217 case 0x00000000: device->crystal = 13500; break;
218 case 0x00000040: device->crystal = 14318; break;
219 case 0x00400000: device->crystal = 27000; break;
220 case 0x00400040: device->crystal = 25000; break;
223 nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
/* map the full register aperture unless the client opted out */
226 if (!(args->disable & NV_DEVICE_DISABLE_MMIO) &&
227 !nv_subdev(device)->mmio) {
228 nv_subdev(device)->mmio = ioremap(mmio_base, mmio_size);
229 if (!nv_subdev(device)->mmio) {
230 nv_error(device, "unable to map device registers\n");
235 /* ensure requested subsystems are available for use */
236 for (i = 0, c = 0; i < NVDEV_SUBDEV_NR; i++) {
/* skip units with no class for this chipset, or disabled by the client */
237 if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
240 if (!device->subdev[i]) {
241 ret = nouveau_object_ctor(nv_object(device), NULL,
/* engines only get a reset here; full init happens later */
249 if (nv_iclass(devobj->subdev[i], NV_ENGINE_CLASS))
250 nouveau_subdev_reset(devobj->subdev[i]);
252 nouveau_object_ref(device->subdev[i],
256 /* note: can't init *any* subdevs until devinit has been run
257 * due to not knowing exactly what the vbios init tables will
258 * mess with. devinit also can't be run until all of its
259 * dependencies have been created.
261 * this code delays init of any subdev until all of devinit's
262 * dependencies have been created, and then initialises each
263 * subdev in turn as they're created.
265 while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) {
266 struct nouveau_object *subdev = devobj->subdev[c++];
267 if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) {
268 ret = nouveau_object_inc(subdev);
/* Destructor for the 0x0080 class: drop all subdev references in
 * reverse creation order, then destroy the parent base object. */
279 nouveau_devobj_dtor(struct nouveau_object *object)
281 struct nouveau_devobj *devobj = (void *)object;
284 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
/* ref(NULL, &ptr) releases the reference held in subdev[i] */
285 nouveau_object_ref(NULL, &devobj->subdev[i]);
287 nouveau_parent_destroy(&devobj->base);
/* Init hook: after the first successful creation pass (devobj->created),
 * increment the use count of every non-engine subdev in order; on a
 * failure, roll back the subdevs already started (reverse order).
 * NOTE(review): listing is missing lines (braces, error checks, return). */
291 nouveau_devobj_init(struct nouveau_object *object)
293 struct nouveau_devobj *devobj = (void *)object;
294 struct nouveau_object *subdev;
297 ret = nouveau_parent_init(&devobj->base);
/* skip subdev init until the ctor has finished building everything */
301 for (i = 0; devobj->created && i < NVDEV_SUBDEV_NR; i++) {
302 if ((subdev = devobj->subdev[i])) {
303 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
304 ret = nouveau_object_inc(subdev);
311 devobj->created = true;
/* error path: undo the inits done so far, newest first */
315 for (--i; i >= 0; i--) {
316 if ((subdev = devobj->subdev[i])) {
317 if (!nv_iclass(subdev, NV_ENGINE_CLASS))
318 nouveau_object_dec(subdev, false);
/* Fini hook (@suspend selects suspend vs teardown): decrement each
 * non-engine subdev in reverse order, then fini the parent; if a step
 * fails during suspend, re-increment the subdevs already stopped.
 * NOTE(review): listing is missing lines (braces, error checks, return). */
326 nouveau_devobj_fini(struct nouveau_object *object, bool suspend)
328 struct nouveau_devobj *devobj = (void *)object;
329 struct nouveau_object *subdev;
332 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
333 if ((subdev = devobj->subdev[i])) {
334 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
335 ret = nouveau_object_dec(subdev, suspend);
342 ret = nouveau_parent_fini(&devobj->base, suspend);
/* recovery path: on suspend failure, restart what was stopped */
344 for (; ret && suspend && i < NVDEV_SUBDEV_NR; i++) {
345 if ((subdev = devobj->subdev[i])) {
346 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
347 ret = nouveau_object_inc(subdev);
/* 8-bit register read, forwarded to the owning device engine. */
359 nouveau_devobj_rd08(struct nouveau_object *object, u32 addr)
361 return nv_rd08(object->engine, addr);
/* 16-bit register read, forwarded to the owning device engine. */
365 nouveau_devobj_rd16(struct nouveau_object *object, u32 addr)
367 return nv_rd16(object->engine, addr);
/* 32-bit register read, forwarded to the owning device engine. */
371 nouveau_devobj_rd32(struct nouveau_object *object, u32 addr)
373 return nv_rd32(object->engine, addr);
/* 8-bit register write, forwarded to the owning device engine. */
377 nouveau_devobj_wr08(struct nouveau_object *object, u32 addr, u8 data)
379 nv_wr08(object->engine, addr, data);
/* 16-bit register write, forwarded to the owning device engine. */
383 nouveau_devobj_wr16(struct nouveau_object *object, u32 addr, u16 data)
385 nv_wr16(object->engine, addr, data);
/* 32-bit register write, forwarded to the owning device engine. */
389 nouveau_devobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
391 nv_wr32(object->engine, addr, data);
/* Object function table wiring the 0x0080 class's lifecycle hooks and
 * register accessors into the object framework. */
394 static struct nouveau_ofuncs
395 nouveau_devobj_ofuncs = {
396 .ctor = nouveau_devobj_ctor,
397 .dtor = nouveau_devobj_dtor,
398 .init = nouveau_devobj_init,
399 .fini = nouveau_devobj_fini,
400 .rd08 = nouveau_devobj_rd08,
401 .rd16 = nouveau_devobj_rd16,
402 .rd32 = nouveau_devobj_rd32,
403 .wr08 = nouveau_devobj_wr08,
404 .wr16 = nouveau_devobj_wr16,
405 .wr32 = nouveau_devobj_wr32,
408 /******************************************************************************
409 * nouveau_device: engine functions
410 *****************************************************************************/
/* Classes the device engine exposes to clients: 0x0080 is the device
 * object implemented above. */
411 struct nouveau_oclass
412 nouveau_device_sclass[] = {
413 { 0x0080, &nouveau_devobj_ofuncs },
/* Device engine destructor: remove the device from the global registry
 * (under the mutex), unmap its MMIO aperture if mapped, and destroy the
 * underlying subdev. */
418 nouveau_device_dtor(struct nouveau_object *object)
420 struct nouveau_device *device = (void *)object;
422 mutex_lock(&nv_devices_mutex);
423 list_del(&device->head);
424 mutex_unlock(&nv_devices_mutex);
426 if (device->base.mmio)
427 iounmap(device->base.mmio);
429 nouveau_subdev_destroy(&device->base);
/* Class descriptor for the device subdev itself; only a destructor is
 * visible here (init/fini presumably follow in lines this listing omits). */
432 static struct nouveau_oclass
433 nouveau_device_oclass = {
434 .handle = NV_SUBDEV(DEVICE, 0x00),
435 .ofuncs = &(struct nouveau_ofuncs) {
436 .dtor = nouveau_device_dtor,
/* Create and register a nouveau_device subdev for PCI device @pdev with
 * unique handle @name; @cfg/@dbg are the user's config/debug option
 * strings.  Rejects duplicate handles, then adds the device to the
 * global nv_devices list under the mutex.
 * NOTE(review): the function continues past the end of this listing
 * (error unwinding and return are not visible here). */
441 nouveau_device_create_(struct pci_dev *pdev, u64 name, const char *sname,
442 const char *cfg, const char *dbg,
443 int length, void **pobject)
445 struct nouveau_device *device;
448 mutex_lock(&nv_devices_mutex);
/* refuse to register two devices with the same handle */
449 list_for_each_entry(device, &nv_devices, head) {
450 if (device->handle == name)
454 ret = nouveau_subdev_create_(NULL, NULL, &nouveau_device_oclass, 0,
455 "DEVICE", "device", length, pobject);
/* usecount of 2: presumably one for creator, one for the registry —
 * confirm against the full source */
460 atomic_set(&nv_object(device)->usecount, 2);
462 device->handle = name;
463 device->cfgopt = cfg;
464 device->dbgopt = dbg;
465 device->name = sname;
467 nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE");
468 list_add(&device->head, &nv_devices);
470 mutex_unlock(&nv_devices_mutex);