/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/* Resolve an embedded nvkm_object back to its containing nvkm_udevice. */
#define nvkm_udevice(p) container_of((p), struct nvkm_udevice, object)
#include "priv.h"
#include "ctrl.h"

#include <core/client.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/cl0080.h>
#include <nvif/unpack.h>
38 struct nvkm_object object;
39 struct nvkm_device *device;
43 nvkm_udevice_info_subdev(struct nvkm_device *device, u64 mthd, u64 *data)
45 struct nvkm_subdev *subdev;
46 enum nvkm_devidx subidx;
48 switch (mthd & NV_DEVICE_INFO_UNIT) {
49 case NV_DEVICE_FIFO(0): subidx = NVKM_ENGINE_FIFO; break;
54 subdev = nvkm_device_subdev(device, subidx);
56 return nvkm_subdev_info(subdev, mthd, data);
61 nvkm_udevice_info_v1(struct nvkm_device *device,
62 struct nv_device_info_v1_data *args)
64 if (args->mthd & NV_DEVICE_INFO_UNIT) {
65 if (nvkm_udevice_info_subdev(device, args->mthd, &args->data))
66 args->mthd = NV_DEVICE_INFO_INVALID;
71 #define ENGINE__(A,B,C) NV_DEVICE_INFO_ENGINE_##A: { int _i; \
72 for (_i = (B), args->data = 0ULL; _i <= (C); _i++) { \
73 if (nvkm_device_engine(device, _i)) \
74 args->data |= BIT_ULL(_i); \
77 #define ENGINE_A(A) ENGINE__(A, NVKM_ENGINE_##A , NVKM_ENGINE_##A)
78 #define ENGINE_B(A) ENGINE__(A, NVKM_ENGINE_##A##0, NVKM_ENGINE_##A##_LAST)
79 case ENGINE_A(SW ); break;
80 case ENGINE_A(GR ); break;
81 case ENGINE_A(MPEG ); break;
82 case ENGINE_A(ME ); break;
83 case ENGINE_A(CIPHER); break;
84 case ENGINE_A(BSP ); break;
85 case ENGINE_A(VP ); break;
86 case ENGINE_B(CE ); break;
87 case ENGINE_A(SEC ); break;
88 case ENGINE_A(MSVLD ); break;
89 case ENGINE_A(MSPDEC); break;
90 case ENGINE_A(MSPPP ); break;
91 case ENGINE_A(MSENC ); break;
92 case ENGINE_A(VIC ); break;
93 case ENGINE_A(SEC2 ); break;
94 case ENGINE_B(NVDEC ); break;
95 case ENGINE_B(NVENC ); break;
97 args->mthd = NV_DEVICE_INFO_INVALID;
103 nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
105 struct nvkm_object *object = &udev->object;
106 struct nvkm_device *device = udev->device;
107 struct nvkm_fb *fb = device->fb;
108 struct nvkm_instmem *imem = device->imem;
110 struct nv_device_info_v0 v0;
111 struct nv_device_info_v1 v1;
113 int ret = -ENOSYS, i;
115 nvif_ioctl(object, "device info size %d\n", size);
116 if (!(ret = nvif_unpack(ret, &data, &size, args->v1, 1, 1, true))) {
117 nvif_ioctl(object, "device info vers %d count %d\n",
118 args->v1.version, args->v1.count);
119 if (args->v1.count * sizeof(args->v1.data[0]) == size) {
120 for (i = 0; i < args->v1.count; i++)
121 nvkm_udevice_info_v1(device, &args->v1.data[i]);
126 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
127 nvif_ioctl(object, "device info vers %d\n", args->v0.version);
131 switch (device->chipset) {
142 args->v0.platform = NV_DEVICE_INFO_V0_IGP;
145 switch (device->type) {
146 case NVKM_DEVICE_PCI:
147 args->v0.platform = NV_DEVICE_INFO_V0_PCI;
149 case NVKM_DEVICE_AGP:
150 args->v0.platform = NV_DEVICE_INFO_V0_AGP;
152 case NVKM_DEVICE_PCIE:
153 args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
155 case NVKM_DEVICE_TEGRA:
156 args->v0.platform = NV_DEVICE_INFO_V0_SOC;
165 switch (device->card_type) {
166 case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break;
168 case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break;
169 case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break;
170 case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break;
171 case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break;
172 case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break;
173 case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
174 case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
175 case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
176 case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
177 case GV100: args->v0.family = NV_DEVICE_INFO_V0_VOLTA; break;
178 case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
179 case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break;
185 args->v0.chipset = device->chipset;
186 args->v0.revision = device->chiprev;
188 args->v0.ram_size = args->v0.ram_user = fb->ram->size;
190 args->v0.ram_size = args->v0.ram_user = 0;
191 if (imem && args->v0.ram_size > 0)
192 args->v0.ram_user = args->v0.ram_user - imem->reserved;
194 strncpy(args->v0.chip, device->chip->name, sizeof(args->v0.chip));
195 strncpy(args->v0.name, device->name, sizeof(args->v0.name));
200 nvkm_udevice_time(struct nvkm_udevice *udev, void *data, u32 size)
202 struct nvkm_object *object = &udev->object;
203 struct nvkm_device *device = udev->device;
205 struct nv_device_time_v0 v0;
209 nvif_ioctl(object, "device time size %d\n", size);
210 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
211 nvif_ioctl(object, "device time vers %d\n", args->v0.version);
212 args->v0.time = nvkm_timer_read(device->timer);
219 nvkm_udevice_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
221 struct nvkm_udevice *udev = nvkm_udevice(object);
222 nvif_ioctl(object, "device mthd %08x\n", mthd);
224 case NV_DEVICE_V0_INFO:
225 return nvkm_udevice_info(udev, data, size);
226 case NV_DEVICE_V0_TIME:
227 return nvkm_udevice_time(udev, data, size);
235 nvkm_udevice_rd08(struct nvkm_object *object, u64 addr, u8 *data)
237 struct nvkm_udevice *udev = nvkm_udevice(object);
238 *data = nvkm_rd08(udev->device, addr);
243 nvkm_udevice_rd16(struct nvkm_object *object, u64 addr, u16 *data)
245 struct nvkm_udevice *udev = nvkm_udevice(object);
246 *data = nvkm_rd16(udev->device, addr);
251 nvkm_udevice_rd32(struct nvkm_object *object, u64 addr, u32 *data)
253 struct nvkm_udevice *udev = nvkm_udevice(object);
254 *data = nvkm_rd32(udev->device, addr);
259 nvkm_udevice_wr08(struct nvkm_object *object, u64 addr, u8 data)
261 struct nvkm_udevice *udev = nvkm_udevice(object);
262 nvkm_wr08(udev->device, addr, data);
267 nvkm_udevice_wr16(struct nvkm_object *object, u64 addr, u16 data)
269 struct nvkm_udevice *udev = nvkm_udevice(object);
270 nvkm_wr16(udev->device, addr, data);
275 nvkm_udevice_wr32(struct nvkm_object *object, u64 addr, u32 data)
277 struct nvkm_udevice *udev = nvkm_udevice(object);
278 nvkm_wr32(udev->device, addr, data);
283 nvkm_udevice_map(struct nvkm_object *object, void *argv, u32 argc,
284 enum nvkm_object_map *type, u64 *addr, u64 *size)
286 struct nvkm_udevice *udev = nvkm_udevice(object);
287 struct nvkm_device *device = udev->device;
288 *type = NVKM_OBJECT_MAP_IO;
289 *addr = device->func->resource_addr(device, 0);
290 *size = device->func->resource_size(device, 0);
295 nvkm_udevice_fini(struct nvkm_object *object, bool suspend)
297 struct nvkm_udevice *udev = nvkm_udevice(object);
298 struct nvkm_device *device = udev->device;
301 mutex_lock(&device->mutex);
302 if (!--device->refcount) {
303 ret = nvkm_device_fini(device, suspend);
304 if (ret && suspend) {
311 mutex_unlock(&device->mutex);
316 nvkm_udevice_init(struct nvkm_object *object)
318 struct nvkm_udevice *udev = nvkm_udevice(object);
319 struct nvkm_device *device = udev->device;
322 mutex_lock(&device->mutex);
323 if (!device->refcount++) {
324 ret = nvkm_device_init(device);
332 mutex_unlock(&device->mutex);
337 nvkm_udevice_child_new(const struct nvkm_oclass *oclass,
338 void *data, u32 size, struct nvkm_object **pobject)
340 struct nvkm_udevice *udev = nvkm_udevice(oclass->parent);
341 const struct nvkm_device_oclass *sclass = oclass->priv;
342 return sclass->ctor(udev->device, oclass, data, size, pobject);
346 nvkm_udevice_child_get(struct nvkm_object *object, int index,
347 struct nvkm_oclass *oclass)
349 struct nvkm_udevice *udev = nvkm_udevice(object);
350 struct nvkm_device *device = udev->device;
351 struct nvkm_engine *engine;
352 u64 mask = (1ULL << NVKM_ENGINE_DMAOBJ) |
353 (1ULL << NVKM_ENGINE_FIFO) |
354 (1ULL << NVKM_ENGINE_DISP) |
355 (1ULL << NVKM_ENGINE_PM);
356 const struct nvkm_device_oclass *sclass = NULL;
359 for (; i = __ffs64(mask), mask && !sclass; mask &= ~(1ULL << i)) {
360 if (!(engine = nvkm_device_engine(device, i)) ||
361 !(engine->func->base.sclass))
363 oclass->engine = engine;
365 index -= engine->func->base.sclass(oclass, index, &sclass);
370 sclass = &nvkm_control_oclass;
371 else if (device->mmu && index-- == 0)
372 sclass = &device->mmu->user;
373 else if (device->fault && index-- == 0)
374 sclass = &device->fault->user;
378 oclass->base = sclass->base;
381 oclass->ctor = nvkm_udevice_child_new;
382 oclass->priv = sclass;
386 static const struct nvkm_object_func
387 nvkm_udevice_super = {
388 .init = nvkm_udevice_init,
389 .fini = nvkm_udevice_fini,
390 .mthd = nvkm_udevice_mthd,
391 .map = nvkm_udevice_map,
392 .rd08 = nvkm_udevice_rd08,
393 .rd16 = nvkm_udevice_rd16,
394 .rd32 = nvkm_udevice_rd32,
395 .wr08 = nvkm_udevice_wr08,
396 .wr16 = nvkm_udevice_wr16,
397 .wr32 = nvkm_udevice_wr32,
398 .sclass = nvkm_udevice_child_get,
401 static const struct nvkm_object_func
403 .init = nvkm_udevice_init,
404 .fini = nvkm_udevice_fini,
405 .mthd = nvkm_udevice_mthd,
406 .sclass = nvkm_udevice_child_get,
410 nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
411 struct nvkm_object **pobject)
414 struct nv_device_v0 v0;
416 struct nvkm_client *client = oclass->client;
417 struct nvkm_object *parent = &client->object;
418 const struct nvkm_object_func *func;
419 struct nvkm_udevice *udev;
422 nvif_ioctl(parent, "create device size %d\n", size);
423 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
424 nvif_ioctl(parent, "create device v%d device %016llx\n",
425 args->v0.version, args->v0.device);
429 /* give priviledged clients register access */
431 func = &nvkm_udevice_super;
433 func = &nvkm_udevice;
435 if (!(udev = kzalloc(sizeof(*udev), GFP_KERNEL)))
437 nvkm_object_ctor(func, oclass, &udev->object);
438 *pobject = &udev->object;
440 /* find the device that matches what the client requested */
441 if (args->v0.device != ~0)
442 udev->device = nvkm_device_find(args->v0.device);
444 udev->device = nvkm_device_find(client->device);
451 const struct nvkm_sclass
452 nvkm_udevice_sclass = {
456 .ctor = nvkm_udevice_new,