Commit | Line | Data |
---|---|---|
9952f691 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
776dc384 TR |
2 | /* |
3 | * Copyright (C) 2012 Avionic Design GmbH | |
4 | * Copyright (C) 2012-2013, NVIDIA Corporation | |
776dc384 TR |
5 | */ |
6 | ||
f67524ca | 7 | #include <linux/debugfs.h> |
4abfc0e3 | 8 | #include <linux/dma-mapping.h> |
776dc384 TR |
9 | #include <linux/host1x.h> |
10 | #include <linux/of.h> | |
f67524ca | 11 | #include <linux/seq_file.h> |
776dc384 | 12 | #include <linux/slab.h> |
c95469aa | 13 | #include <linux/of_device.h> |
776dc384 | 14 | |
d24b2898 | 15 | #include "bus.h" |
776dc384 TR |
16 | #include "dev.h" |
17 | ||
/*
 * Global registries, each guarded by its own mutex:
 *   clients - host1x clients that have not yet been bound to a logical device
 *   drivers - host1x drivers registered with the bus
 *   devices - host1x controller instances registered via host1x_register()
 */
static DEFINE_MUTEX(clients_lock);
static LIST_HEAD(clients);

static DEFINE_MUTEX(drivers_lock);
static LIST_HEAD(drivers);

static DEFINE_MUTEX(devices_lock);
static LIST_HEAD(devices);
26 | ||
/*
 * A subdevice is one device-tree node that a host1x logical device is
 * composed of. It starts out idle (on device->subdevs) and moves to
 * device->active once a matching client registers.
 */
struct host1x_subdev {
	struct host1x_client *client;	/* client bound to this node, or NULL */
	struct device_node *np;		/* device-tree node (reference held) */
	struct list_head list;		/* entry in subdevs or active list */
};
32 | ||
33 | /** | |
34 | * host1x_subdev_add() - add a new subdevice with an associated device node | |
466749f1 | 35 | * @device: host1x device to add the subdevice to |
d2a58fd1 | 36 | * @driver: host1x driver containing the subdevices |
466749f1 | 37 | * @np: device node |
776dc384 TR |
38 | */ |
39 | static int host1x_subdev_add(struct host1x_device *device, | |
25ae30d2 | 40 | struct host1x_driver *driver, |
776dc384 TR |
41 | struct device_node *np) |
42 | { | |
43 | struct host1x_subdev *subdev; | |
25ae30d2 TR |
44 | struct device_node *child; |
45 | int err; | |
776dc384 TR |
46 | |
47 | subdev = kzalloc(sizeof(*subdev), GFP_KERNEL); | |
48 | if (!subdev) | |
49 | return -ENOMEM; | |
50 | ||
51 | INIT_LIST_HEAD(&subdev->list); | |
52 | subdev->np = of_node_get(np); | |
53 | ||
54 | mutex_lock(&device->subdevs_lock); | |
55 | list_add_tail(&subdev->list, &device->subdevs); | |
56 | mutex_unlock(&device->subdevs_lock); | |
57 | ||
25ae30d2 TR |
58 | /* recursively add children */ |
59 | for_each_child_of_node(np, child) { | |
60 | if (of_match_node(driver->subdevs, child) && | |
61 | of_device_is_available(child)) { | |
62 | err = host1x_subdev_add(device, driver, child); | |
63 | if (err < 0) { | |
64 | /* XXX cleanup? */ | |
65 | of_node_put(child); | |
66 | return err; | |
67 | } | |
68 | } | |
69 | } | |
70 | ||
776dc384 TR |
71 | return 0; |
72 | } | |
73 | ||
74 | /** | |
75 | * host1x_subdev_del() - remove subdevice | |
466749f1 | 76 | * @subdev: subdevice to remove |
776dc384 TR |
77 | */ |
78 | static void host1x_subdev_del(struct host1x_subdev *subdev) | |
79 | { | |
80 | list_del(&subdev->list); | |
81 | of_node_put(subdev->np); | |
82 | kfree(subdev); | |
83 | } | |
84 | ||
85 | /** | |
86 | * host1x_device_parse_dt() - scan device tree and add matching subdevices | |
466749f1 TR |
87 | * @device: host1x logical device |
88 | * @driver: host1x driver | |
776dc384 | 89 | */ |
f4c5cf88 TR |
90 | static int host1x_device_parse_dt(struct host1x_device *device, |
91 | struct host1x_driver *driver) | |
776dc384 TR |
92 | { |
93 | struct device_node *np; | |
94 | int err; | |
95 | ||
96 | for_each_child_of_node(device->dev.parent->of_node, np) { | |
f4c5cf88 | 97 | if (of_match_node(driver->subdevs, np) && |
776dc384 | 98 | of_device_is_available(np)) { |
25ae30d2 | 99 | err = host1x_subdev_add(device, driver, np); |
93ec3029 AKC |
100 | if (err < 0) { |
101 | of_node_put(np); | |
776dc384 | 102 | return err; |
93ec3029 | 103 | } |
776dc384 TR |
104 | } |
105 | } | |
106 | ||
107 | return 0; | |
108 | } | |
109 | ||
110 | static void host1x_subdev_register(struct host1x_device *device, | |
111 | struct host1x_subdev *subdev, | |
112 | struct host1x_client *client) | |
113 | { | |
114 | int err; | |
115 | ||
116 | /* | |
117 | * Move the subdevice to the list of active (registered) subdevices | |
118 | * and associate it with a client. At the same time, associate the | |
119 | * client with its parent device. | |
120 | */ | |
121 | mutex_lock(&device->subdevs_lock); | |
122 | mutex_lock(&device->clients_lock); | |
123 | list_move_tail(&client->list, &device->clients); | |
124 | list_move_tail(&subdev->list, &device->active); | |
608f43ad | 125 | client->host = &device->dev; |
776dc384 TR |
126 | subdev->client = client; |
127 | mutex_unlock(&device->clients_lock); | |
128 | mutex_unlock(&device->subdevs_lock); | |
129 | ||
776dc384 | 130 | if (list_empty(&device->subdevs)) { |
f4c5cf88 | 131 | err = device_add(&device->dev); |
776dc384 | 132 | if (err < 0) |
f4c5cf88 | 133 | dev_err(&device->dev, "failed to add: %d\n", err); |
536e1715 | 134 | else |
f4c5cf88 | 135 | device->registered = true; |
776dc384 TR |
136 | } |
137 | } | |
138 | ||
139 | static void __host1x_subdev_unregister(struct host1x_device *device, | |
140 | struct host1x_subdev *subdev) | |
141 | { | |
142 | struct host1x_client *client = subdev->client; | |
776dc384 TR |
143 | |
144 | /* | |
145 | * If all subdevices have been activated, we're about to remove the | |
146 | * first active subdevice, so unload the driver first. | |
147 | */ | |
f4c5cf88 TR |
148 | if (list_empty(&device->subdevs)) { |
149 | if (device->registered) { | |
150 | device->registered = false; | |
151 | device_del(&device->dev); | |
152 | } | |
776dc384 TR |
153 | } |
154 | ||
155 | /* | |
156 | * Move the subdevice back to the list of idle subdevices and remove | |
157 | * it from list of clients. | |
158 | */ | |
159 | mutex_lock(&device->clients_lock); | |
160 | subdev->client = NULL; | |
608f43ad | 161 | client->host = NULL; |
776dc384 TR |
162 | list_move_tail(&subdev->list, &device->subdevs); |
163 | /* | |
164 | * XXX: Perhaps don't do this here, but rather explicitly remove it | |
165 | * when the device is about to be deleted. | |
166 | * | |
167 | * This is somewhat complicated by the fact that this function is | |
168 | * used to remove the subdevice when a client is unregistered but | |
169 | * also when the composite device is about to be removed. | |
170 | */ | |
171 | list_del_init(&client->list); | |
172 | mutex_unlock(&device->clients_lock); | |
173 | } | |
174 | ||
175 | static void host1x_subdev_unregister(struct host1x_device *device, | |
176 | struct host1x_subdev *subdev) | |
177 | { | |
178 | mutex_lock(&device->subdevs_lock); | |
179 | __host1x_subdev_unregister(device, subdev); | |
180 | mutex_unlock(&device->subdevs_lock); | |
181 | } | |
182 | ||
466749f1 TR |
183 | /** |
184 | * host1x_device_init() - initialize a host1x logical device | |
185 | * @device: host1x logical device | |
186 | * | |
187 | * The driver for the host1x logical device can call this during execution of | |
188 | * its &host1x_driver.probe implementation to initialize each of its clients. | |
189 | * The client drivers access the subsystem specific driver data using the | |
190 | * &host1x_client.parent field and driver data associated with it (usually by | |
191 | * calling dev_get_drvdata()). | |
192 | */ | |
776dc384 TR |
193 | int host1x_device_init(struct host1x_device *device) |
194 | { | |
195 | struct host1x_client *client; | |
196 | int err; | |
197 | ||
198 | mutex_lock(&device->clients_lock); | |
199 | ||
933deb8c TR |
200 | list_for_each_entry(client, &device->clients, list) { |
201 | if (client->ops && client->ops->early_init) { | |
202 | err = client->ops->early_init(client); | |
203 | if (err < 0) { | |
204 | dev_err(&device->dev, "failed to early initialize %s: %d\n", | |
205 | dev_name(client->dev), err); | |
206 | goto teardown_late; | |
207 | } | |
208 | } | |
209 | } | |
210 | ||
776dc384 TR |
211 | list_for_each_entry(client, &device->clients, list) { |
212 | if (client->ops && client->ops->init) { | |
213 | err = client->ops->init(client); | |
214 | if (err < 0) { | |
215 | dev_err(&device->dev, | |
216 | "failed to initialize %s: %d\n", | |
217 | dev_name(client->dev), err); | |
8f7da157 | 218 | goto teardown; |
776dc384 TR |
219 | } |
220 | } | |
221 | } | |
222 | ||
223 | mutex_unlock(&device->clients_lock); | |
224 | ||
225 | return 0; | |
8f7da157 TR |
226 | |
227 | teardown: | |
228 | list_for_each_entry_continue_reverse(client, &device->clients, list) | |
229 | if (client->ops->exit) | |
230 | client->ops->exit(client); | |
231 | ||
933deb8c TR |
232 | /* reset client to end of list for late teardown */ |
233 | client = list_entry(&device->clients, struct host1x_client, list); | |
234 | ||
235 | teardown_late: | |
236 | list_for_each_entry_continue_reverse(client, &device->clients, list) | |
237 | if (client->ops->late_exit) | |
238 | client->ops->late_exit(client); | |
239 | ||
8f7da157 TR |
240 | mutex_unlock(&device->clients_lock); |
241 | return err; | |
776dc384 | 242 | } |
fae798a1 | 243 | EXPORT_SYMBOL(host1x_device_init); |
776dc384 | 244 | |
466749f1 TR |
245 | /** |
246 | * host1x_device_exit() - uninitialize host1x logical device | |
247 | * @device: host1x logical device | |
248 | * | |
249 | * When the driver for a host1x logical device is unloaded, it can call this | |
250 | * function to tear down each of its clients. Typically this is done after a | |
251 | * subsystem-specific data structure is removed and the functionality can no | |
252 | * longer be used. | |
253 | */ | |
776dc384 TR |
254 | int host1x_device_exit(struct host1x_device *device) |
255 | { | |
256 | struct host1x_client *client; | |
257 | int err; | |
258 | ||
259 | mutex_lock(&device->clients_lock); | |
260 | ||
261 | list_for_each_entry_reverse(client, &device->clients, list) { | |
262 | if (client->ops && client->ops->exit) { | |
263 | err = client->ops->exit(client); | |
264 | if (err < 0) { | |
265 | dev_err(&device->dev, | |
266 | "failed to cleanup %s: %d\n", | |
267 | dev_name(client->dev), err); | |
268 | mutex_unlock(&device->clients_lock); | |
269 | return err; | |
270 | } | |
271 | } | |
272 | } | |
273 | ||
933deb8c TR |
274 | list_for_each_entry_reverse(client, &device->clients, list) { |
275 | if (client->ops && client->ops->late_exit) { | |
276 | err = client->ops->late_exit(client); | |
277 | if (err < 0) { | |
278 | dev_err(&device->dev, "failed to late cleanup %s: %d\n", | |
279 | dev_name(client->dev), err); | |
280 | mutex_unlock(&device->clients_lock); | |
281 | return err; | |
282 | } | |
283 | } | |
284 | } | |
285 | ||
776dc384 TR |
286 | mutex_unlock(&device->clients_lock); |
287 | ||
288 | return 0; | |
289 | } | |
fae798a1 | 290 | EXPORT_SYMBOL(host1x_device_exit); |
776dc384 | 291 | |
0c7dfd36 TR |
292 | static int host1x_add_client(struct host1x *host1x, |
293 | struct host1x_client *client) | |
776dc384 TR |
294 | { |
295 | struct host1x_device *device; | |
296 | struct host1x_subdev *subdev; | |
297 | ||
298 | mutex_lock(&host1x->devices_lock); | |
299 | ||
300 | list_for_each_entry(device, &host1x->devices, list) { | |
301 | list_for_each_entry(subdev, &device->subdevs, list) { | |
302 | if (subdev->np == client->dev->of_node) { | |
303 | host1x_subdev_register(device, subdev, client); | |
304 | mutex_unlock(&host1x->devices_lock); | |
305 | return 0; | |
306 | } | |
307 | } | |
308 | } | |
309 | ||
310 | mutex_unlock(&host1x->devices_lock); | |
311 | return -ENODEV; | |
312 | } | |
313 | ||
0c7dfd36 TR |
314 | static int host1x_del_client(struct host1x *host1x, |
315 | struct host1x_client *client) | |
776dc384 TR |
316 | { |
317 | struct host1x_device *device, *dt; | |
318 | struct host1x_subdev *subdev; | |
319 | ||
320 | mutex_lock(&host1x->devices_lock); | |
321 | ||
322 | list_for_each_entry_safe(device, dt, &host1x->devices, list) { | |
323 | list_for_each_entry(subdev, &device->active, list) { | |
324 | if (subdev->client == client) { | |
325 | host1x_subdev_unregister(device, subdev); | |
326 | mutex_unlock(&host1x->devices_lock); | |
327 | return 0; | |
328 | } | |
329 | } | |
330 | } | |
331 | ||
332 | mutex_unlock(&host1x->devices_lock); | |
333 | return -ENODEV; | |
334 | } | |
335 | ||
f4c5cf88 TR |
336 | static int host1x_device_match(struct device *dev, struct device_driver *drv) |
337 | { | |
338 | return strcmp(dev_name(dev), drv->name) == 0; | |
339 | } | |
776dc384 | 340 | |
31fa25f1 TR |
341 | static int host1x_device_uevent(struct device *dev, |
342 | struct kobj_uevent_env *env) | |
343 | { | |
344 | struct device_node *np = dev->parent->of_node; | |
345 | unsigned int count = 0; | |
346 | struct property *p; | |
347 | const char *compat; | |
348 | ||
349 | /* | |
350 | * This duplicates most of of_device_uevent(), but the latter cannot | |
351 | * be called from modules and operates on dev->of_node, which is not | |
352 | * available in this case. | |
353 | * | |
354 | * Note that this is really only needed for backwards compatibility | |
355 | * with libdrm, which parses this information from sysfs and will | |
356 | * fail if it can't find the OF_FULLNAME, specifically. | |
357 | */ | |
358 | add_uevent_var(env, "OF_NAME=%pOFn", np); | |
359 | add_uevent_var(env, "OF_FULLNAME=%pOF", np); | |
360 | ||
361 | of_property_for_each_string(np, "compatible", p, compat) { | |
362 | add_uevent_var(env, "OF_COMPATIBLE_%u=%s", count, compat); | |
363 | count++; | |
364 | } | |
365 | ||
366 | add_uevent_var(env, "OF_COMPATIBLE_N=%u", count); | |
367 | ||
368 | return 0; | |
369 | } | |
370 | ||
07397df2 NG |
371 | static int host1x_dma_configure(struct device *dev) |
372 | { | |
3d6ce86e | 373 | return of_dma_configure(dev, dev->of_node, true); |
07397df2 NG |
374 | } |
375 | ||
/* Generic system power-management callbacks for host1x logical devices. */
static const struct dev_pm_ops host1x_device_pm_ops = {
	.suspend = pm_generic_suspend,
	.resume = pm_generic_resume,
	.freeze = pm_generic_freeze,
	.thaw = pm_generic_thaw,
	.poweroff = pm_generic_poweroff,
	.restore = pm_generic_restore,
};
384 | ||
/* The host1x bus, onto which logical devices and their drivers register. */
struct bus_type host1x_bus_type = {
	.name = "host1x",
	.match = host1x_device_match,
	.uevent = host1x_device_uevent,
	.dma_configure = host1x_dma_configure,
	.pm = &host1x_device_pm_ops,
};
392 | ||
99d2cd81 TR |
393 | static void __host1x_device_del(struct host1x_device *device) |
394 | { | |
395 | struct host1x_subdev *subdev, *sd; | |
396 | struct host1x_client *client, *cl; | |
397 | ||
398 | mutex_lock(&device->subdevs_lock); | |
399 | ||
400 | /* unregister subdevices */ | |
401 | list_for_each_entry_safe(subdev, sd, &device->active, list) { | |
402 | /* | |
403 | * host1x_subdev_unregister() will remove the client from | |
404 | * any lists, so we'll need to manually add it back to the | |
405 | * list of idle clients. | |
406 | * | |
407 | * XXX: Alternatively, perhaps don't remove the client from | |
408 | * any lists in host1x_subdev_unregister() and instead do | |
409 | * that explicitly from host1x_unregister_client()? | |
410 | */ | |
411 | client = subdev->client; | |
412 | ||
413 | __host1x_subdev_unregister(device, subdev); | |
414 | ||
415 | /* add the client to the list of idle clients */ | |
416 | mutex_lock(&clients_lock); | |
417 | list_add_tail(&client->list, &clients); | |
418 | mutex_unlock(&clients_lock); | |
419 | } | |
420 | ||
421 | /* remove subdevices */ | |
422 | list_for_each_entry_safe(subdev, sd, &device->subdevs, list) | |
423 | host1x_subdev_del(subdev); | |
424 | ||
425 | mutex_unlock(&device->subdevs_lock); | |
426 | ||
427 | /* move clients to idle list */ | |
428 | mutex_lock(&clients_lock); | |
429 | mutex_lock(&device->clients_lock); | |
430 | ||
431 | list_for_each_entry_safe(client, cl, &device->clients, list) | |
432 | list_move_tail(&client->list, &clients); | |
433 | ||
434 | mutex_unlock(&device->clients_lock); | |
435 | mutex_unlock(&clients_lock); | |
436 | ||
437 | /* finally remove the device */ | |
438 | list_del_init(&device->list); | |
439 | } | |
440 | ||
/* Release callback: runs when the last reference to the device is dropped. */
static void host1x_device_release(struct device *dev)
{
	struct host1x_device *device = to_host1x_device(dev);

	__host1x_device_del(device);
	kfree(device);
}
448 | ||
449 | static int host1x_device_add(struct host1x *host1x, | |
450 | struct host1x_driver *driver) | |
451 | { | |
452 | struct host1x_client *client, *tmp; | |
453 | struct host1x_subdev *subdev; | |
454 | struct host1x_device *device; | |
455 | int err; | |
456 | ||
457 | device = kzalloc(sizeof(*device), GFP_KERNEL); | |
458 | if (!device) | |
459 | return -ENOMEM; | |
460 | ||
f4c5cf88 TR |
461 | device_initialize(&device->dev); |
462 | ||
776dc384 TR |
463 | mutex_init(&device->subdevs_lock); |
464 | INIT_LIST_HEAD(&device->subdevs); | |
465 | INIT_LIST_HEAD(&device->active); | |
466 | mutex_init(&device->clients_lock); | |
467 | INIT_LIST_HEAD(&device->clients); | |
468 | INIT_LIST_HEAD(&device->list); | |
469 | device->driver = driver; | |
470 | ||
471 | device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask; | |
472 | device->dev.dma_mask = &device->dev.coherent_dma_mask; | |
f4c5cf88 | 473 | dev_set_name(&device->dev, "%s", driver->driver.name); |
776dc384 | 474 | device->dev.release = host1x_device_release; |
776dc384 TR |
475 | device->dev.bus = &host1x_bus_type; |
476 | device->dev.parent = host1x->dev; | |
477 | ||
3d6ce86e | 478 | of_dma_configure(&device->dev, host1x->dev->of_node, true); |
2fb0dceb | 479 | |
1e390478 | 480 | device->dev.dma_parms = &device->dma_parms; |
d98914eb | 481 | dma_set_max_seg_size(&device->dev, UINT_MAX); |
1e390478 | 482 | |
f4c5cf88 | 483 | err = host1x_device_parse_dt(device, driver); |
776dc384 | 484 | if (err < 0) { |
f4c5cf88 | 485 | kfree(device); |
776dc384 TR |
486 | return err; |
487 | } | |
488 | ||
776dc384 | 489 | list_add_tail(&device->list, &host1x->devices); |
776dc384 TR |
490 | |
491 | mutex_lock(&clients_lock); | |
492 | ||
493 | list_for_each_entry_safe(client, tmp, &clients, list) { | |
494 | list_for_each_entry(subdev, &device->subdevs, list) { | |
495 | if (subdev->np == client->dev->of_node) { | |
496 | host1x_subdev_register(device, subdev, client); | |
497 | break; | |
498 | } | |
499 | } | |
500 | } | |
501 | ||
502 | mutex_unlock(&clients_lock); | |
503 | ||
504 | return 0; | |
505 | } | |
506 | ||
507 | /* | |
508 | * Removes a device by first unregistering any subdevices and then removing | |
509 | * itself from the list of devices. | |
510 | * | |
511 | * This function must be called with the host1x->devices_lock held. | |
512 | */ | |
513 | static void host1x_device_del(struct host1x *host1x, | |
514 | struct host1x_device *device) | |
515 | { | |
f4c5cf88 TR |
516 | if (device->registered) { |
517 | device->registered = false; | |
518 | device_del(&device->dev); | |
519 | } | |
520 | ||
521 | put_device(&device->dev); | |
776dc384 TR |
522 | } |
523 | ||
524 | static void host1x_attach_driver(struct host1x *host1x, | |
525 | struct host1x_driver *driver) | |
526 | { | |
527 | struct host1x_device *device; | |
528 | int err; | |
529 | ||
530 | mutex_lock(&host1x->devices_lock); | |
531 | ||
532 | list_for_each_entry(device, &host1x->devices, list) { | |
533 | if (device->driver == driver) { | |
534 | mutex_unlock(&host1x->devices_lock); | |
535 | return; | |
536 | } | |
537 | } | |
538 | ||
776dc384 TR |
539 | err = host1x_device_add(host1x, driver); |
540 | if (err < 0) | |
541 | dev_err(host1x->dev, "failed to allocate device: %d\n", err); | |
38d98de4 TR |
542 | |
543 | mutex_unlock(&host1x->devices_lock); | |
776dc384 TR |
544 | } |
545 | ||
546 | static void host1x_detach_driver(struct host1x *host1x, | |
547 | struct host1x_driver *driver) | |
548 | { | |
549 | struct host1x_device *device, *tmp; | |
550 | ||
551 | mutex_lock(&host1x->devices_lock); | |
552 | ||
553 | list_for_each_entry_safe(device, tmp, &host1x->devices, list) | |
554 | if (device->driver == driver) | |
555 | host1x_device_del(host1x, device); | |
556 | ||
557 | mutex_unlock(&host1x->devices_lock); | |
558 | } | |
559 | ||
f67524ca TR |
560 | static int host1x_devices_show(struct seq_file *s, void *data) |
561 | { | |
562 | struct host1x *host1x = s->private; | |
563 | struct host1x_device *device; | |
564 | ||
565 | mutex_lock(&host1x->devices_lock); | |
566 | ||
567 | list_for_each_entry(device, &host1x->devices, list) { | |
568 | struct host1x_subdev *subdev; | |
569 | ||
570 | seq_printf(s, "%s\n", dev_name(&device->dev)); | |
571 | ||
572 | mutex_lock(&device->subdevs_lock); | |
573 | ||
574 | list_for_each_entry(subdev, &device->active, list) | |
575 | seq_printf(s, " %pOFf: %s\n", subdev->np, | |
576 | dev_name(subdev->client->dev)); | |
577 | ||
578 | list_for_each_entry(subdev, &device->subdevs, list) | |
579 | seq_printf(s, " %pOFf:\n", subdev->np); | |
580 | ||
581 | mutex_unlock(&device->subdevs_lock); | |
582 | } | |
583 | ||
584 | mutex_unlock(&host1x->devices_lock); | |
585 | ||
586 | return 0; | |
587 | } | |
588 | DEFINE_SHOW_ATTRIBUTE(host1x_devices); | |
589 | ||
466749f1 TR |
590 | /** |
591 | * host1x_register() - register a host1x controller | |
592 | * @host1x: host1x controller | |
593 | * | |
594 | * The host1x controller driver uses this to register a host1x controller with | |
595 | * the infrastructure. Note that all Tegra SoC generations have only ever come | |
596 | * with a single host1x instance, so this function is somewhat academic. | |
597 | */ | |
776dc384 TR |
598 | int host1x_register(struct host1x *host1x) |
599 | { | |
600 | struct host1x_driver *driver; | |
601 | ||
602 | mutex_lock(&devices_lock); | |
603 | list_add_tail(&host1x->list, &devices); | |
604 | mutex_unlock(&devices_lock); | |
605 | ||
606 | mutex_lock(&drivers_lock); | |
607 | ||
608 | list_for_each_entry(driver, &drivers, list) | |
609 | host1x_attach_driver(host1x, driver); | |
610 | ||
611 | mutex_unlock(&drivers_lock); | |
612 | ||
f67524ca TR |
613 | debugfs_create_file("devices", S_IRUGO, host1x->debugfs, host1x, |
614 | &host1x_devices_fops); | |
615 | ||
776dc384 TR |
616 | return 0; |
617 | } | |
618 | ||
466749f1 TR |
619 | /** |
620 | * host1x_unregister() - unregister a host1x controller | |
621 | * @host1x: host1x controller | |
622 | * | |
623 | * The host1x controller driver uses this to remove a host1x controller from | |
624 | * the infrastructure. | |
625 | */ | |
776dc384 TR |
626 | int host1x_unregister(struct host1x *host1x) |
627 | { | |
628 | struct host1x_driver *driver; | |
629 | ||
630 | mutex_lock(&drivers_lock); | |
631 | ||
632 | list_for_each_entry(driver, &drivers, list) | |
633 | host1x_detach_driver(host1x, driver); | |
634 | ||
635 | mutex_unlock(&drivers_lock); | |
636 | ||
637 | mutex_lock(&devices_lock); | |
638 | list_del_init(&host1x->list); | |
639 | mutex_unlock(&devices_lock); | |
640 | ||
641 | return 0; | |
642 | } | |
643 | ||
b0d36daa TR |
644 | static int host1x_device_probe(struct device *dev) |
645 | { | |
646 | struct host1x_driver *driver = to_host1x_driver(dev->driver); | |
647 | struct host1x_device *device = to_host1x_device(dev); | |
648 | ||
649 | if (driver->probe) | |
650 | return driver->probe(device); | |
651 | ||
652 | return 0; | |
653 | } | |
654 | ||
655 | static int host1x_device_remove(struct device *dev) | |
656 | { | |
657 | struct host1x_driver *driver = to_host1x_driver(dev->driver); | |
658 | struct host1x_device *device = to_host1x_device(dev); | |
659 | ||
660 | if (driver->remove) | |
661 | return driver->remove(device); | |
662 | ||
663 | return 0; | |
664 | } | |
665 | ||
666 | static void host1x_device_shutdown(struct device *dev) | |
667 | { | |
668 | struct host1x_driver *driver = to_host1x_driver(dev->driver); | |
669 | struct host1x_device *device = to_host1x_device(dev); | |
670 | ||
671 | if (driver->shutdown) | |
672 | driver->shutdown(device); | |
673 | } | |
674 | ||
466749f1 TR |
675 | /** |
676 | * host1x_driver_register_full() - register a host1x driver | |
677 | * @driver: host1x driver | |
678 | * @owner: owner module | |
679 | * | |
680 | * Drivers for host1x logical devices call this function to register a driver | |
681 | * with the infrastructure. Note that since these drive logical devices, the | |
682 | * registration of the driver actually triggers tho logical device creation. | |
683 | * A logical device will be created for each host1x instance. | |
684 | */ | |
f4c5cf88 TR |
685 | int host1x_driver_register_full(struct host1x_driver *driver, |
686 | struct module *owner) | |
776dc384 TR |
687 | { |
688 | struct host1x *host1x; | |
689 | ||
690 | INIT_LIST_HEAD(&driver->list); | |
691 | ||
692 | mutex_lock(&drivers_lock); | |
693 | list_add_tail(&driver->list, &drivers); | |
694 | mutex_unlock(&drivers_lock); | |
695 | ||
696 | mutex_lock(&devices_lock); | |
697 | ||
698 | list_for_each_entry(host1x, &devices, list) | |
699 | host1x_attach_driver(host1x, driver); | |
700 | ||
701 | mutex_unlock(&devices_lock); | |
702 | ||
f4c5cf88 TR |
703 | driver->driver.bus = &host1x_bus_type; |
704 | driver->driver.owner = owner; | |
b0d36daa TR |
705 | driver->driver.probe = host1x_device_probe; |
706 | driver->driver.remove = host1x_device_remove; | |
707 | driver->driver.shutdown = host1x_device_shutdown; | |
f4c5cf88 TR |
708 | |
709 | return driver_register(&driver->driver); | |
776dc384 | 710 | } |
f4c5cf88 | 711 | EXPORT_SYMBOL(host1x_driver_register_full); |
776dc384 | 712 | |
466749f1 TR |
713 | /** |
714 | * host1x_driver_unregister() - unregister a host1x driver | |
715 | * @driver: host1x driver | |
716 | * | |
717 | * Unbinds the driver from each of the host1x logical devices that it is | |
718 | * bound to, effectively removing the subsystem devices that they represent. | |
719 | */ | |
776dc384 TR |
720 | void host1x_driver_unregister(struct host1x_driver *driver) |
721 | { | |
d9a0a05b TR |
722 | struct host1x *host1x; |
723 | ||
e3e70814 TR |
724 | driver_unregister(&driver->driver); |
725 | ||
d9a0a05b TR |
726 | mutex_lock(&devices_lock); |
727 | ||
728 | list_for_each_entry(host1x, &devices, list) | |
729 | host1x_detach_driver(host1x, driver); | |
730 | ||
731 | mutex_unlock(&devices_lock); | |
732 | ||
776dc384 TR |
733 | mutex_lock(&drivers_lock); |
734 | list_del_init(&driver->list); | |
735 | mutex_unlock(&drivers_lock); | |
736 | } | |
737 | EXPORT_SYMBOL(host1x_driver_unregister); | |
738 | ||
0cfe5a6e TR |
739 | /** |
740 | * __host1x_client_init() - initialize a host1x client | |
741 | * @client: host1x client | |
742 | * @key: lock class key for the client-specific mutex | |
743 | */ | |
744 | void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key) | |
745 | { | |
1f39b1df | 746 | host1x_bo_cache_init(&client->cache); |
0cfe5a6e TR |
747 | INIT_LIST_HEAD(&client->list); |
748 | __mutex_init(&client->lock, "host1x client lock", key); | |
749 | client->usecount = 0; | |
750 | } | |
751 | EXPORT_SYMBOL(__host1x_client_init); | |
752 | ||
753 | /** | |
754 | * host1x_client_exit() - uninitialize a host1x client | |
755 | * @client: host1x client | |
756 | */ | |
757 | void host1x_client_exit(struct host1x_client *client) | |
758 | { | |
759 | mutex_destroy(&client->lock); | |
760 | } | |
761 | EXPORT_SYMBOL(host1x_client_exit); | |
762 | ||
466749f1 | 763 | /** |
a24f9817 | 764 | * __host1x_client_register() - register a host1x client |
466749f1 TR |
765 | * @client: host1x client |
766 | * | |
767 | * Registers a host1x client with each host1x controller instance. Note that | |
768 | * each client will only match their parent host1x controller and will only be | |
769 | * associated with that instance. Once all clients have been registered with | |
770 | * their parent host1x controller, the infrastructure will set up the logical | |
771 | * device and call host1x_device_init(), which will in turn call each client's | |
772 | * &host1x_client_ops.init implementation. | |
773 | */ | |
0cfe5a6e | 774 | int __host1x_client_register(struct host1x_client *client) |
776dc384 TR |
775 | { |
776 | struct host1x *host1x; | |
777 | int err; | |
778 | ||
779 | mutex_lock(&devices_lock); | |
780 | ||
781 | list_for_each_entry(host1x, &devices, list) { | |
0c7dfd36 | 782 | err = host1x_add_client(host1x, client); |
776dc384 TR |
783 | if (!err) { |
784 | mutex_unlock(&devices_lock); | |
785 | return 0; | |
786 | } | |
787 | } | |
788 | ||
789 | mutex_unlock(&devices_lock); | |
790 | ||
791 | mutex_lock(&clients_lock); | |
792 | list_add_tail(&client->list, &clients); | |
793 | mutex_unlock(&clients_lock); | |
794 | ||
795 | return 0; | |
796 | } | |
a24f9817 | 797 | EXPORT_SYMBOL(__host1x_client_register); |
776dc384 | 798 | |
466749f1 TR |
799 | /** |
800 | * host1x_client_unregister() - unregister a host1x client | |
801 | * @client: host1x client | |
802 | * | |
803 | * Removes a host1x client from its host1x controller instance. If a logical | |
804 | * device has already been initialized, it will be torn down. | |
805 | */ | |
776dc384 TR |
806 | int host1x_client_unregister(struct host1x_client *client) |
807 | { | |
808 | struct host1x_client *c; | |
809 | struct host1x *host1x; | |
810 | int err; | |
811 | ||
812 | mutex_lock(&devices_lock); | |
813 | ||
814 | list_for_each_entry(host1x, &devices, list) { | |
0c7dfd36 | 815 | err = host1x_del_client(host1x, client); |
776dc384 TR |
816 | if (!err) { |
817 | mutex_unlock(&devices_lock); | |
818 | return 0; | |
819 | } | |
820 | } | |
821 | ||
822 | mutex_unlock(&devices_lock); | |
823 | mutex_lock(&clients_lock); | |
824 | ||
825 | list_for_each_entry(c, &clients, list) { | |
826 | if (c == client) { | |
827 | list_del_init(&c->list); | |
828 | break; | |
829 | } | |
830 | } | |
831 | ||
832 | mutex_unlock(&clients_lock); | |
833 | ||
1f39b1df TR |
834 | host1x_bo_cache_destroy(&client->cache); |
835 | ||
776dc384 TR |
836 | return 0; |
837 | } | |
838 | EXPORT_SYMBOL(host1x_client_unregister); | |
fd67e9c6 TR |
839 | |
840 | int host1x_client_suspend(struct host1x_client *client) | |
841 | { | |
842 | int err = 0; | |
843 | ||
844 | mutex_lock(&client->lock); | |
845 | ||
846 | if (client->usecount == 1) { | |
847 | if (client->ops && client->ops->suspend) { | |
848 | err = client->ops->suspend(client); | |
849 | if (err < 0) | |
850 | goto unlock; | |
851 | } | |
852 | } | |
853 | ||
854 | client->usecount--; | |
855 | dev_dbg(client->dev, "use count: %u\n", client->usecount); | |
856 | ||
857 | if (client->parent) { | |
858 | err = host1x_client_suspend(client->parent); | |
859 | if (err < 0) | |
860 | goto resume; | |
861 | } | |
862 | ||
863 | goto unlock; | |
864 | ||
865 | resume: | |
866 | if (client->usecount == 0) | |
867 | if (client->ops && client->ops->resume) | |
868 | client->ops->resume(client); | |
869 | ||
870 | client->usecount++; | |
871 | unlock: | |
872 | mutex_unlock(&client->lock); | |
873 | return err; | |
874 | } | |
875 | EXPORT_SYMBOL(host1x_client_suspend); | |
876 | ||
877 | int host1x_client_resume(struct host1x_client *client) | |
878 | { | |
879 | int err = 0; | |
880 | ||
881 | mutex_lock(&client->lock); | |
882 | ||
883 | if (client->parent) { | |
884 | err = host1x_client_resume(client->parent); | |
885 | if (err < 0) | |
886 | goto unlock; | |
887 | } | |
888 | ||
889 | if (client->usecount == 0) { | |
890 | if (client->ops && client->ops->resume) { | |
891 | err = client->ops->resume(client); | |
892 | if (err < 0) | |
893 | goto suspend; | |
894 | } | |
895 | } | |
896 | ||
897 | client->usecount++; | |
898 | dev_dbg(client->dev, "use count: %u\n", client->usecount); | |
899 | ||
900 | goto unlock; | |
901 | ||
902 | suspend: | |
903 | if (client->parent) | |
904 | host1x_client_suspend(client->parent); | |
905 | unlock: | |
906 | mutex_unlock(&client->lock); | |
907 | return err; | |
908 | } | |
909 | EXPORT_SYMBOL(host1x_client_resume); | |
1f39b1df TR |
910 | |
911 | struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo, | |
912 | enum dma_data_direction dir, | |
913 | struct host1x_bo_cache *cache) | |
914 | { | |
915 | struct host1x_bo_mapping *mapping; | |
916 | ||
917 | if (cache) { | |
918 | mutex_lock(&cache->lock); | |
919 | ||
920 | list_for_each_entry(mapping, &cache->mappings, entry) { | |
921 | if (mapping->bo == bo && mapping->direction == dir) { | |
922 | kref_get(&mapping->ref); | |
923 | goto unlock; | |
924 | } | |
925 | } | |
926 | } | |
927 | ||
928 | mapping = bo->ops->pin(dev, bo, dir); | |
929 | if (IS_ERR(mapping)) | |
930 | goto unlock; | |
931 | ||
932 | spin_lock(&mapping->bo->lock); | |
933 | list_add_tail(&mapping->list, &bo->mappings); | |
934 | spin_unlock(&mapping->bo->lock); | |
935 | ||
936 | if (cache) { | |
937 | INIT_LIST_HEAD(&mapping->entry); | |
938 | mapping->cache = cache; | |
939 | ||
940 | list_add_tail(&mapping->entry, &cache->mappings); | |
941 | ||
942 | /* bump reference count to track the copy in the cache */ | |
943 | kref_get(&mapping->ref); | |
944 | } | |
945 | ||
946 | unlock: | |
947 | if (cache) | |
948 | mutex_unlock(&cache->lock); | |
949 | ||
950 | return mapping; | |
951 | } | |
952 | EXPORT_SYMBOL(host1x_bo_pin); | |
953 | ||
954 | static void __host1x_bo_unpin(struct kref *ref) | |
955 | { | |
956 | struct host1x_bo_mapping *mapping = to_host1x_bo_mapping(ref); | |
957 | ||
958 | /* | |
959 | * When the last reference of the mapping goes away, make sure to remove the mapping from | |
960 | * the cache. | |
961 | */ | |
962 | if (mapping->cache) | |
963 | list_del(&mapping->entry); | |
964 | ||
965 | spin_lock(&mapping->bo->lock); | |
966 | list_del(&mapping->list); | |
967 | spin_unlock(&mapping->bo->lock); | |
968 | ||
969 | mapping->bo->ops->unpin(mapping); | |
970 | } | |
971 | ||
972 | void host1x_bo_unpin(struct host1x_bo_mapping *mapping) | |
973 | { | |
974 | struct host1x_bo_cache *cache = mapping->cache; | |
975 | ||
976 | if (cache) | |
977 | mutex_lock(&cache->lock); | |
978 | ||
979 | kref_put(&mapping->ref, __host1x_bo_unpin); | |
980 | ||
981 | if (cache) | |
982 | mutex_unlock(&cache->lock); | |
983 | } | |
984 | EXPORT_SYMBOL(host1x_bo_unpin); |