// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel
 *
 * Based on drivers/base/devres.c
 */

#include <drm/drm_managed.h>

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <drm/drm_device.h>
#include <drm/drm_print.h>

#include "drm_internal.h"

/**
 * DOC: managed resources
 *
 * Inspired by struct &device managed resources, but tied to the lifetime of
 * struct &drm_device, which can outlive the underlying physical device, usually
 * when userspace has some open files and other handles to resources still open.
 *
 * Release actions can be added with drmm_add_action(), and memory allocations
 * can be done directly with drmm_kmalloc() and the related functions. Everything
 * will be released on the final drm_dev_put() in reverse order of how the
 * release actions have been added and memory has been allocated since driver
 * loading started with devm_drm_dev_alloc().
 *
 * Note that release actions and managed memory can also be added and removed
 * during the lifetime of the driver; all the functions are fully
 * concurrency-safe. But it is recommended to use managed resources only for
 * resources that change rarely, if ever, during the lifetime of the
 * &drm_device instance.
 */
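
/*
 * Example: a probe function wiring up a managed hardware-disable action.
 * This is a minimal sketch, not code from a real driver; all mydrv_* names
 * and the mydrv_driver structure are hypothetical.
 *
 *	static void mydrv_disable_hw(struct drm_device *dev, void *data)
 *	{
 *		struct mydrv_device *mydrv = data;
 *
 *		mydrv_hw_shutdown(mydrv);
 *	}
 *
 *	static int mydrv_probe(struct platform_device *pdev)
 *	{
 *		struct mydrv_device *mydrv;
 *		int ret;
 *
 *		mydrv = devm_drm_dev_alloc(&pdev->dev, &mydrv_driver,
 *					   struct mydrv_device, drm);
 *		if (IS_ERR(mydrv))
 *			return PTR_ERR(mydrv);
 *
 *		mydrv_hw_init(mydrv);
 *		ret = drmm_add_action_or_reset(&mydrv->drm, mydrv_disable_hw,
 *					       mydrv);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dev_register(&mydrv->drm, 0);
 *	}
 */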

struct drmres_node {
	struct list_head entry;
	drmres_release_t release;
	const char *name;
	size_t size;
};

struct drmres {
	struct drmres_node node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
	 * buffer alignment as if it was allocated by plain kmalloc().
	 */
	u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};

static void free_dr(struct drmres *dr)
{
	kfree_const(dr->node.name);
	kfree(dr);
}

/*
 * Called on the final drm_dev_put() to release all managed resources in
 * reverse order of how they were added. For action entries the stored
 * pointer is passed back to the release action; plain drmm_kmalloc()
 * entries have no release action and are simply freed.
 */
void drm_managed_release(struct drm_device *dev)
{
	struct drmres *dr, *tmp;

	drm_dbg_drmres(dev, "drmres release begin\n");
	list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) {
		drm_dbg_drmres(dev, "REL %p %s (%zu bytes)\n",
			       dr, dr->node.name, dr->node.size);

		if (dr->node.release)
			dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL);

		list_del(&dr->node.entry);
		free_dr(dr);
	}
	drm_dbg_drmres(dev, "drmres release end\n");
}

/*
 * Always inline so that kmalloc_track_caller tracks the actual interesting
 * caller outside of drm_managed.c.
 */
static __always_inline struct drmres *alloc_dr(drmres_release_t release,
					       size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct drmres *dr;

	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(*dr), size, &tot_size)))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct drmres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	dr->node.size = size;

	return dr;
}

static void del_dr(struct drm_device *dev, struct drmres *dr)
{
	list_del_init(&dr->node.entry);

	drm_dbg_drmres(dev, "DEL %p %s (%lu bytes)\n",
		       dr, dr->node.name, (unsigned long) dr->node.size);
}

static void add_dr(struct drm_device *dev, struct drmres *dr)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_add(&dr->node.entry, &dev->managed.resources);
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	drm_dbg_drmres(dev, "ADD %p %s (%lu bytes)\n",
		       dr, dr->node.name, (unsigned long) dr->node.size);
}

void drmm_add_final_kfree(struct drm_device *dev, void *container)
{
	WARN_ON(dev->managed.final_kfree);
	/* @dev must be embedded within the to-be-freed @container. */
	WARN_ON(dev < (struct drm_device *) container);
	WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container)));
	dev->managed.final_kfree = container;
}

/*
 * Underlying implementation of the drmm_add_action() macro in drm_managed.h,
 * which passes the stringified action function as @name for debug output.
 */
int __drmm_add_action(struct drm_device *dev,
		      drmres_release_t action,
		      void *data, const char *name)
{
	struct drmres *dr;
	void **void_ptr;

	dr = alloc_dr(action, data ? sizeof(void *) : 0,
		      GFP_KERNEL | __GFP_ZERO,
		      dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to add action %s for %p\n",
			       name, data);
		return -ENOMEM;
	}

	dr->node.name = kstrdup_const(name, GFP_KERNEL);
	if (data) {
		void_ptr = (void **)&dr->data;
		*void_ptr = data;
	}

	add_dr(dev, dr);

	return 0;
}
EXPORT_SYMBOL(__drmm_add_action);

/*
 * Like __drmm_add_action(), but calls the release action immediately if
 * queuing it fails, so the caller never has to clean up @data itself.
 */
int __drmm_add_action_or_reset(struct drm_device *dev,
			       drmres_release_t action,
			       void *data, const char *name)
{
	int ret;

	ret = __drmm_add_action(dev, action, data, name);
	if (ret)
		action(dev, data);

	return ret;
}
EXPORT_SYMBOL(__drmm_add_action_or_reset);
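
/*
 * Usage sketch via the drmm_add_action_or_reset() macro (mydrv_* names are
 * hypothetical): on failure the release action has already run, so no extra
 * error path is needed for the resource.
 *
 *	mydrv_hw_enable(mydrv);
 *	ret = drmm_add_action_or_reset(&mydrv->drm, mydrv_hw_disable, mydrv);
 *	if (ret)
 *		return ret;	// mydrv_hw_disable() has already run
 */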

/**
 * drmm_kmalloc - &drm_device managed kmalloc()
 * @dev: DRM device
 * @size: size of the memory allocation
 * @gfp: GFP allocation flags
 *
 * This is a &drm_device managed version of kmalloc(). The allocated memory is
 * automatically freed on the final drm_dev_put(). Memory can also be freed
 * before the final drm_dev_put() by calling drmm_kfree().
 */
void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp)
{
	struct drmres *dr;

	dr = alloc_dr(NULL, size, gfp, dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to allocate %zu bytes, %u flags\n",
			       size, gfp);
		return NULL;
	}
	dr->node.name = kstrdup_const("kmalloc", GFP_KERNEL);

	add_dr(dev, dr);

	return dr->data;
}
EXPORT_SYMBOL(drmm_kmalloc);
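
/*
 * Usage sketch (hypothetical mydrv_* names): the allocation needs no explicit
 * kfree(), it lives until the final drm_dev_put().
 *
 *	struct mydrv_state *state;
 *
 *	state = drmm_kmalloc(&mydrv->drm, sizeof(*state), GFP_KERNEL);
 *	if (!state)
 *		return -ENOMEM;
 */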

/**
 * drmm_kstrdup - &drm_device managed kstrdup()
 * @dev: DRM device
 * @s: 0-terminated string to be duplicated
 * @gfp: GFP allocation flags
 *
 * This is a &drm_device managed version of kstrdup(). The allocated memory is
 * automatically freed on the final drm_dev_put() and works exactly like a
 * memory allocation obtained by drmm_kmalloc().
 */
char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = drmm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);
	return buf;
}
EXPORT_SYMBOL_GPL(drmm_kstrdup);
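
/*
 * Usage sketch (the fw_name field and the literal are hypothetical):
 * duplicating a string whose lifetime must match the drm_device.
 *
 *	mydrv->fw_name = drmm_kstrdup(&mydrv->drm, "mydrv_fw.bin", GFP_KERNEL);
 *	if (!mydrv->fw_name)
 *		return -ENOMEM;
 */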

/**
 * drmm_kfree - &drm_device managed kfree()
 * @dev: DRM device
 * @data: memory allocation to be freed
 *
 * This is a &drm_device managed version of kfree() which can be used to
 * release memory allocated through drmm_kmalloc() or any of its related
 * functions before the final drm_dev_put() of @dev.
 */
void drmm_kfree(struct drm_device *dev, void *data)
{
	struct drmres *dr_match = NULL, *dr;
	unsigned long flags;

	if (!data)
		return;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_for_each_entry(dr, &dev->managed.resources, node.entry) {
		if (dr->data == data) {
			dr_match = dr;
			del_dr(dev, dr_match);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	if (WARN_ON(!dr_match))
		return;

	free_dr(dr_match);
}
EXPORT_SYMBOL(drmm_kfree);
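
/*
 * Usage sketch (hypothetical): releasing a managed allocation early, e.g. a
 * scratch buffer that is only needed during initialization.
 *
 *	scratch = drmm_kmalloc(&mydrv->drm, SZ_4K, GFP_KERNEL);
 *	if (!scratch)
 *		return -ENOMEM;
 *	...
 *	drmm_kfree(&mydrv->drm, scratch);	// no need to wait for drm_dev_put()
 */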

static void drmm_mutex_release(struct drm_device *dev, void *res)
{
	struct mutex *lock = res;

	mutex_destroy(lock);
}

/**
 * drmm_mutex_init - &drm_device-managed mutex_init()
 * @dev: DRM device
 * @lock: lock to be initialized
 *
 * This is a &drm_device-managed version of mutex_init(). The initialized
 * lock is automatically destroyed on the final drm_dev_put().
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 */
int drmm_mutex_init(struct drm_device *dev, struct mutex *lock)
{
	mutex_init(lock);

	return drmm_add_action_or_reset(dev, drmm_mutex_release, lock);
}
EXPORT_SYMBOL(drmm_mutex_init);
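
/*
 * Usage sketch (the state_lock field is hypothetical): no matching
 * mutex_destroy() is needed in the driver's teardown path.
 *
 *	ret = drmm_mutex_init(&mydrv->drm, &mydrv->state_lock);
 *	if (ret)
 *		return ret;
 */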