// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2015 Broadcom
 */

/**
 * DOC: VC4 GEM BO management support
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between.  To support it, we
 * use the GEM DMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the DMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */

#include <linux/dma-buf.h>

#include <drm/drm_fourcc.h>

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

static const struct drm_gem_object_funcs vc4_gem_object_funcs;

static const char * const bo_type_names[] = {
	"kernel",
	"V3D",
	"V3D shader",
	"dumb",
	"binner",
	"RCL",
	"BCL",
	"kernel BO cache",
};

static bool is_user_label(int label)
{
	return label >= VC4_BO_TYPE_COUNT;
}

static void vc4_bo_stats_print(struct drm_printer *p, struct vc4_dev *vc4)
{
	int i;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		drm_printf(p, "%30s: %6dkb BOs (%d)\n",
			   vc4->bo_labels[i].name,
			   vc4->bo_labels[i].size_allocated / 1024,
			   vc4->bo_labels[i].num_allocated);
	}

	mutex_lock(&vc4->purgeable.lock);
	if (vc4->purgeable.num)
		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
			   vc4->purgeable.size / 1024, vc4->purgeable.num);

	if (vc4->purgeable.purged_num)
		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
			   vc4->purgeable.purged_size / 1024,
			   vc4->purgeable.purged_num);
	mutex_unlock(&vc4->purgeable.lock);
}

static int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_printer p = drm_seq_file_printer(m);

	vc4_bo_stats_print(&p, vc4);

	return 0;
}

/* Takes ownership of *name and returns the appropriate slot for it in
 * the bo_labels[] array, extending it as necessary.
 *
 * This is inefficient and could use a hash table instead of walking
 * an array and strcmp()ing.  However, the assumption is that user
 * labeling will be infrequent (scanout buffers and other long-lived
 * objects, or debug driver builds), so we can live with it for now.
 */
static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
{
	int i;
	int free_slot = -1;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].name) {
			free_slot = i;
		} else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
			kfree(name);
			return i;
		}
	}

	if (free_slot != -1) {
		WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
		vc4->bo_labels[free_slot].name = name;
		return free_slot;
	} else {
		u32 new_label_count = vc4->num_labels + 1;
		struct vc4_label *new_labels =
			krealloc(vc4->bo_labels,
				 new_label_count * sizeof(*new_labels),
				 GFP_KERNEL);

		if (!new_labels) {
			kfree(name);
			return -1;
		}

		free_slot = vc4->num_labels;
		vc4->bo_labels = new_labels;
		vc4->num_labels = new_label_count;

		vc4->bo_labels[free_slot].name = name;
		vc4->bo_labels[free_slot].num_allocated = 0;
		vc4->bo_labels[free_slot].size_allocated = 0;

		return free_slot;
	}
}

static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
{
	struct vc4_bo *bo = to_vc4_bo(gem_obj);
	struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	if (label != -1) {
		vc4->bo_labels[label].num_allocated++;
		vc4->bo_labels[label].size_allocated += gem_obj->size;
	}

	vc4->bo_labels[bo->label].num_allocated--;
	vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;

	if (vc4->bo_labels[bo->label].num_allocated == 0 &&
	    is_user_label(bo->label)) {
		/* Free user BO label slots on last unreference.
		 * Slots are just where we track the stats for a given
		 * name, and once a name is unused we can reuse that
		 * slot.
		 */
		kfree(vc4->bo_labels[bo->label].name);
		vc4->bo_labels[bo->label].name = NULL;
	}

	bo->label = label;
}

static uint32_t bo_page_index(size_t size)
{
	return (size / PAGE_SIZE) - 1;
}

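/* Must be called with vc4->bo_lock held.  Frees any shader validation
 * state attached to the BO and returns its contiguous memory to the
 * DMA allocator.
 */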
static void vc4_bo_destroy(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	vc4_bo_set_label(obj, -1);

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	mutex_destroy(&bo->madv_lock);
	drm_gem_dma_free(&bo->base);
}

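/* Must be called with vc4->bo_lock held.  Drops the BO from both the
 * time-ordered and the size-indexed cache lists.
 */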
static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	lockdep_assert_held(&vc4->bo_lock);
	list_del(&bo->unref_head);
	list_del(&bo->size_head);
}

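/* Returns the list head caching BOs of the given size, growing the
 * size-indexed array of list heads on demand.  Returns NULL if that
 * array can't be (re)allocated.
 */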
static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
						     size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);

	if (vc4->bo_cache.size_list_size <= page_index) {
		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
					page_index + 1);
		struct list_head *new_list;
		uint32_t i;

		new_list = kmalloc_array(new_size, sizeof(struct list_head),
					 GFP_KERNEL);
		if (!new_list)
			return NULL;

		/* Rebase the old cached BO lists to their new list
		 * head locations.
		 */
		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
			struct list_head *old_list =
				&vc4->bo_cache.size_list[i];

			if (list_empty(old_list))
				INIT_LIST_HEAD(&new_list[i]);
			else
				list_replace(old_list, &new_list[i]);
		}
		/* And initialize the brand new BO list heads. */
		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
			INIT_LIST_HEAD(&new_list[i]);

		kfree(vc4->bo_cache.size_list);
		vc4->bo_cache.size_list = new_list;
		vc4->bo_cache.size_list_size = new_size;
	}

	return &vc4->bo_cache.size_list[page_index];
}

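/* Frees every BO currently sitting in the kernel BO cache. */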
static void vc4_bo_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->bo_lock);
	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
	mutex_unlock(&vc4->bo_lock);
}

void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	mutex_lock(&vc4->purgeable.lock);
	list_add_tail(&bo->size_head, &vc4->purgeable.list);
	vc4->purgeable.num++;
	vc4->purgeable.size += bo->base.base.size;
	mutex_unlock(&vc4->purgeable.lock);
}

static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	/* list_del_init() is used here because the caller might release
	 * the purgeable lock in order to acquire the madv one and update the
	 * madv status.
	 * During this short period of time a user might decide to mark
	 * the BO as unpurgeable, and if bo->madv is set to
	 * VC4_MADV_DONTNEED it will try to remove the BO from the
	 * purgeable list which will fail if the ->next/prev fields
	 * are set to LIST_POISON1/LIST_POISON2 (which is what
	 * list_del() does).
	 * Re-initializing the list element guarantees that list_del()
	 * will work correctly even if it's a NOP.
	 */
	list_del_init(&bo->size_head);
	vc4->purgeable.num--;
	vc4->purgeable.size -= bo->base.base.size;
}

void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	vc4_bo_remove_from_purgeable_pool_locked(bo);
	mutex_unlock(&vc4->purgeable.lock);
}

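/* Releases the backing memory of a BO that userspace marked DONTNEED.
 * Called with the BO's madv lock held; existing mappings are unmapped
 * so later CPU accesses fault instead of touching freed memory.
 */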
static void vc4_bo_purge(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&bo->madv_lock));
	WARN_ON(bo->madv != VC4_MADV_DONTNEED);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.dma_addr);
	bo->base.vaddr = NULL;
	bo->madv = __VC4_MADV_PURGED;
}

static void vc4_bo_userspace_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->purgeable.lock);
	while (!list_empty(&vc4->purgeable.list)) {
		struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list,
						     struct vc4_bo, size_head);
		struct drm_gem_object *obj = &bo->base.base;
		size_t purged_size = 0;

		vc4_bo_remove_from_purgeable_pool_locked(bo);

		/* Release the purgeable lock while we're purging the BO so
		 * that other people can continue inserting things in the
		 * purgeable pool without having to wait for all BOs to be
		 * purged.
		 */
		mutex_unlock(&vc4->purgeable.lock);
		mutex_lock(&bo->madv_lock);

		/* Since we released the purgeable pool lock before acquiring
		 * the BO madv one, the user may have marked the BO as WILLNEED
		 * and re-used it in the meantime.
		 * Before purging the BO we need to make sure
		 * - it is still marked as DONTNEED
		 * - it has not been re-inserted in the purgeable list
		 * - it is not used by HW blocks
		 * If one of these conditions is not met, just skip the entry.
		 */
		if (bo->madv == VC4_MADV_DONTNEED &&
		    list_empty(&bo->size_head) &&
		    !refcount_read(&bo->usecnt)) {
			purged_size = bo->base.base.size;
			vc4_bo_purge(obj);
		}
		mutex_unlock(&bo->madv_lock);
		mutex_lock(&vc4->purgeable.lock);

		if (purged_size) {
			vc4->purgeable.purged_size += purged_size;
			vc4->purgeable.purged_num++;
		}
	}
	mutex_unlock(&vc4->purgeable.lock);
}

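/* Tries to satisfy an allocation from the kernel BO cache.  On success
 * the returned BO holds a fresh reference and has been relabeled to the
 * requested type; returns NULL if the cache has nothing of that size.
 */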
static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
					    uint32_t size,
					    enum vc4_kernel_bo_type type)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);
	struct vc4_bo *bo = NULL;

	mutex_lock(&vc4->bo_lock);
	if (page_index >= vc4->bo_cache.size_list_size)
		goto out;

	if (list_empty(&vc4->bo_cache.size_list[page_index]))
		goto out;

	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
			      struct vc4_bo, size_head);
	vc4_bo_remove_from_cache(bo);
	kref_init(&bo->base.base.refcount);

out:
	if (bo)
		vc4_bo_set_label(&bo->base.base, type);
	mutex_unlock(&vc4->bo_lock);
	return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the DMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return ERR_PTR(-ENODEV);

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->madv = VC4_MADV_WILLNEED;
	refcount_set(&bo->usecnt, 0);

	mutex_init(&bo->madv_lock);

	mutex_lock(&vc4->bo_lock);
	bo->label = VC4_BO_TYPE_KERNEL;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
	mutex_unlock(&vc4->bo_lock);

	bo->base.base.funcs = &vc4_gem_object_funcs;

	return &bo->base.base;
}

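/* Allocates a BO of at least unaligned_size bytes, rounded up to a page
 * multiple.  Tries the kernel BO cache first, then a fresh DMA
 * allocation, then retries after purging the kernel and userspace BO
 * caches when contiguous memory has run out.
 */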
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool allow_unzeroed, enum vc4_kernel_bo_type type)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_dma_object *dma_obj;
	struct vc4_bo *bo;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return ERR_PTR(-ENODEV);

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	bo = vc4_bo_get_from_cache(dev, size, type);
	if (bo) {
		if (!allow_unzeroed)
			memset(bo->base.vaddr, 0, bo->base.base.size);
		return bo;
	}

	dma_obj = drm_gem_dma_create(dev, size);
	if (IS_ERR(dma_obj)) {
		/*
		 * If we've run out of DMA memory, kill the cache of
		 * DMA allocations we've got laying around and try again.
		 */
		vc4_bo_cache_purge(dev);
		dma_obj = drm_gem_dma_create(dev, size);
	}

	if (IS_ERR(dma_obj)) {
		/*
		 * Still not enough DMA memory, purge the userspace BO
		 * cache and retry.
		 * This is sub-optimal since we purge the whole userspace
		 * BO cache, which forces users that want to re-use the BO
		 * to restore its initial content.
		 * Ideally, we should purge entries one by one and retry
		 * after each to see if DMA allocation succeeds. Or even
		 * better, try to find an entry with at least the same
		 * size.
		 */
		vc4_bo_userspace_cache_purge(dev);
		dma_obj = drm_gem_dma_create(dev, size);
	}

	if (IS_ERR(dma_obj)) {
		struct drm_printer p = drm_info_printer(vc4->base.dev);
		DRM_ERROR("Failed to allocate from GEM DMA helper:\n");
		vc4_bo_stats_print(&p, vc4);
		return ERR_PTR(-ENOMEM);
	}
	bo = to_vc4_bo(&dma_obj->base);

	/* By default, BOs do not support the MADV ioctl. This will be enabled
	 * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
	 * BOs).
	 */
	bo->madv = __VC4_MADV_NOTSUPP;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_set_label(&dma_obj->base, type);
	mutex_unlock(&vc4->bo_lock);

	return bo;
}

int vc4_bo_dumb_create(struct drm_file *file_priv,
		       struct drm_device *dev,
		       struct drm_mode_create_dumb *args)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = NULL;
	int ret;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	ret = vc4_dumb_fixup_args(args);
	if (ret)
		return ret;

	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put(&bo->base.base);

	return ret;
}

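/* Must be called with vc4->bo_lock held.  Destroys cached BOs that have
 * been unused for more than a second, re-arming the cache timer if
 * newer entries remain.
 */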
static void vc4_bo_cache_free_old(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

	lockdep_assert_held(&vc4->bo_lock);

	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		if (time_before(expire_time, bo->free_time)) {
			mod_timer(&vc4->bo_cache.time_timer,
				  round_jiffies_up(jiffies +
						   msecs_to_jiffies(1000)));
			return;
		}

		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
}

/* Called on the last userspace/kernel unreference of the BO.  Returns
 * it to the BO cache if possible, otherwise frees it.
 */
static void vc4_free_object(struct drm_gem_object *gem_bo)
{
	struct drm_device *dev = gem_bo->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = to_vc4_bo(gem_bo);
	struct list_head *cache_list;

	/* Remove the BO from the purgeable list. */
	mutex_lock(&bo->madv_lock);
	if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
		vc4_bo_remove_from_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);

	mutex_lock(&vc4->bo_lock);
	/* If the object references someone else's memory, we can't cache it.
	 */
	if (gem_bo->import_attach) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* Don't cache if it was publicly named. */
	if (gem_bo->name) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* If this object was partially constructed but DMA allocation
	 * had failed, just free it. Can also happen when the BO has been
	 * purged.
	 */
	if (!bo->base.vaddr) {
		vc4_bo_destroy(bo);
		goto out;
	}

	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
	if (!cache_list) {
		vc4_bo_destroy(bo);
		goto out;
	}

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	/* Reset madv and usecnt before adding the BO to the cache. */
	bo->madv = __VC4_MADV_NOTSUPP;
	refcount_set(&bo->usecnt, 0);

	bo->t_format = false;
	bo->free_time = jiffies;
	list_add(&bo->size_head, cache_list);
	list_add(&bo->unref_head, &vc4->bo_cache.time_list);

	vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);

	vc4_bo_cache_free_old(dev);

out:
	mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = &vc4->base;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
	mutex_unlock(&vc4->bo_lock);
}

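/* Takes a usecnt reference on the BO, pinning its backing memory so it
 * can't be purged.  Fails with -EINVAL if the BO has been marked
 * purgeable (DONTNEED) or already purged and nobody else holds a
 * reference.
 */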
int vc4_bo_inc_usecnt(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
	int ret;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	/* Fast path: if the BO is already retained by someone, no need to
	 * check the madv status.
	 */
	if (refcount_inc_not_zero(&bo->usecnt))
		return 0;

	mutex_lock(&bo->madv_lock);
	switch (bo->madv) {
	case VC4_MADV_WILLNEED:
		if (!refcount_inc_not_zero(&bo->usecnt))
			refcount_set(&bo->usecnt, 1);
		ret = 0;
		break;
	case VC4_MADV_DONTNEED:
		/* We shouldn't use a BO marked as purgeable if at least
		 * someone else retained its content by incrementing usecnt.
		 * Luckily the BO hasn't been purged yet, but something wrong
		 * is happening here. Just throw an error instead of
		 * authorizing this use case.
		 */
	case __VC4_MADV_PURGED:
		/* We can't use a purged BO. */
	default:
		/* Invalid madv value. */
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&bo->madv_lock);

	return ret;
}

void vc4_bo_dec_usecnt(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	/* Fast path: if the BO is still retained by someone, no need to test
	 * the madv value.
	 */
	if (refcount_dec_not_one(&bo->usecnt))
		return;

	mutex_lock(&bo->madv_lock);
	if (refcount_dec_and_test(&bo->usecnt) &&
	    bo->madv == VC4_MADV_DONTNEED)
		vc4_bo_add_to_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);
}

static void vc4_bo_cache_time_timer(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);

	schedule_work(&vc4->bo_cache.time_work);
}

static struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct dma_buf *dmabuf;
	int ret;

	if (bo->validated_shader) {
		DRM_DEBUG("Attempting to export shader BO\n");
		return ERR_PTR(-EINVAL);
	}

	/* Note: as soon as the BO is exported it becomes unpurgeable, because
	 * no one ever decrements the usecnt even if the reference held by the
	 * exported BO is released. This shouldn't be a problem since we don't
	 * expect exported BOs to be marked as purgeable.
	 */
	ret = vc4_bo_inc_usecnt(bo);
	if (ret) {
		DRM_ERROR("Failed to increment BO usecnt\n");
		return ERR_PTR(ret);
	}

	dmabuf = drm_gem_prime_export(obj, flags);
	if (IS_ERR(dmabuf))
		vc4_bo_dec_usecnt(bo);

	return dmabuf;
}

static vm_fault_t vc4_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct vc4_bo *bo = to_vc4_bo(obj);

	/* The only reason we would end up here is when user-space accesses
	 * BO's memory after it's been purged.
	 */
	mutex_lock(&bo->madv_lock);
	WARN_ON(bo->madv != __VC4_MADV_PURGED);
	mutex_unlock(&bo->madv_lock);

	return VM_FAULT_SIGBUS;
}

static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	if (bo->madv != VC4_MADV_WILLNEED) {
		DRM_DEBUG("mmapping of %s BO not allowed\n",
			  bo->madv == VC4_MADV_DONTNEED ?
			  "purgeable" : "purged");
		return -EINVAL;
	}

	return drm_gem_dma_mmap(&bo->base, vma);
}

static const struct vm_operations_struct vc4_vm_ops = {
	.fault = vc4_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
	.free = vc4_free_object,
	.export = vc4_prime_export,
	.get_sg_table = drm_gem_dma_object_get_sg_table,
	.vmap = drm_gem_dma_object_vmap,
	.mmap = vc4_gem_object_mmap,
	.vm_ops = &vc4_vm_ops,
};

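/* Grabs a reference on the binner BO on behalf of this file if it
 * doesn't hold one already.  Fails with -ENODEV when no V3D device is
 * present.
 */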
static int vc4_grab_bin_bo(struct vc4_dev *vc4, struct vc4_file *vc4file)
{
	if (!vc4->v3d)
		return -ENODEV;

	if (vc4file->bin_bo_used)
		return 0;

	return vc4_v3d_bin_bo_get(vc4, &vc4file->bin_bo_used);
}

int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_vc4_create_bo *args = data;
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = NULL;
	int ret;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	ret = vc4_grab_bin_bo(vc4, vc4file);
	if (ret)
		return ret;

	/*
	 * We can't allocate from the BO cache, because the BOs don't
	 * get zeroed, and that might leak data between users.
	 */
	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put(&bo->base.base);

	return ret;
}

int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_put(gem_obj);
	return 0;
}

int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vc4_create_shader_bo *args = data;
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = NULL;
	int ret;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (args->size == 0)
		return -EINVAL;

	if (args->size % sizeof(u64) != 0)
		return -EINVAL;

	if (args->flags != 0) {
		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad != 0) {
		DRM_INFO("Pad set: 0x%08x\n", args->pad);
		return -EINVAL;
	}

	ret = vc4_grab_bin_bo(vc4, vc4file);
	if (ret)
		return ret;

	bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	if (copy_from_user(bo->base.vaddr,
			   (void __user *)(uintptr_t)args->data,
			   args->size)) {
		ret = -EFAULT;
		goto fail;
	}
	/* Clear the rest of the memory from allocating from the BO
	 * cache.
	 */
	memset(bo->base.vaddr + args->size, 0,
	       bo->base.base.size - args->size);

	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader) {
		ret = -EINVAL;
		goto fail;
	}

	/* We have to create the handle after validation, to avoid
	 * races where users mmap the shader BO before it is validated.
	 */
	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
	drm_gem_object_put(&bo->base.base);

	return ret;
}

/**
 * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * The tiling state of the BO decides the default modifier of an fb if
 * no specific modifier was set by userspace, and the return value of
 * vc4_get_tiling_ioctl() (so that userspace can treat a BO it
 * received from dmabuf as the same tiling format as the producer
 * used).
 */
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_set_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	bool t_format;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (args->flags != 0)
		return -EINVAL;

	switch (args->modifier) {
	case DRM_FORMAT_MOD_NONE:
		t_format = false;
		break;
	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
		t_format = true;
		break;
	default:
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);
	bo->t_format = t_format;

	drm_gem_object_put(gem_obj);

	return 0;
}

/**
 * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
 */
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_get_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (args->flags != 0 || args->modifier != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);

	if (bo->t_format)
		args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
	else
		args->modifier = DRM_FORMAT_MOD_NONE;

	drm_gem_object_put(gem_obj);

	return 0;
}

int vc4_bo_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *drm = minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	int ret;

	if (!vc4->v3d)
		return -ENODEV;

	ret = vc4_debugfs_add_file(minor, "bo_stats",
				   vc4_bo_stats_debugfs, NULL);
	if (ret)
		return ret;

	return 0;
}

static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused);
int vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;
	int i;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	/* Create the initial set of BO labels that the kernel will
	 * use.  This lets us avoid a bunch of string reallocation in
	 * the kernel's draw and BO allocation paths.
	 */
	vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
				 GFP_KERNEL);
	if (!vc4->bo_labels)
		return -ENOMEM;
	vc4->num_labels = VC4_BO_TYPE_COUNT;

	BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
	for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
		vc4->bo_labels[i].name = bo_type_names[i];

	ret = drmm_mutex_init(dev, &vc4->bo_lock);
	if (ret) {
		kfree(vc4->bo_labels);
		return ret;
	}

	INIT_LIST_HEAD(&vc4->bo_cache.time_list);

	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);

	return drmm_add_action_or_reset(dev, vc4_bo_cache_destroy, NULL);
}

static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	vc4_bo_cache_purge(dev);

	for (i = 0; i < vc4->num_labels; i++) {
		if (vc4->bo_labels[i].num_allocated) {
			DRM_ERROR("Destroying BO cache with %d %s "
				  "BOs still allocated\n",
				  vc4->bo_labels[i].num_allocated,
				  vc4->bo_labels[i].name);
		}

		if (is_user_label(i))
			kfree(vc4->bo_labels[i].name);
	}
	kfree(vc4->bo_labels);
}

int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_label_bo *args = data;
	char *name;
	struct drm_gem_object *gem_obj;
	int ret = 0, label;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (!args->len)
		return -EINVAL;

	name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
	if (IS_ERR(name))
		return PTR_ERR(name);

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&vc4->bo_lock);
	label = vc4_get_user_label(vc4, name);
	if (label != -1)
		vc4_bo_set_label(gem_obj, label);
	else
		ret = -ENOMEM;
	mutex_unlock(&vc4->bo_lock);

	drm_gem_object_put(gem_obj);

	return ret;
}