/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if heavy fragmentation is expected.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

#include <drm/drm_mm.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the Linux core if it suits them; the upside of
 * drm_mm is that it lives in the DRM core, which means it is easier to extend
 * for some of the crazier special-purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * data structures. drm_mm itself will not do any memory allocations of its
 * own, so if drivers choose not to embed nodes they still need to allocate
 * them themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic data structures seems to be a fairly decent approach to
 * clustering allocations and avoiding too much fragmentation. This means free
 * space searches are O(num_holes). Given all the fancy features drm_mm
 * supports, something better would be fairly complex, and since gfx thrashing
 * is a fairly steep cliff this is not a real concern. Removing a node again
 * is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally, iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe; drivers need to protect
 * modifications with their own locking. The idea behind this is that for a
 * full memory manager additional data needs to be protected anyway, hence
 * internal locking would be fully redundant.
 */
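
/*
 * Illustrative sketch (not part of the original sources): a driver typically
 * embeds a struct drm_mm in its device structure and a struct drm_mm_node in
 * each buffer object, then allocates and releases ranges roughly as below.
 * The driver-side names ("my_bo", "dev", "bo") are made up for illustration
 * only; the node must be cleared to 0 before insertion.
 *
 *	struct my_bo {
 *		struct drm_mm_node node;	// zeroed as part of kzalloc()
 *	};
 *
 *	drm_mm_init(&dev->mm, 0, dev->vram_size);
 *
 *	ret = drm_mm_insert_node_in_range(&dev->mm, &bo->node,
 *					  size, alignment, 0,
 *					  0, U64_MAX, DRM_MM_INSERT_BEST);
 *	if (ret)			// -ENOSPC if no suitable hole
 *		return ret;
 *	...
 *	drm_mm_remove_node(&bo->node);	// node can be reused afterwards
 *	drm_mm_takedown(&dev->mm);	// only after all nodes are removed
 */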

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* May be called under spinlock, so avoid sleeping */
	node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		stack_depot_snprint(node->stack, buf, BUFSZ, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline __maybe_unused, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost;

	node->__subtree_last = LAST(node);

	if (drm_mm_node_allocated(hole_node)) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
		leftmost = true;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static u64 rb_to_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static void insert_hole_size(struct rb_root_cached *root,
			     struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 x = node->hole_size;
	bool first = true;

	while (*link) {
		rb = *link;
		if (x > rb_to_hole_size(rb)) {
			link = &rb->rb_left;
		} else {
			link = &rb->rb_right;
			first = false;
		}
	}

	rb_link_node(&node->rb_hole_size, rb, link);
	rb_insert_color_cached(&node->rb_hole_size, root, first);
}

RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
			 struct drm_mm_node, rb_hole_addr,
			 u64, subtree_max_hole, HOLE_SIZE)

static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_node, *rb_parent = NULL;
	u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
	struct drm_mm_node *parent;

	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr);
		if (parent->subtree_max_hole < subtree_max_hole)
			parent->subtree_max_hole = subtree_max_hole;
		if (start < HOLE_ADDR(parent))
			link = &parent->rb_hole_addr.rb_left;
		else
			link = &parent->rb_hole_addr.rb_right;
	}

	rb_link_node(&node->rb_hole_addr, rb_parent, link);
	rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
}

static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	node->subtree_max_hole = node->hole_size;
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	insert_hole_size(&mm->holes_size, node);
	insert_hole_addr(&mm->holes_addr, node);

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
			   &augment_callbacks);
	node->hole_size = 0;
	node->subtree_max_hole = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
	struct drm_mm_node *best = NULL;

	do {
		struct drm_mm_node *node =
			rb_entry(rb, struct drm_mm_node, rb_hole_size);

		if (size <= node->hole_size) {
			best = node;
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	} while (rb);

	return best;
}

static bool usable_hole_addr(struct rb_node *rb, u64 size)
{
	return rb && rb_hole_addr_to_node(rb)->subtree_max_hole >= size;
}

static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
{
	struct rb_node *rb = mm->holes_addr.rb_node;
	struct drm_mm_node *node = NULL;

	while (rb) {
		u64 hole_start;

		if (!usable_hole_addr(rb, size))
			break;

		node = rb_hole_addr_to_node(rb);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			rb = node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			rb = node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}

static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole_addr(mm, start, size);

	case DRM_MM_INSERT_HIGH:
		return find_hole_addr(mm, end, size);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}

/**
 * DECLARE_NEXT_HOLE_ADDR - macro to declare next hole functions
 * @name: name of function to declare
 * @first: first rb member to traverse (either rb_left or rb_right).
 * @last: last rb member to traverse (either rb_right or rb_left).
 *
 * This macro declares a function to return the next hole of the addr rb tree.
 * While traversing the tree we take the searched size into account and only
 * visit branches with potentially big enough holes.
 */

#define DECLARE_NEXT_HOLE_ADDR(name, first, last)			\
static struct drm_mm_node *name(struct drm_mm_node *entry, u64 size)	\
{									\
	struct rb_node *parent, *node = &entry->rb_hole_addr;		\
									\
	if (!entry || RB_EMPTY_NODE(node))				\
		return NULL;						\
									\
	if (usable_hole_addr(node->first, size)) {			\
		node = node->first;					\
		while (usable_hole_addr(node->last, size))		\
			node = node->last;				\
		return rb_hole_addr_to_node(node);			\
	}								\
									\
	while ((parent = rb_parent(node)) && node == parent->first)	\
		node = parent;						\
									\
	return rb_hole_addr_to_node(parent);				\
}

DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right)
DECLARE_NEXT_HOLE_ADDR(next_hole_low_addr, rb_right, rb_left)

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  u64 size,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));

	case DRM_MM_INSERT_LOW:
		return next_hole_low_addr(node, size);

	case DRM_MM_INSERT_HIGH:
		return next_hole_high_addr(node, size);

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set up before the range allocator can be
 * set up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;
	u64 end;

	end = node->start + node->size;
	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = find_hole_addr(mm, node->start, 0);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
	list_add(&node->node_list, &hole->node_list);
	drm_mm_interval_tree_add_node(hole, node);
	node->hole_size = 0;

	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
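
/*
 * Illustrative sketch (not part of the original sources): reserving a range
 * that is already in use, e.g. a firmware-provided scanout buffer, before
 * handing the rest of the address space to the allocator. The names
 * "fb_node", "fb_base" and "fb_size" are made up for illustration only.
 *
 *	memset(&fb_node, 0, sizeof(fb_node));	// all other fields must be 0
 *	fb_node.start = fb_base;
 *	fb_node.size = fb_size;
 *	fb_node.color = 0;
 *
 *	ret = drm_mm_reserve_node(&mm, &fb_node);
 *	if (ret)	// -ENOSPC if the range is not free anymore
 *		...
 */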

static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
{
	return rb ? rb_to_hole_size(rb) : 0;
}

/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the allocation search and placement
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;
	bool once;

	DRM_MM_BUG_ON(range_start > range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
		return -ENOSPC;

	if (alignment <= 1)
		alignment = 0;

	once = mode & DRM_MM_INSERT_ONCE;
	mode &= ~DRM_MM_INSERT_ONCE;

	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode);
	     hole;
	     hole = once ? NULL : next_hole(mm, hole, size, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
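
/*
 * Illustrative sketch (not part of the original sources): a range-restricted,
 * top-down allocation, e.g. placing a buffer in the upper, non-mappable part
 * of a GPU address space. "mappable_end" and "gtt_end" are made-up names for
 * illustration only; the node must again be cleared to 0 beforehand.
 *
 *	ret = drm_mm_insert_node_in_range(&mm, &node, size, PAGE_SIZE, 0,
 *					  mappable_end, gtt_end,
 *					  DRM_MM_INSERT_HIGH);
 */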

static inline __maybe_unused bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
{
	return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
}

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);

	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not very
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So, like the free stack which needs to be walked before
 * a scan operation even begins, this is linear in the number of objects. It
 * doesn't seem to hurt too badly.
 */
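
/*
 * Illustrative sketch (not part of the original sources) of a typical
 * eviction loop built on the scan interface. The driver-side names ("obj",
 * "lru", "evict_list" and the list members) are made up for illustration
 * only; evicting the selected objects is left to the driver.
 *
 *	struct drm_mm_scan scan;
 *	LIST_HEAD(evict_list);
 *
 *	drm_mm_scan_init_with_range(&scan, &mm, size, alignment, color,
 *				    range_start, range_end,
 *				    DRM_MM_INSERT_BEST);
 *
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node))
 *			goto found;
 *	}
 *	// no hole found: every added block must still be removed, in reverse
 * found:
 *	list_for_each_entry_safe(obj, tmp, &evict_list, evict_link)
 *		if (!drm_mm_scan_remove_block(&scan, &obj->node))
 *			list_del(&obj->evict_link);	// not needed for the hole
 *
 *	// evict everything left on evict_list, then insert the new node
 */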

/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 enum drm_mm_insert_mode mode)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	DRM_MM_BUG_ON(end <= start);
	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
	__set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range() with
 * DRM_MM_INSERT_EVICT will then return the just freed block (because it's at
 * the top of the hole_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
	__clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	/*
	 * The hole found during scanning should ideally be the first element
	 * in the hole_stack list, but due to side-effects in the driver it
	 * may not be.
	 */
	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
		hole_start = __drm_mm_hole_node_start(hole);
		hole_end = hole_start + hole->hole_size;

		if (hole_start <= scan->hit_start &&
		    hole_end >= scan->hit_end)
			break;
	}

	/* We should only be called after we found the hole previously */
	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
	if (unlikely(&hole->hole_stack == &mm->hole_stack))
		return NULL;

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

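/*
 * Illustrative sketch (not part of the original sources): after evicting the
 * nodes selected by the scan, pick up the extra nodes that color adjustment
 * may require. "evict_one()" is a made-up driver helper for illustration.
 *
 *	while ((node = drm_mm_scan_color_evict(&scan)))
 *		evict_one(node);
 */
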
/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT_CACHED;
	mm->holes_addr = RB_ROOT;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.flags = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;

#ifdef CONFIG_DRM_DEBUG_MM
	stack_depot_init();
#endif
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 start, size;

	size = entry->hole_size;
	if (size) {
		start = drm_mm_hole_node_start(entry);
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
			   start, start + size, size);
	}

	return size;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
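
/*
 * Illustrative sketch (not part of the original sources): dumping the
 * allocator state from a debugfs show callback; drm_seq_file_printer() wraps
 * a seq_file in a struct drm_printer suitable for drm_mm_print().
 *
 *	struct drm_printer p = drm_seq_file_printer(m);
 *
 *	drm_mm_print(&mm, &p);
 */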