drivers/gpu/drm/drm_buddy.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>

static struct kmem_cache *slab_blocks;

static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
                                               struct drm_buddy_block *parent,
                                               unsigned int order,
                                               u64 offset)
{
        struct drm_buddy_block *block;

        BUG_ON(order > DRM_BUDDY_MAX_ORDER);

        block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
        if (!block)
                return NULL;

        block->header = offset;
        block->header |= order;
        block->parent = parent;

        BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
        return block;
}

static void drm_block_free(struct drm_buddy *mm,
                           struct drm_buddy_block *block)
{
        kmem_cache_free(slab_blocks, block);
}

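/*
 * Each per-order free list is kept sorted by block offset: top-down
 * allocation (get_maxblock()) takes the last entry of a list as its
 * highest block, and __alloc_contig_try_harder() walks a list in
 * reverse for the same reason.
 */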
static void list_insert_sorted(struct drm_buddy *mm,
                               struct drm_buddy_block *block)
{
        struct drm_buddy_block *node;
        struct list_head *head;

        head = &mm->free_list[drm_buddy_block_order(block)];
        if (list_empty(head)) {
                list_add(&block->link, head);
                return;
        }

        list_for_each_entry(node, head, link)
                if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
                        break;

        __list_add(&block->link, node->link.prev, &node->link);
}

static void mark_allocated(struct drm_buddy_block *block)
{
        block->header &= ~DRM_BUDDY_HEADER_STATE;
        block->header |= DRM_BUDDY_ALLOCATED;

        list_del(&block->link);
}

static void mark_free(struct drm_buddy *mm,
                      struct drm_buddy_block *block)
{
        block->header &= ~DRM_BUDDY_HEADER_STATE;
        block->header |= DRM_BUDDY_FREE;

        list_insert_sorted(mm, block);
}

static void mark_split(struct drm_buddy_block *block)
{
        block->header &= ~DRM_BUDDY_HEADER_STATE;
        block->header |= DRM_BUDDY_SPLIT;

        list_del(&block->link);
}

/**
 * drm_buddy_init - init memory manager
 *
 * @mm: DRM buddy manager to initialize
 * @size: size in bytes to manage
 * @chunk_size: minimum page size in bytes for our allocations
 *
 * Initializes the memory manager and its resources.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
        unsigned int i;
        u64 offset;

        if (size < chunk_size)
                return -EINVAL;

        if (chunk_size < PAGE_SIZE)
                return -EINVAL;

        if (!is_power_of_2(chunk_size))
                return -EINVAL;

        size = round_down(size, chunk_size);

        mm->size = size;
        mm->avail = size;
        mm->chunk_size = chunk_size;
        mm->max_order = ilog2(size) - ilog2(chunk_size);

        BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);

        mm->free_list = kmalloc_array(mm->max_order + 1,
                                      sizeof(struct list_head),
                                      GFP_KERNEL);
        if (!mm->free_list)
                return -ENOMEM;

        for (i = 0; i <= mm->max_order; ++i)
                INIT_LIST_HEAD(&mm->free_list[i]);

        mm->n_roots = hweight64(size);

        mm->roots = kmalloc_array(mm->n_roots,
                                  sizeof(struct drm_buddy_block *),
                                  GFP_KERNEL);
        if (!mm->roots)
                goto out_free_list;

        offset = 0;
        i = 0;

        /*
         * Split into power-of-two blocks, in case we are given a size that is
         * not itself a power-of-two.
         */
        do {
                struct drm_buddy_block *root;
                unsigned int order;
                u64 root_size;

                order = ilog2(size) - ilog2(chunk_size);
                root_size = chunk_size << order;

                root = drm_block_alloc(mm, NULL, order, offset);
                if (!root)
                        goto out_free_roots;

                mark_free(mm, root);

                BUG_ON(i > mm->max_order);
                BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);

                mm->roots[i] = root;

                offset += root_size;
                size -= root_size;
                i++;
        } while (size);

        return 0;

out_free_roots:
        while (i--)
                drm_block_free(mm, mm->roots[i]);
        kfree(mm->roots);
out_free_list:
        kfree(mm->free_list);
        return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);

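/*
 * Example (illustrative sketch only): manage 1 GiB of device memory in
 * 4 KiB chunks, which yields max_order = ilog2(SZ_1G / SZ_4K) = 18.
 *
 *	struct drm_buddy mm;
 *	int err;
 *
 *	err = drm_buddy_init(&mm, SZ_1G, SZ_4K);
 *	if (err)
 *		return err;
 *	...
 *	drm_buddy_fini(&mm);
 */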
/**
 * drm_buddy_fini - tear down the memory manager
 *
 * @mm: DRM buddy manager to free
 *
 * Cleans up the memory manager resources and the freelist. All blocks
 * must have been returned to the manager beforehand.
 */
void drm_buddy_fini(struct drm_buddy *mm)
{
        int i;

        for (i = 0; i < mm->n_roots; ++i) {
                WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
                drm_block_free(mm, mm->roots[i]);
        }

        WARN_ON(mm->avail != mm->size);

        kfree(mm->roots);
        kfree(mm->free_list);
}
EXPORT_SYMBOL(drm_buddy_fini);

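/*
 * Split a free block into two order - 1 halves: both children start out
 * free (and go back on the free lists), while the parent is marked as
 * split and drops off its free list.
 */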
static int split_block(struct drm_buddy *mm,
                       struct drm_buddy_block *block)
{
        unsigned int block_order = drm_buddy_block_order(block) - 1;
        u64 offset = drm_buddy_block_offset(block);

        BUG_ON(!drm_buddy_block_is_free(block));
        BUG_ON(!drm_buddy_block_order(block));

        block->left = drm_block_alloc(mm, block, block_order, offset);
        if (!block->left)
                return -ENOMEM;

        block->right = drm_block_alloc(mm, block, block_order,
                                       offset + (mm->chunk_size << block_order));
        if (!block->right) {
                drm_block_free(mm, block->left);
                return -ENOMEM;
        }

        mark_free(mm, block->left);
        mark_free(mm, block->right);

        mark_split(block);

        return 0;
}

static struct drm_buddy_block *
__get_buddy(struct drm_buddy_block *block)
{
        struct drm_buddy_block *parent;

        parent = block->parent;
        if (!parent)
                return NULL;

        if (parent->left == block)
                return parent->right;

        return parent->left;
}

/**
 * drm_get_buddy - get buddy address
 *
 * @block: DRM buddy block
 *
 * Returns the corresponding buddy block for @block, or NULL
 * if this is a root block and can't be merged further.
 * Requires some kind of locking to protect against
 * any concurrent allocate and free operations.
 */
struct drm_buddy_block *
drm_get_buddy(struct drm_buddy_block *block)
{
        return __get_buddy(block);
}
EXPORT_SYMBOL(drm_get_buddy);

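/*
 * Free a block and merge where possible: walk up the tree, coalescing
 * each pair of free children back into their parent, and put the
 * highest fully-free ancestor back on its free list.
 */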
static void __drm_buddy_free(struct drm_buddy *mm,
                             struct drm_buddy_block *block)
{
        struct drm_buddy_block *parent;

        while ((parent = block->parent)) {
                struct drm_buddy_block *buddy;

                buddy = __get_buddy(block);

                if (!drm_buddy_block_is_free(buddy))
                        break;

                list_del(&buddy->link);

                drm_block_free(mm, block);
                drm_block_free(mm, buddy);

                block = parent;
        }

        mark_free(mm, block);
}

/**
 * drm_buddy_free_block - free a block
 *
 * @mm: DRM buddy manager
 * @block: block to be freed
 */
void drm_buddy_free_block(struct drm_buddy *mm,
                          struct drm_buddy_block *block)
{
        BUG_ON(!drm_buddy_block_is_allocated(block));
        mm->avail += drm_buddy_block_size(mm, block);
        __drm_buddy_free(mm, block);
}
EXPORT_SYMBOL(drm_buddy_free_block);

/**
 * drm_buddy_free_list - free blocks
 *
 * @mm: DRM buddy manager
 * @objects: input list head holding the blocks to free
 */
void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
{
        struct drm_buddy_block *block, *on;

        list_for_each_entry_safe(block, on, objects, link) {
                drm_buddy_free_block(mm, block);
                cond_resched();
        }
        INIT_LIST_HEAD(objects);
}
EXPORT_SYMBOL(drm_buddy_free_list);

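/*
 * Range helpers; both treat their arguments as inclusive ranges, i.e.
 * [s1, e1] against [s2, e2].
 */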
static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
        return s1 <= e2 && e1 >= s2;
}

static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
        return s1 <= s2 && e1 >= e2;
}

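/*
 * Depth-first search from the roots for a single free block of the
 * requested order that lies inside [start, end), splitting larger
 * overlapping blocks on the way down.
 */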
static struct drm_buddy_block *
alloc_range_bias(struct drm_buddy *mm,
                 u64 start, u64 end,
                 unsigned int order)
{
        struct drm_buddy_block *block;
        struct drm_buddy_block *buddy;
        LIST_HEAD(dfs);
        int err;
        int i;

        end = end - 1;

        for (i = 0; i < mm->n_roots; ++i)
                list_add_tail(&mm->roots[i]->tmp_link, &dfs);

        do {
                u64 block_start;
                u64 block_end;

                block = list_first_entry_or_null(&dfs,
                                                 struct drm_buddy_block,
                                                 tmp_link);
                if (!block)
                        break;

                list_del(&block->tmp_link);

                if (drm_buddy_block_order(block) < order)
                        continue;

                block_start = drm_buddy_block_offset(block);
                block_end = block_start + drm_buddy_block_size(mm, block) - 1;

                if (!overlaps(start, end, block_start, block_end))
                        continue;

                if (drm_buddy_block_is_allocated(block))
                        continue;

                if (contains(start, end, block_start, block_end) &&
                    order == drm_buddy_block_order(block)) {
                        /*
                         * Find the free block within the range.
                         */
                        if (drm_buddy_block_is_free(block))
                                return block;

                        continue;
                }

                if (!drm_buddy_block_is_split(block)) {
                        err = split_block(mm, block);
                        if (unlikely(err))
                                goto err_undo;
                }

                list_add(&block->right->tmp_link, &dfs);
                list_add(&block->left->tmp_link, &dfs);
        } while (1);

        return ERR_PTR(-ENOSPC);

err_undo:
        /*
         * We really don't want to leave around a bunch of split blocks, since
         * bigger is better, so make sure we merge everything back before we
         * free the allocated blocks.
         */
        buddy = __get_buddy(block);
        if (buddy &&
            (drm_buddy_block_is_free(block) &&
             drm_buddy_block_is_free(buddy)))
                __drm_buddy_free(mm, block);
        return ERR_PTR(err);
}

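/*
 * For top-down allocation: scan every order >= the requested one and
 * return the free block with the highest offset. Each free list is
 * sorted by offset, so the last entry of a list is its highest block.
 */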
static struct drm_buddy_block *
get_maxblock(struct drm_buddy *mm, unsigned int order)
{
        struct drm_buddy_block *max_block = NULL, *node;
        unsigned int i;

        for (i = order; i <= mm->max_order; ++i) {
                if (!list_empty(&mm->free_list[i])) {
                        node = list_last_entry(&mm->free_list[i],
                                               struct drm_buddy_block,
                                               link);
                        if (!max_block) {
                                max_block = node;
                                continue;
                        }

                        if (drm_buddy_block_offset(node) >
                            drm_buddy_block_offset(max_block)) {
                                max_block = node;
                        }
                }
        }

        return max_block;
}

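/*
 * Take the smallest suitable free block (or the topmost one, for
 * top-down allocation) and split it down until it matches the
 * requested order; each split returns the left half to the free lists
 * and descends into the right half.
 */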
static struct drm_buddy_block *
alloc_from_freelist(struct drm_buddy *mm,
                    unsigned int order,
                    unsigned long flags)
{
        struct drm_buddy_block *block = NULL;
        unsigned int tmp;
        int err;

        if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
                block = get_maxblock(mm, order);
                if (block)
                        /* Store the obtained block order */
                        tmp = drm_buddy_block_order(block);
        } else {
                for (tmp = order; tmp <= mm->max_order; ++tmp) {
                        if (!list_empty(&mm->free_list[tmp])) {
                                block = list_last_entry(&mm->free_list[tmp],
                                                        struct drm_buddy_block,
                                                        link);
                                if (block)
                                        break;
                        }
                }
        }

        if (!block)
                return ERR_PTR(-ENOSPC);

        BUG_ON(!drm_buddy_block_is_free(block));

        while (tmp != order) {
                err = split_block(mm, block);
                if (unlikely(err))
                        goto err_undo;

                block = block->right;
                tmp--;
        }
        return block;

err_undo:
        if (tmp != order)
                __drm_buddy_free(mm, block);
        return ERR_PTR(err);
}

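/*
 * Allocate every chunk in [start, start + size): blocks fully inside
 * the range are taken whole, partially overlapping blocks are split
 * and revisited. On -ENOSPC the partial allocation can optionally be
 * kept and reported back through @total_allocated_on_err.
 */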
static int __alloc_range(struct drm_buddy *mm,
                         struct list_head *dfs,
                         u64 start, u64 size,
                         struct list_head *blocks,
                         u64 *total_allocated_on_err)
{
        struct drm_buddy_block *block;
        struct drm_buddy_block *buddy;
        u64 total_allocated = 0;
        LIST_HEAD(allocated);
        u64 end;
        int err;

        end = start + size - 1;

        do {
                u64 block_start;
                u64 block_end;

                block = list_first_entry_or_null(dfs,
                                                 struct drm_buddy_block,
                                                 tmp_link);
                if (!block)
                        break;

                list_del(&block->tmp_link);

                block_start = drm_buddy_block_offset(block);
                block_end = block_start + drm_buddy_block_size(mm, block) - 1;

                if (!overlaps(start, end, block_start, block_end))
                        continue;

                if (drm_buddy_block_is_allocated(block)) {
                        err = -ENOSPC;
                        goto err_free;
                }

                if (contains(start, end, block_start, block_end)) {
                        if (!drm_buddy_block_is_free(block)) {
                                err = -ENOSPC;
                                goto err_free;
                        }

                        mark_allocated(block);
                        total_allocated += drm_buddy_block_size(mm, block);
                        mm->avail -= drm_buddy_block_size(mm, block);
                        list_add_tail(&block->link, &allocated);
                        continue;
                }

                if (!drm_buddy_block_is_split(block)) {
                        err = split_block(mm, block);
                        if (unlikely(err))
                                goto err_undo;
                }

                list_add(&block->right->tmp_link, dfs);
                list_add(&block->left->tmp_link, dfs);
        } while (1);

        list_splice_tail(&allocated, blocks);

        if (total_allocated < size) {
                err = -ENOSPC;
                goto err_free;
        }

        return 0;

err_undo:
        /*
         * We really don't want to leave around a bunch of split blocks, since
         * bigger is better, so make sure we merge everything back before we
         * free the allocated blocks.
         */
        buddy = __get_buddy(block);
        if (buddy &&
            (drm_buddy_block_is_free(block) &&
             drm_buddy_block_is_free(buddy)))
                __drm_buddy_free(mm, block);

err_free:
        if (err == -ENOSPC && total_allocated_on_err) {
                list_splice_tail(&allocated, blocks);
                *total_allocated_on_err = total_allocated;
        } else {
                drm_buddy_free_list(mm, &allocated);
        }

        return err;
}

static int __drm_buddy_alloc_range(struct drm_buddy *mm,
                                   u64 start,
                                   u64 size,
                                   u64 *total_allocated_on_err,
                                   struct list_head *blocks)
{
        LIST_HEAD(dfs);
        int i;

        for (i = 0; i < mm->n_roots; ++i)
                list_add_tail(&mm->roots[i]->tmp_link, &dfs);

        return __alloc_range(mm, &dfs, start, size,
                             blocks, total_allocated_on_err);
}

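/*
 * Last-resort path for contiguous allocations whose size is not a
 * power of two: anchor on a free block of the largest power-of-two
 * order that fits, fill to the right of its offset (RHS), then satisfy
 * whatever is still missing immediately to the left of it (LHS), so
 * that the two pieces form one contiguous range.
 */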
static int __alloc_contig_try_harder(struct drm_buddy *mm,
                                     u64 size,
                                     u64 min_block_size,
                                     struct list_head *blocks)
{
        u64 rhs_offset, lhs_offset, lhs_size, filled;
        struct drm_buddy_block *block;
        struct list_head *list;
        LIST_HEAD(blocks_lhs);
        unsigned long pages;
        unsigned int order;
        u64 modify_size;
        int err;

        modify_size = rounddown_pow_of_two(size);
        pages = modify_size >> ilog2(mm->chunk_size);
        order = fls(pages) - 1;
        if (order == 0)
                return -ENOSPC;

        list = &mm->free_list[order];
        if (list_empty(list))
                return -ENOSPC;

        list_for_each_entry_reverse(block, list, link) {
                /* Allocate blocks traversing RHS */
                rhs_offset = drm_buddy_block_offset(block);
                err = __drm_buddy_alloc_range(mm, rhs_offset, size,
                                              &filled, blocks);
                if (!err || err != -ENOSPC)
                        return err;

                lhs_size = max((size - filled), min_block_size);
                if (!IS_ALIGNED(lhs_size, min_block_size))
                        lhs_size = round_up(lhs_size, min_block_size);

                /* Allocate blocks traversing LHS */
                lhs_offset = drm_buddy_block_offset(block) - lhs_size;
                err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
                                              NULL, &blocks_lhs);
                if (!err) {
                        list_splice(&blocks_lhs, blocks);
                        return 0;
                } else if (err != -ENOSPC) {
                        drm_buddy_free_list(mm, blocks);
                        return err;
                }
                /* Free blocks for the next iteration */
                drm_buddy_free_list(mm, blocks);
        }

        return -ENOSPC;
}

/**
 * drm_buddy_block_trim - free unused pages
 *
 * @mm: DRM buddy manager
 * @new_size: original size requested
 * @blocks: Input and output list of allocated blocks.
 * MUST contain a single block as input to be trimmed.
 * On success will contain the newly allocated blocks
 * making up the @new_size. Blocks always appear in
 * ascending order.
 *
 * For contiguous allocation, we round up the size to the nearest
 * power-of-two value; drivers consume the *actual* size, so the
 * remaining portion is unused and can optionally be freed with
 * this function.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_block_trim(struct drm_buddy *mm,
                         u64 new_size,
                         struct list_head *blocks)
{
        struct drm_buddy_block *parent;
        struct drm_buddy_block *block;
        LIST_HEAD(dfs);
        u64 new_start;
        int err;

        if (!list_is_singular(blocks))
                return -EINVAL;

        block = list_first_entry(blocks,
                                 struct drm_buddy_block,
                                 link);

        if (WARN_ON(!drm_buddy_block_is_allocated(block)))
                return -EINVAL;

        if (new_size > drm_buddy_block_size(mm, block))
                return -EINVAL;

        if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
                return -EINVAL;

        if (new_size == drm_buddy_block_size(mm, block))
                return 0;

        list_del(&block->link);
        mark_free(mm, block);
        mm->avail += drm_buddy_block_size(mm, block);

        /* Prevent recursively freeing this node */
        parent = block->parent;
        block->parent = NULL;

        new_start = drm_buddy_block_offset(block);
        list_add(&block->tmp_link, &dfs);
        err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
        if (err) {
                mark_allocated(block);
                mm->avail -= drm_buddy_block_size(mm, block);
                list_add(&block->link, blocks);
        }

        block->parent = parent;
        return err;
}
EXPORT_SYMBOL(drm_buddy_block_trim);

/**
 * drm_buddy_alloc_blocks - allocate power-of-two blocks
 *
 * @mm: DRM buddy manager to allocate from
 * @start: start of the allowed range for this block
 * @end: end of the allowed range for this block
 * @size: size of the allocation
 * @min_block_size: alignment of the allocation
 * @blocks: output list head to add allocated blocks
 * @flags: DRM_BUDDY_*_ALLOCATION flags
 *
 * alloc_range_bias() is called when range limitations are in place;
 * it traverses the tree and returns the desired block.
 *
 * alloc_from_freelist() is called when *no* range restrictions
 * are enforced; it picks the block from the freelist.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
                           u64 start, u64 end, u64 size,
                           u64 min_block_size,
                           struct list_head *blocks,
                           unsigned long flags)
{
        struct drm_buddy_block *block = NULL;
        u64 original_size, original_min_size;
        unsigned int min_order, order;
        LIST_HEAD(allocated);
        unsigned long pages;
        int err;

        if (size < mm->chunk_size)
                return -EINVAL;

        if (min_block_size < mm->chunk_size)
                return -EINVAL;

        if (!is_power_of_2(min_block_size))
                return -EINVAL;

        if (!IS_ALIGNED(start | end | size, mm->chunk_size))
                return -EINVAL;

        if (end > mm->size)
                return -EINVAL;

        if (range_overflows(start, size, mm->size))
                return -EINVAL;

        /* Actual range allocation */
        if (start + size == end)
                return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);

        original_size = size;
        original_min_size = min_block_size;

        /* Roundup the size to power of 2 */
        if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
                size = roundup_pow_of_two(size);
                min_block_size = size;
        /* Align size value to min_block_size */
        } else if (!IS_ALIGNED(size, min_block_size)) {
                size = round_up(size, min_block_size);
        }

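        /*
         * Express the (now aligned) size in chunk-sized pages: "order" is
         * the largest power-of-two block that still fits, and "min_order"
         * the smallest block we are allowed to hand back.
         */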
        pages = size >> ilog2(mm->chunk_size);
        order = fls(pages) - 1;
        min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);

        do {
                order = min(order, (unsigned int)fls(pages) - 1);
                BUG_ON(order > mm->max_order);
                BUG_ON(order < min_order);

                do {
                        if (flags & DRM_BUDDY_RANGE_ALLOCATION)
                                /* Allocate traversing within the range */
                                block = alloc_range_bias(mm, start, end, order);
                        else
                                /* Allocate from freelist */
                                block = alloc_from_freelist(mm, order, flags);

                        if (!IS_ERR(block))
                                break;

                        if (order-- == min_order) {
                                if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
                                    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
                                        /*
                                         * Try contiguous block allocation through
                                         * try harder method
                                         */
                                        return __alloc_contig_try_harder(mm,
                                                                         original_size,
                                                                         original_min_size,
                                                                         blocks);
                                err = -ENOSPC;
                                goto err_free;
                        }
                } while (1);

                mark_allocated(block);
                mm->avail -= drm_buddy_block_size(mm, block);
                kmemleak_update_trace(block);
                list_add_tail(&block->link, &allocated);

                pages -= BIT(order);

                if (!pages)
                        break;
        } while (1);

        /* Trim the allocated block to the required size */
        if (original_size != size) {
                struct list_head *trim_list;
                LIST_HEAD(temp);
                u64 trim_size;

                trim_list = &allocated;
                trim_size = original_size;

                if (!list_is_singular(&allocated)) {
                        block = list_last_entry(&allocated, typeof(*block), link);
                        list_move(&block->link, &temp);
                        trim_list = &temp;
                        trim_size = drm_buddy_block_size(mm, block) -
                                (size - original_size);
                }

                drm_buddy_block_trim(mm,
                                     trim_size,
                                     trim_list);

                if (!list_empty(&temp))
                        list_splice_tail(trim_list, &allocated);
        }

        list_splice_tail(&allocated, blocks);
        return 0;

err_free:
        drm_buddy_free_list(mm, &allocated);
        return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);

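/*
 * Example (illustrative sketch only): allocate 8 MiB anywhere in the
 * managed range with a 64 KiB minimum block size, then release it.
 * Error handling around the allocation is elided; "mm" is assumed to
 * be an initialized manager and callers provide their own locking.
 *
 *	LIST_HEAD(blocks);
 *	int err;
 *
 *	err = drm_buddy_alloc_blocks(mm, 0, mm->size, SZ_8M, SZ_64K,
 *				     &blocks, 0);
 *	...
 *	drm_buddy_free_list(mm, &blocks);
 */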
/**
 * drm_buddy_block_print - print block information
 *
 * @mm: DRM buddy manager
 * @block: DRM buddy block
 * @p: DRM printer to use
 */
void drm_buddy_block_print(struct drm_buddy *mm,
                           struct drm_buddy_block *block,
                           struct drm_printer *p)
{
        u64 start = drm_buddy_block_offset(block);
        u64 size = drm_buddy_block_size(mm, block);

        drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
}
EXPORT_SYMBOL(drm_buddy_block_print);

/**
 * drm_buddy_print - print allocator state
 *
 * @mm: DRM buddy manager
 * @p: DRM printer to use
 */
void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
{
        int order;

        drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n",
                   mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20);

        for (order = mm->max_order; order >= 0; order--) {
                struct drm_buddy_block *block;
                u64 count = 0, free;

                list_for_each_entry(block, &mm->free_list[order], link) {
                        BUG_ON(!drm_buddy_block_is_free(block));
                        count++;
                }

                drm_printf(p, "order-%2d ", order);

                free = count * (mm->chunk_size << order);
                if (free < SZ_1M)
                        drm_printf(p, "free: %8llu KiB", free >> 10);
                else
                        drm_printf(p, "free: %8llu MiB", free >> 20);

                drm_printf(p, ", blocks: %llu\n", count);
        }
}
EXPORT_SYMBOL(drm_buddy_print);

static void drm_buddy_module_exit(void)
{
        kmem_cache_destroy(slab_blocks);
}

static int __init drm_buddy_module_init(void)
{
        slab_blocks = KMEM_CACHE(drm_buddy_block, 0);
        if (!slab_blocks)
                return -ENOMEM;

        return 0;
}

module_init(drm_buddy_module_init);
module_exit(drm_buddy_module_exit);

MODULE_DESCRIPTION("DRM Buddy Allocator");
MODULE_LICENSE("Dual MIT/GPL");