// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>

static struct kmem_cache *slab_blocks;

static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
					       struct drm_buddy_block *parent,
					       unsigned int order,
					       u64 offset)
{
	struct drm_buddy_block *block;

	BUG_ON(order > DRM_BUDDY_MAX_ORDER);

	block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
	if (!block)
		return NULL;

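	/*
	 * Pack everything into a single u64: the upper bits of the header
	 * hold the chunk-aligned offset, while the low bits hold the
	 * allocation state and order (see the DRM_BUDDY_HEADER_* masks in
	 * drm_buddy.h for the exact layout).
	 */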
	block->header = offset;
	block->header |= order;
	block->parent = parent;

	BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
	return block;
}

static void drm_block_free(struct drm_buddy *mm,
			   struct drm_buddy_block *block)
{
	kmem_cache_free(slab_blocks, block);
}

static void list_insert_sorted(struct drm_buddy *mm,
			       struct drm_buddy_block *block)
{
	struct drm_buddy_block *node;
	struct list_head *head;

	head = &mm->free_list[drm_buddy_block_order(block)];
	if (list_empty(head)) {
		list_add(&block->link, head);
		return;
	}

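	/*
	 * Keep each free list sorted by ascending offset: walk until the
	 * first block with a higher offset and link in just before it.
	 */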
	list_for_each_entry(node, head, link)
		if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
			break;

	__list_add(&block->link, node->link.prev, &node->link);
}

static void mark_allocated(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_ALLOCATED;

	list_del(&block->link);
}

static void mark_free(struct drm_buddy *mm,
		      struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_FREE;

	list_insert_sorted(mm, block);
}

static void mark_split(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_SPLIT;

	list_del(&block->link);
}

/**
 * drm_buddy_init - init memory manager
 *
 * @mm: DRM buddy manager to initialize
 * @size: size in bytes to manage
 * @chunk_size: minimum page size in bytes for our allocations
 *
 * Initializes the memory manager and its resources.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
	unsigned int i;
	u64 offset;

	if (size < chunk_size)
		return -EINVAL;

	if (chunk_size < PAGE_SIZE)
		return -EINVAL;

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	size = round_down(size, chunk_size);

	mm->size = size;
	mm->avail = size;
	mm->chunk_size = chunk_size;
	mm->max_order = ilog2(size) - ilog2(chunk_size);

	BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);

	mm->free_list = kmalloc_array(mm->max_order + 1,
				      sizeof(struct list_head),
				      GFP_KERNEL);
	if (!mm->free_list)
		return -ENOMEM;

	for (i = 0; i <= mm->max_order; ++i)
		INIT_LIST_HEAD(&mm->free_list[i]);

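	/*
	 * One root block per set bit in size: each bit contributes a
	 * distinct power-of-two chunk, so hweight64() gives the root count.
	 */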
	mm->n_roots = hweight64(size);

	mm->roots = kmalloc_array(mm->n_roots,
				  sizeof(struct drm_buddy_block *),
				  GFP_KERNEL);
	if (!mm->roots)
		goto out_free_list;

	offset = 0;
	i = 0;

	/*
	 * Split into power-of-two blocks, in case we are given a size that is
	 * not itself a power-of-two.
	 */
	do {
		struct drm_buddy_block *root;
		unsigned int order;
		u64 root_size;

		root_size = rounddown_pow_of_two(size);
		order = ilog2(root_size) - ilog2(chunk_size);

		root = drm_block_alloc(mm, NULL, order, offset);
		if (!root)
			goto out_free_roots;

		mark_free(mm, root);

		BUG_ON(i > mm->max_order);
		BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);

		mm->roots[i] = root;

		offset += root_size;
		size -= root_size;
		i++;
	} while (size);

	return 0;

out_free_roots:
	while (i--)
		drm_block_free(mm, mm->roots[i]);
	kfree(mm->roots);
out_free_list:
	kfree(mm->free_list);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);

/**
 * drm_buddy_fini - tear down the memory manager
 *
 * @mm: DRM buddy manager to free
 *
 * Cleans up memory manager resources and the freelist.
 */
void drm_buddy_fini(struct drm_buddy *mm)
{
	int i;

	for (i = 0; i < mm->n_roots; ++i) {
		WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
		drm_block_free(mm, mm->roots[i]);
	}

	WARN_ON(mm->avail != mm->size);

	kfree(mm->roots);
	kfree(mm->free_list);
}
EXPORT_SYMBOL(drm_buddy_fini);

static int split_block(struct drm_buddy *mm,
		       struct drm_buddy_block *block)
{
	unsigned int block_order = drm_buddy_block_order(block) - 1;
	u64 offset = drm_buddy_block_offset(block);

	BUG_ON(!drm_buddy_block_is_free(block));
	BUG_ON(!drm_buddy_block_order(block));

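	/*
	 * Halve the block: the left child inherits the parent's offset,
	 * the right child starts at the midpoint (offset + half the size).
	 */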
	block->left = drm_block_alloc(mm, block, block_order, offset);
	if (!block->left)
		return -ENOMEM;

	block->right = drm_block_alloc(mm, block, block_order,
				       offset + (mm->chunk_size << block_order));
	if (!block->right) {
		drm_block_free(mm, block->left);
		return -ENOMEM;
	}

	mark_free(mm, block->left);
	mark_free(mm, block->right);

	mark_split(block);

	return 0;
}

static struct drm_buddy_block *
__get_buddy(struct drm_buddy_block *block)
{
	struct drm_buddy_block *parent;

	parent = block->parent;
	if (!parent)
		return NULL;

	if (parent->left == block)
		return parent->right;

	return parent->left;
}

/**
 * drm_get_buddy - get buddy address
 *
 * @block: DRM buddy block
 *
 * Returns the corresponding buddy block for @block, or NULL
 * if this is a root block and can't be merged further.
 * Requires some kind of locking to protect against
 * any concurrent allocate and free operations.
 */
struct drm_buddy_block *
drm_get_buddy(struct drm_buddy_block *block)
{
	return __get_buddy(block);
}
EXPORT_SYMBOL(drm_get_buddy);

static void __drm_buddy_free(struct drm_buddy *mm,
			     struct drm_buddy_block *block)
{
	struct drm_buddy_block *parent;

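	/*
	 * Coalesce upwards: while the buddy is also free, release both
	 * children and continue with the parent, then put the final
	 * (possibly merged) block back on its free list.
	 */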
	while ((parent = block->parent)) {
		struct drm_buddy_block *buddy;

		buddy = __get_buddy(block);

		if (!drm_buddy_block_is_free(buddy))
			break;

		list_del(&buddy->link);

		drm_block_free(mm, block);
		drm_block_free(mm, buddy);

		block = parent;
	}

	mark_free(mm, block);
}

/**
 * drm_buddy_free_block - free a block
 *
 * @mm: DRM buddy manager
 * @block: block to be freed
 */
void drm_buddy_free_block(struct drm_buddy *mm,
			  struct drm_buddy_block *block)
{
	BUG_ON(!drm_buddy_block_is_allocated(block));
	mm->avail += drm_buddy_block_size(mm, block);
	__drm_buddy_free(mm, block);
}
EXPORT_SYMBOL(drm_buddy_free_block);

/**
 * drm_buddy_free_list - free blocks
 *
 * @mm: DRM buddy manager
 * @objects: input list head to free blocks
 */
void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
{
	struct drm_buddy_block *block, *on;

	list_for_each_entry_safe(block, on, objects, link) {
		drm_buddy_free_block(mm, block);
		cond_resched();
	}
	INIT_LIST_HEAD(objects);
}
EXPORT_SYMBOL(drm_buddy_free_list);

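/* Both helpers compare inclusive [s, e] ranges. */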
static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s1 <= e2 && e1 >= s2;
}

static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s1 <= s2 && e1 >= e2;
}

static struct drm_buddy_block *
alloc_range_bias(struct drm_buddy *mm,
		 u64 start, u64 end,
		 unsigned int order)
{
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	LIST_HEAD(dfs);
	int err;
	int i;

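	/*
	 * Depth-first search from the roots: prune subtrees that fall
	 * outside [start, end] or are already allocated, and split
	 * partially overlapping blocks until a free block of exactly the
	 * requested order lies fully inside the range. The end is made
	 * inclusive so overlaps()/contains() can compare closed intervals.
	 */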
	end = end - 1;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(&dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		if (drm_buddy_block_order(block) < order)
			continue;

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block))
			continue;

		if (contains(start, end, block_start, block_end) &&
		    order == drm_buddy_block_order(block)) {
			/*
			 * Find the free block within the range.
			 */
			if (drm_buddy_block_is_free(block))
				return block;

			continue;
		}

		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, &dfs);
		list_add(&block->left->tmp_link, &dfs);
	} while (1);

	return ERR_PTR(-ENOSPC);

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block);
	return ERR_PTR(err);
}

static struct drm_buddy_block *
get_maxblock(struct drm_buddy *mm, unsigned int order)
{
	struct drm_buddy_block *max_block = NULL, *node;
	unsigned int i;

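	/*
	 * For top-down allocation: scan every order at or above the
	 * requested one and pick the free block with the highest offset.
	 * The free lists are sorted by ascending offset, so the last entry
	 * of each list is that order's highest candidate.
	 */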
	for (i = order; i <= mm->max_order; ++i) {
		if (!list_empty(&mm->free_list[i])) {
			node = list_last_entry(&mm->free_list[i],
					       struct drm_buddy_block,
					       link);
			if (!max_block) {
				max_block = node;
				continue;
			}

			if (drm_buddy_block_offset(node) >
			    drm_buddy_block_offset(max_block)) {
				max_block = node;
			}
		}
	}

	return max_block;
}

static struct drm_buddy_block *
alloc_from_freelist(struct drm_buddy *mm,
		    unsigned int order,
		    unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	unsigned int tmp;
	int err;

	if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
		block = get_maxblock(mm, order);
		if (block)
			/* Store the obtained block order */
			tmp = drm_buddy_block_order(block);
	} else {
		for (tmp = order; tmp <= mm->max_order; ++tmp) {
			if (!list_empty(&mm->free_list[tmp])) {
				block = list_last_entry(&mm->free_list[tmp],
							struct drm_buddy_block,
							link);
				if (block)
					break;
			}
		}
	}

	if (!block)
		return ERR_PTR(-ENOSPC);

	BUG_ON(!drm_buddy_block_is_free(block));

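	/*
	 * If the block is larger than requested, split it down to the
	 * target order, descending into the right (higher offset) child
	 * each time and leaving the left half on the free list.
	 */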
	while (tmp != order) {
		err = split_block(mm, block);
		if (unlikely(err))
			goto err_undo;

		block = block->right;
		tmp--;
	}
	return block;

err_undo:
	if (tmp != order)
		__drm_buddy_free(mm, block);
	return ERR_PTR(err);
}

static int __alloc_range(struct drm_buddy *mm,
			 struct list_head *dfs,
			 u64 start, u64 size,
			 struct list_head *blocks)
{
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	LIST_HEAD(allocated);
	u64 end;
	int err;

	end = start + size - 1;

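	/*
	 * Walk the tree depth-first: claim every free block fully contained
	 * in [start, end], split blocks that only partially overlap, and
	 * fail with -ENOSPC if any part of the range is already allocated.
	 */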
	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block)) {
			err = -ENOSPC;
			goto err_free;
		}

		if (contains(start, end, block_start, block_end)) {
			if (!drm_buddy_block_is_free(block)) {
				err = -ENOSPC;
				goto err_free;
			}

			mark_allocated(block);
			mm->avail -= drm_buddy_block_size(mm, block);
			list_add_tail(&block->link, &allocated);
			continue;
		}

		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, dfs);
		list_add(&block->left->tmp_link, dfs);
	} while (1);

	list_splice_tail(&allocated, blocks);
	return 0;

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block);

err_free:
	drm_buddy_free_list(mm, &allocated);
	return err;
}

static int __drm_buddy_alloc_range(struct drm_buddy *mm,
				   u64 start,
				   u64 size,
				   struct list_head *blocks)
{
	LIST_HEAD(dfs);
	int i;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	return __alloc_range(mm, &dfs, start, size, blocks);
}

/**
 * drm_buddy_block_trim - free unused pages
 *
 * @mm: DRM buddy manager
 * @new_size: original size requested
 * @blocks: Input and output list of allocated blocks.
 * MUST contain a single block as input to be trimmed.
 * On success will contain the newly allocated blocks
 * making up the @new_size. Blocks always appear in
 * ascending order.
 *
 * For contiguous allocation, we round up the size to the nearest
 * power-of-two value; drivers consume the *actual* size, so the
 * remaining portions are unused and can optionally be freed with
 * this function.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_block_trim(struct drm_buddy *mm,
			 u64 new_size,
			 struct list_head *blocks)
{
	struct drm_buddy_block *parent;
	struct drm_buddy_block *block;
	LIST_HEAD(dfs);
	u64 new_start;
	int err;

	if (!list_is_singular(blocks))
		return -EINVAL;

	block = list_first_entry(blocks,
				 struct drm_buddy_block,
				 link);

	if (WARN_ON(!drm_buddy_block_is_allocated(block)))
		return -EINVAL;

	if (new_size > drm_buddy_block_size(mm, block))
		return -EINVAL;

	if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
		return -EINVAL;

	if (new_size == drm_buddy_block_size(mm, block))
		return 0;

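	/*
	 * Temporarily hand the whole block back to the allocator, then
	 * re-allocate just the leading @new_size bytes out of it via
	 * __alloc_range(). On failure the original block is restored.
	 */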
	list_del(&block->link);
	mark_free(mm, block);
	mm->avail += drm_buddy_block_size(mm, block);

	/* Prevent recursively freeing this node */
	parent = block->parent;
	block->parent = NULL;

	new_start = drm_buddy_block_offset(block);
	list_add(&block->tmp_link, &dfs);
	err = __alloc_range(mm, &dfs, new_start, new_size, blocks);
	if (err) {
		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		list_add(&block->link, blocks);
	}

	block->parent = parent;
	return err;
}
EXPORT_SYMBOL(drm_buddy_block_trim);

/**
 * drm_buddy_alloc_blocks - allocate power-of-two blocks
 *
 * @mm: DRM buddy manager to allocate from
 * @start: start of the allowed range for this block
 * @end: end of the allowed range for this block
 * @size: size of the allocation
 * @min_page_size: alignment of the allocation
 * @blocks: output list head to add allocated blocks
 * @flags: DRM_BUDDY_*_ALLOCATION flags
 *
 * alloc_range_bias() is called when there are range restrictions; it
 * traverses the tree and returns the desired block.
 *
 * alloc_from_freelist() is called when *no* range restrictions are
 * enforced; it picks a block from the freelist.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
			   u64 start, u64 end, u64 size,
			   u64 min_page_size,
			   struct list_head *blocks,
			   unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	unsigned int min_order, order;
	unsigned long pages;
	LIST_HEAD(allocated);
	int err;

	if (size < mm->chunk_size)
		return -EINVAL;

	if (min_page_size < mm->chunk_size)
		return -EINVAL;

	if (!is_power_of_2(min_page_size))
		return -EINVAL;

	if (!IS_ALIGNED(start | end | size, mm->chunk_size))
		return -EINVAL;

	if (end > mm->size)
		return -EINVAL;

	if (range_overflows(start, size, mm->size))
		return -EINVAL;

	/* Actual range allocation */
	if (start + size == end)
		return __drm_buddy_alloc_range(mm, start, size, blocks);

	if (!IS_ALIGNED(size, min_page_size))
		return -EINVAL;

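	/*
	 * Express the request in chunk-size pages: order is the largest
	 * power-of-two block not exceeding the remaining size, min_order
	 * the alignment floor derived from min_page_size. The loop below
	 * greedily allocates the biggest block it can, retrying at smaller
	 * orders (down to min_order) on -ENOSPC.
	 */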
	pages = size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);

	do {
		order = min(order, (unsigned int)fls(pages) - 1);
		BUG_ON(order > mm->max_order);
		BUG_ON(order < min_order);

		do {
			if (flags & DRM_BUDDY_RANGE_ALLOCATION)
				/* Allocate traversing within the range */
				block = alloc_range_bias(mm, start, end, order);
			else
				/* Allocate from freelist */
				block = alloc_from_freelist(mm, order, flags);

			if (!IS_ERR(block))
				break;

			if (order-- == min_order) {
				err = -ENOSPC;
				goto err_free;
			}
		} while (1);

		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		kmemleak_update_trace(block);
		list_add_tail(&block->link, &allocated);

		pages -= BIT(order);

		if (!pages)
			break;
	} while (1);

	list_splice_tail(&allocated, blocks);
	return 0;

err_free:
	drm_buddy_free_list(mm, &allocated);
	return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);

/**
 * drm_buddy_block_print - print block information
 *
 * @mm: DRM buddy manager
 * @block: DRM buddy block
 * @p: DRM printer to use
 */
void drm_buddy_block_print(struct drm_buddy *mm,
			   struct drm_buddy_block *block,
			   struct drm_printer *p)
{
	u64 start = drm_buddy_block_offset(block);
	u64 size = drm_buddy_block_size(mm, block);

	drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
}
EXPORT_SYMBOL(drm_buddy_block_print);

/**
 * drm_buddy_print - print allocator state
 *
 * @mm: DRM buddy manager
 * @p: DRM printer to use
 */
void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
{
	int order;

	drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n",
		   mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20);

	for (order = mm->max_order; order >= 0; order--) {
		struct drm_buddy_block *block;
		u64 count = 0, free;

		list_for_each_entry(block, &mm->free_list[order], link) {
			BUG_ON(!drm_buddy_block_is_free(block));
			count++;
		}

		drm_printf(p, "order-%d ", order);

		free = count * (mm->chunk_size << order);
		if (free < SZ_1M)
			drm_printf(p, "free: %lluKiB", free >> 10);
		else
			drm_printf(p, "free: %lluMiB", free >> 20);

		drm_printf(p, ", pages: %llu\n", count);
	}
}
EXPORT_SYMBOL(drm_buddy_print);

static void drm_buddy_module_exit(void)
{
	kmem_cache_destroy(slab_blocks);
}

static int __init drm_buddy_module_init(void)
{
	slab_blocks = KMEM_CACHE(drm_buddy_block, 0);
	if (!slab_blocks)
		return -ENOMEM;

	return 0;
}

module_init(drm_buddy_module_init);
module_exit(drm_buddy_module_exit);

MODULE_DESCRIPTION("DRM Buddy Allocator");
MODULE_LICENSE("Dual MIT/GPL");