xref: /dflybsd-src/sys/dev/drm/drm_mm.c (revision 23832f75edc9855492226d612851679a00de2f9c)
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions; this could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

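/*
 * Typical embedded-node usage, as a minimal illustrative sketch (the names
 * and sizes are invented for the example; error handling and locking are
 * omitted). The node lives inside a caller-owned, zeroed structure:
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node;
 *
 *	drm_mm_init(&mm, 0, 1 << 20);
 *	ret = drm_mm_insert_node(&mm, &node, 4096, 0);
 *	if (ret == -ENOSPC)
 *		(no hole large enough was found)
 *	...
 *	drm_mm_remove_node(&node);
 *	drm_mm_takedown(&mm);
 */
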
#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		/* Allocation failed; fall back to the cache of nodes
		 * pre-allocated by drm_mm_pre_get(). */
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm:	memory manager struct we are pre-allocating for
 *
 * Fills the cache of unused nodes up to MM_UNUSED_TARGET so that a
 * subsequent atomic allocation can fall back to it.
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
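
/*
 * A minimal sketch of the intended call pattern (illustrative only;
 * 'dev_lock' and the surrounding code are assumptions, not part of this
 * file): top up the cache while sleeping is still allowed, then allocate
 * atomically under the caller's spinlock.
 *
 *	if (drm_mm_pre_get(mm))
 *		return -ENOMEM;
 *	spin_lock(&dev_lock);
 *	hole = drm_mm_search_free_generic(mm, size, 0, 0, 0);
 *	if (hole)
 *		node = drm_mm_get_block_generic(hole, size, 0, 0, 1);
 *	spin_unlock(&dev_lock);
 */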

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		/* Round the adjusted start up to the requested alignment. */
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		/* The new node consumes the hole from its very start, so
		 * the hole no longer follows hole_node. */
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
					unsigned long start,
					unsigned long size,
					bool atomic)
{
	struct drm_mm_node *hole, *node;
	unsigned long end = start + size;
	unsigned long hole_start;
	unsigned long hole_end;

	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > start || hole_end < end)
			continue;

		node = drm_mm_kmalloc(mm, atomic);
		if (unlikely(node == NULL))
			return NULL;

		node->start = start;
		node->size = size;
		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return node;
	}

	WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
	return NULL;
}
EXPORT_SYMBOL(drm_mm_create_block);
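
/*
 * A minimal illustrative sketch of reserving a fixed range with the
 * function above (the address and size are invented for the example,
 * e.g. a firmware-claimed region that must never be handed out):
 *
 *	struct drm_mm_node *stolen;
 *
 *	stolen = drm_mm_create_block(&mm, 0x100000, 0x8000, false);
 *	if (stolen == NULL)
 *		(the range [0x100000, 0x108000) is not inside any hole)
 */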

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     unsigned long color,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       unsigned long size, unsigned alignment,
			       unsigned long color)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
		       unsigned long size, unsigned alignment)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
}
EXPORT_SYMBOL(drm_mm_insert_node);

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	/* Clip the hole to the allowed [start, end) range before applying
	 * color adjustment and alignment. */
	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						unsigned long size,
						unsigned alignment,
						unsigned long color,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
				   start, end);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range-restricted
 * allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					unsigned long size, unsigned alignment, unsigned long color,
					unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
				unsigned long size, unsigned alignment,
				unsigned long start, unsigned long end)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
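
/*
 * Minimal illustrative sketch of a range-restricted allocation (the sizes
 * are invented for the example): place a 64KB node at 4KB alignment
 * somewhere inside the low 16MB only.
 *
 *	ret = drm_mm_insert_node_in_range(&mm, &node, 64 * 1024, 4096,
 *					  0, 16 * 1024 * 1024);
 *	if (ret)
 *		(-ENOSPC: no suitable hole inside [0, 16MB))
 */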

/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	/* The freed space merges into the hole that now follows prev_node. */
	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);

static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			start += alignment - tmp;
	}

	return end >= start + size;
}
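
/*
 * Worked example (numbers invented for illustration): for a hole
 * [0x1003, 0x3000) with size = 0x1000 and alignment = 0x1000,
 * start % alignment = 3, so start is rounded up by 0x1000 - 3 to 0x2000;
 * 0x3000 >= 0x2000 + 0x1000 holds, so the hole fits exactly.
 */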

/*
 * Walk the hole stack and return the first hole that can fit the request
 * (best_match == false) or the smallest hole that can (best_match == true).
 */
struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
					       unsigned long size,
					       unsigned alignment,
					       unsigned long color,
					       bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							unsigned long size,
							unsigned alignment,
							unsigned long color,
							unsigned long start,
							unsigned long end,
							bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	/* Keep stale prev/next pointers around so that
	 * drm_mm_scan_remove_block() can splice the node back in later. */
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the reverse order in which they have been added
 * to the scan list, otherwise the internal state of the memory manager will
 * be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then
 * return the just freed block (because it's at the top of the hole_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
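
/*
 * A minimal sketch of the intended eviction loop (loosely modelled on how
 * drivers such as i915 use this API; 'obj', 'lru' and 'unwind_list' are
 * hypothetical caller structures, not part of this file):
 *
 *	drm_mm_init_scan(&mm, size, alignment, color);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->unwind, &unwind_list);
 *		if (drm_mm_scan_add_block(&obj->node))
 *			break;
 *	}
 *
 * Walking unwind_list head-first then visits blocks in reverse order of
 * addition, exactly as removal requires, because list_add() prepends:
 *
 *	list_for_each_entry_safe(obj, next, &unwind_list, unwind) {
 *		if (drm_mm_scan_remove_block(&obj->node))
 *			(obj overlaps the found hole: evict it)
 *	}
 */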

int drm_mm_clean(struct drm_mm *mm)
{
	/* Clean means no nodes are allocated any more, i.e. the list
	 * contains nothing but head_node itself. */
	return list_empty(&mm->head_node.node_list);
}
EXPORT_SYMBOL(drm_mm_clean);

void drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_init(&mm->unused_lock, "drmmminit");

	/* Clever trick to avoid a special case in the free hole tracking:
	 * give head_node a negative size so that the hole following it
	 * covers exactly the managed range [start, start + size). */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);
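
/*
 * Illustrative teardown sketch: all nodes must be removed before the
 * manager goes away, otherwise drm_mm_takedown() below warns and bails out.
 *
 *	if (!drm_mm_clean(&mm))
 *		(remove or evict the remaining nodes first)
 *	drm_mm_takedown(&mm);
 */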

void drm_mm_takedown(struct drm_mm *mm)
{
	struct drm_mm_node *entry, *next;

	if (WARN(!list_empty(&mm->head_node.node_list),
		 "Memory manager not clean. Delaying takedown\n")) {
		return;
	}

	/* Drain the cache of pre-allocated nodes. */
	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
				       const char *prefix)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		kprintf("%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
		return hole_size;
	}

	return 0;
}

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		kprintf("%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	kprintf("%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				hole_start, hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
				entry->start, entry->start + entry->size,
				entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif