xref: /dflybsd-src/sys/dev/drm/drm_mm.c (revision 8d1ea4cc63e8cedbe8f670822d4606adf2cf7d45)
1 /**************************************************************************
2  *
3  * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  *
27  **************************************************************************/
28 
29 /*
30  * Generic simple memory manager implementation. Intended to be used as a base
31  * class implementation for more advanced memory managers.
32  *
33  * Note that the algorithm used is quite simple and there might be substantial
34  * performance gains if a smarter free list is implemented. Currently it is just
35  * an unordered stack of free regions. This could easily be improved by using an
36  * RB-tree instead, at least if we expect heavy fragmentation.
37  *
38  * Aligned allocations can also see improvement.
39  *
40  * Authors:
41  * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
42  */
43 
44 #include <drm/drmP.h>
45 #include <drm/drm_mm.h>
46 #include <linux/slab.h>
47 #include <linux/seq_file.h>
48 #include <linux/export.h>
49 
50 #define MM_UNUSED_TARGET 4
51 
52 static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
53 {
54 	struct drm_mm_node *child;
55 
56 	if (atomic)
57 		child = kzalloc(sizeof(*child), GFP_ATOMIC);
58 	else
59 		child = kzalloc(sizeof(*child), GFP_KERNEL);
60 
61 	if (unlikely(child == NULL)) {
62 		spin_lock(&mm->unused_lock);
63 		if (list_empty(&mm->unused_nodes))
64 			child = NULL;
65 		else {
66 			child =
67 			    list_entry(mm->unused_nodes.next,
68 				       struct drm_mm_node, node_list);
69 			list_del(&child->node_list);
70 			--mm->num_unused;
71 		}
72 		spin_unlock(&mm->unused_lock);
73 	}
74 	return child;
75 }
76 
77 int drm_mm_pre_get(struct drm_mm *mm)
78 {
79 	struct drm_mm_node *node;
80 
81 	spin_lock(&mm->unused_lock);
82 	while (mm->num_unused < MM_UNUSED_TARGET) {
83 		spin_unlock(&mm->unused_lock);
84 		node = kzalloc(sizeof(*node), GFP_KERNEL);
85 		spin_lock(&mm->unused_lock);
86 
87 		if (unlikely(node == NULL)) {
88 			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
89 			spin_unlock(&mm->unused_lock);
90 			return ret;
91 		}
92 		++mm->num_unused;
93 		list_add_tail(&node->node_list, &mm->unused_nodes);
94 	}
95 	spin_unlock(&mm->unused_lock);
96 	return 0;
97 }
98 
99 /**
100  * DOC: Overview
101  *
102  * drm_mm provides a simple range allocator. Drivers are free to use the
103  * resource allocator from the Linux core if it suits them; the upside of drm_mm
104  * is that it's in the DRM core, which means that it's easier to extend for
105  * some of the crazier special purpose needs of GPUs.
106  *
107  * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
108  * Drivers are free to embed either of them into their own suitable
109  * datastructures. drm_mm itself will not do any allocations of its own, so if
110  * drivers choose not to embed nodes they need to still allocate them
111  * themselves.
112  *
113  * The range allocator also supports reservation of preallocated blocks. This is
114  * useful for taking over initial mode setting configurations from the firmware,
115  * where an object needs to be created which exactly matches the firmware's
116  * scanout target. As long as the range is still free it can be inserted anytime
117  * after the allocator is initialized, which helps avoid looped
118  * dependencies in the driver load sequence.
119  *
120  * drm_mm maintains a stack of most recently freed holes, which of all
121  * simplistic datastructures seems to be a fairly decent approach to clustering
122  * allocations and avoiding too much fragmentation. This means free space
123  * searches are O(num_holes). Given all the fancy features drm_mm supports,
124  * something better would be fairly complex, and since gfx thrashing is a fairly
125  * steep cliff it is not a real concern. Removing a node again is O(1).
126  *
127  * drm_mm supports a few features: Alignment and range restrictions can be
128  * supplied. Furthermore every &drm_mm_node has a color value (which is just an
129  * opaque unsigned long) which in conjunction with a driver callback can be used
130  * to implement sophisticated placement restrictions. The i915 DRM driver uses
131  * this to implement guard pages between incompatible caching domains in the
132  * graphics TT.
133  *
134  * Two behaviors are supported for searching and allocating: bottom-up and top-down.
135  * The default is bottom-up. Top-down allocation can be used if the memory area
136  * has different restrictions, or just to reduce fragmentation.
137  *
138  * Finally, iteration helpers to walk all nodes and all holes are provided, as
139  * are some basic allocator dumpers for debugging.
140  */
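/*
 * A minimal usage sketch of the allocator described above (illustrative only,
 * not part of this file): it assumes a hypothetical driver-managed range of
 * vram_size bytes, and the names vram_mm, node and vram_size are made up for
 * this example.
 *
 *	struct drm_mm vram_mm;
 *	struct drm_mm_node node;
 *	int ret;
 *
 *	memset(&vram_mm, 0, sizeof(vram_mm));
 *	drm_mm_init(&vram_mm, 0, vram_size);
 *
 *	memset(&node, 0, sizeof(node));
 *	ret = drm_mm_insert_node_generic(&vram_mm, &node, 4096, 0, 0,
 *					 DRM_MM_SEARCH_BEST,
 *					 DRM_MM_CREATE_DEFAULT);
 *
 * On success node.start holds the assigned offset; the node is later released
 * with drm_mm_remove_node(&node) and the allocator torn down with
 * drm_mm_takedown(&vram_mm) once it is empty.
 */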
141 
142 static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
143 						u64 size,
144 						unsigned alignment,
145 						unsigned long color,
146 						u64 start,
147 						u64 end,
148 						enum drm_mm_search_flags flags);
149 
150 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
151 				 struct drm_mm_node *node,
152 				 u64 size, unsigned alignment,
153 				 unsigned long color,
154 				 enum drm_mm_allocator_flags flags)
155 {
156 	struct drm_mm *mm = hole_node->mm;
157 	u64 hole_start = drm_mm_hole_node_start(hole_node);
158 	u64 hole_end = drm_mm_hole_node_end(hole_node);
159 	u64 adj_start = hole_start;
160 	u64 adj_end = hole_end;
161 
162 	BUG_ON(node->allocated);
163 
164 	if (mm->color_adjust)
165 		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
166 
167 	if (flags & DRM_MM_CREATE_TOP)
168 		adj_start = adj_end - size;
169 
170 	if (alignment) {
171 		u64 tmp = adj_start;
172 		unsigned rem;
173 
174 		rem = do_div(tmp, alignment);
175 		if (rem) {
176 			if (flags & DRM_MM_CREATE_TOP)
177 				adj_start -= rem;
178 			else
179 				adj_start += alignment - rem;
180 		}
181 	}
182 
183 	BUG_ON(adj_start < hole_start);
184 	BUG_ON(adj_end > hole_end);
185 
186 	if (adj_start == hole_start) {
187 		hole_node->hole_follows = 0;
188 		list_del(&hole_node->hole_stack);
189 	}
190 
191 	node->start = adj_start;
192 	node->size = size;
193 	node->mm = mm;
194 	node->color = color;
195 	node->allocated = 1;
196 
197 	INIT_LIST_HEAD(&node->hole_stack);
198 	list_add(&node->node_list, &hole_node->node_list);
199 
200 	BUG_ON(node->start + node->size > adj_end);
201 
202 	node->hole_follows = 0;
203 	if (__drm_mm_hole_node_start(node) < hole_end) {
204 		list_add(&node->hole_stack, &mm->hole_stack);
205 		node->hole_follows = 1;
206 	}
207 }
208 
209 /**
210  * drm_mm_reserve_node - insert a pre-initialized node
211  * @mm: drm_mm allocator to insert @node into
212  * @node: drm_mm_node to insert
213  *
214  * This function inserts an already set-up drm_mm_node into the allocator,
215  * meaning that start, size and color must be set by the caller. This is useful
216  * to initialize the allocator with preallocated objects which must be set-up
217  * before the range allocator can be set-up, e.g. when taking over a firmware
218  * framebuffer.
219  *
220  * Returns:
221  * 0 on success, -ENOSPC if there's no hole where @node is.
222  */
223 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
224 {
225 	struct drm_mm_node *hole;
226 	u64 end = node->start + node->size;
227 	u64 hole_start;
228 	u64 hole_end;
229 
230 	BUG_ON(node == NULL);
231 
232 	/* Find the relevant hole to add our node to */
233 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
234 		if (hole_start > node->start || hole_end < end)
235 			continue;
236 
237 		node->mm = mm;
238 		node->allocated = 1;
239 
240 		INIT_LIST_HEAD(&node->hole_stack);
241 		list_add(&node->node_list, &hole->node_list);
242 
243 		if (node->start == hole_start) {
244 			hole->hole_follows = 0;
245 			list_del_init(&hole->hole_stack);
246 		}
247 
248 		node->hole_follows = 0;
249 		if (end != hole_end) {
250 			list_add(&node->hole_stack, &mm->hole_stack);
251 			node->hole_follows = 1;
252 		}
253 
254 		return 0;
255 	}
256 
257 	return -ENOSPC;
258 }
259 EXPORT_SYMBOL(drm_mm_reserve_node);
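/*
 * A hedged sketch of the firmware-framebuffer use-case described above
 * (illustrative only; fb_base, fb_size and the vram_mm allocator are
 * hypothetical driver values, not defined in this file):
 *
 *	struct drm_mm_node fb_node;
 *	int ret;
 *
 *	memset(&fb_node, 0, sizeof(fb_node));
 *	fb_node.start = fb_base;
 *	fb_node.size = fb_size;
 *	fb_node.color = 0;
 *	ret = drm_mm_reserve_node(&vram_mm, &fb_node);
 *
 * A return value of -ENOSPC means the requested range is already occupied or
 * lies outside the area managed by the allocator.
 */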
260 
261 struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
262 					     unsigned long size,
263 					     unsigned alignment,
264 					     unsigned long color,
265 					     int atomic)
266 {
267 	struct drm_mm_node *node;
268 
269 	node = drm_mm_kmalloc(hole_node->mm, atomic);
270 	if (unlikely(node == NULL))
271 		return NULL;
272 
273 	drm_mm_insert_helper(hole_node, node, size, alignment, color, DRM_MM_CREATE_DEFAULT);
274 
275 	return node;
276 }
277 
278 /**
279  * drm_mm_insert_node_generic - search for space and insert @node
280  * @mm: drm_mm to allocate from
281  * @node: preallocated node to insert
282  * @size: size of the allocation
283  * @alignment: alignment of the allocation
284  * @color: opaque tag value to use for this node
285  * @sflags: flags to fine-tune the allocation search
286  * @aflags: flags to fine-tune the allocation behavior
287  *
288  * The preallocated node must be cleared to 0.
289  *
290  * Returns:
291  * 0 on success, -ENOSPC if there's no suitable hole.
292  */
293 int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
294 			       u64 size, unsigned alignment,
295 			       unsigned long color,
296 			       enum drm_mm_search_flags sflags,
297 			       enum drm_mm_allocator_flags aflags)
298 {
299 	struct drm_mm_node *hole_node;
300 
301 	hole_node = drm_mm_search_free_generic(mm, size, alignment,
302 					       color, sflags);
303 	if (!hole_node)
304 		return -ENOSPC;
305 
306 	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
307 	return 0;
308 }
309 EXPORT_SYMBOL(drm_mm_insert_node_generic);
310 
311 static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
312 				       struct drm_mm_node *node,
313 				       u64 size, unsigned alignment,
314 				       unsigned long color,
315 				       u64 start, u64 end,
316 				       enum drm_mm_allocator_flags flags)
317 {
318 	struct drm_mm *mm = hole_node->mm;
319 	u64 hole_start = drm_mm_hole_node_start(hole_node);
320 	u64 hole_end = drm_mm_hole_node_end(hole_node);
321 	u64 adj_start = hole_start;
322 	u64 adj_end = hole_end;
323 
324 	BUG_ON(!hole_node->hole_follows || node->allocated);
325 
326 	if (adj_start < start)
327 		adj_start = start;
328 	if (adj_end > end)
329 		adj_end = end;
330 
331 	if (mm->color_adjust)
332 		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
333 
334 	if (flags & DRM_MM_CREATE_TOP)
335 		adj_start = adj_end - size;
336 
337 	if (alignment) {
338 		u64 tmp = adj_start;
339 		unsigned rem;
340 
341 		rem = do_div(tmp, alignment);
342 		if (rem) {
343 			if (flags & DRM_MM_CREATE_TOP)
344 				adj_start -= rem;
345 			else
346 				adj_start += alignment - rem;
347 		}
348 	}
349 
350 	if (adj_start == hole_start) {
351 		hole_node->hole_follows = 0;
352 		list_del(&hole_node->hole_stack);
353 	}
354 
355 	node->start = adj_start;
356 	node->size = size;
357 	node->mm = mm;
358 	node->color = color;
359 	node->allocated = 1;
360 
361 	INIT_LIST_HEAD(&node->hole_stack);
362 	list_add(&node->node_list, &hole_node->node_list);
363 
364 	BUG_ON(node->start < start);
365 	BUG_ON(node->start < adj_start);
366 	BUG_ON(node->start + node->size > adj_end);
367 	BUG_ON(node->start + node->size > end);
368 
369 	node->hole_follows = 0;
370 	if (__drm_mm_hole_node_start(node) < hole_end) {
371 		list_add(&node->hole_stack, &mm->hole_stack);
372 		node->hole_follows = 1;
373 	}
374 }
375 
376 /**
377  * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
378  * @mm: drm_mm to allocate from
379  * @node: preallocated node to insert
380  * @size: size of the allocation
381  * @alignment: alignment of the allocation
382  * @color: opaque tag value to use for this node
383  * @start: start of the allowed range for this node
384  * @end: end of the allowed range for this node
385  * @sflags: flags to fine-tune the allocation search
386  * @aflags: flags to fine-tune the allocation behavior
387  *
388  * The preallocated node must be cleared to 0.
389  *
390  * Returns:
391  * 0 on success, -ENOSPC if there's no suitable hole.
392  */
393 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
394 					u64 size, unsigned alignment,
395 					unsigned long color,
396 					u64 start, u64 end,
397 					enum drm_mm_search_flags sflags,
398 					enum drm_mm_allocator_flags aflags)
399 {
400 	struct drm_mm_node *hole_node;
401 
402 	hole_node = drm_mm_search_free_in_range_generic(mm,
403 							size, alignment, color,
404 							start, end, sflags);
405 	if (!hole_node)
406 		return -ENOSPC;
407 
408 	drm_mm_insert_helper_range(hole_node, node,
409 				   size, alignment, color,
410 				   start, end, aflags);
411 	return 0;
412 }
413 EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
414 
415 /**
416  * drm_mm_remove_node - Remove a memory node from the allocator.
417  * @node: drm_mm_node to remove
418  *
419  * This just removes a node from its drm_mm allocator. The node does not need to
420  * be cleared again before it can be re-inserted into this or any other drm_mm
421  * allocator. It is a bug to call this function on an un-allocated node.
422  */
423 void drm_mm_remove_node(struct drm_mm_node *node)
424 {
425 	struct drm_mm *mm = node->mm;
426 	struct drm_mm_node *prev_node;
427 
428 	if (WARN_ON(!node->allocated))
429 		return;
430 
431 	BUG_ON(node->scanned_block || node->scanned_prev_free
432 				   || node->scanned_next_free);
433 
434 	prev_node =
435 	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);
436 
437 	if (node->hole_follows) {
438 		BUG_ON(__drm_mm_hole_node_start(node) ==
439 		       __drm_mm_hole_node_end(node));
440 		list_del(&node->hole_stack);
441 	} else
442 		BUG_ON(__drm_mm_hole_node_start(node) !=
443 		       __drm_mm_hole_node_end(node));
444 
445 
446 	if (!prev_node->hole_follows) {
447 		prev_node->hole_follows = 1;
448 		list_add(&prev_node->hole_stack, &mm->hole_stack);
449 	} else
450 		list_move(&prev_node->hole_stack, &mm->hole_stack);
451 
452 	list_del(&node->node_list);
453 	node->allocated = 0;
454 }
455 EXPORT_SYMBOL(drm_mm_remove_node);
456 
457 /*
458  * Remove a memory node from the allocator and free the allocated struct
459  * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
460  * drm_mm_get_block functions.
461  */
462 void drm_mm_put_block(struct drm_mm_node *node)
463 {
464 
465 	struct drm_mm *mm = node->mm;
466 
467 	drm_mm_remove_node(node);
468 
469 	spin_lock(&mm->unused_lock);
470 	if (mm->num_unused < MM_UNUSED_TARGET) {
471 		list_add(&node->node_list, &mm->unused_nodes);
472 		++mm->num_unused;
473 	} else
474 		kfree(node);
475 	spin_unlock(&mm->unused_lock);
476 }
477 
478 static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
479 {
480 	if (end - start < size)
481 		return 0;
482 
483 	if (alignment) {
484 		u64 tmp = start;
485 		unsigned rem;
486 
487 		rem = do_div(tmp, alignment);
488 		if (rem)
489 			start += alignment - rem;
490 	}
491 
492 	return end >= start + size;
493 }
494 
495 struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
496 						      u64 size,
497 						      unsigned alignment,
498 						      unsigned long color,
499 						      enum drm_mm_search_flags flags)
500 {
501 	struct drm_mm_node *entry;
502 	struct drm_mm_node *best;
503 	u64 adj_start;
504 	u64 adj_end;
505 	u64 best_size;
506 
507 	BUG_ON(mm->scanned_blocks);
508 
509 	best = NULL;
510 	best_size = ~0UL;
511 
512 	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
513 			       flags & DRM_MM_SEARCH_BELOW) {
514 		u64 hole_size = adj_end - adj_start;
515 
516 		if (mm->color_adjust) {
517 			mm->color_adjust(entry, color, &adj_start, &adj_end);
518 			if (adj_end <= adj_start)
519 				continue;
520 		}
521 
522 		if (!check_free_hole(adj_start, adj_end, size, alignment))
523 			continue;
524 
525 		if (!(flags & DRM_MM_SEARCH_BEST))
526 			return entry;
527 
528 		if (hole_size < best_size) {
529 			best = entry;
530 			best_size = hole_size;
531 		}
532 	}
533 
534 	return best;
535 }
536 
537 static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
538 							u64 size,
539 							unsigned alignment,
540 							unsigned long color,
541 							u64 start,
542 							u64 end,
543 							enum drm_mm_search_flags flags)
544 {
545 	struct drm_mm_node *entry;
546 	struct drm_mm_node *best;
547 	u64 adj_start;
548 	u64 adj_end;
549 	u64 best_size;
550 
551 	BUG_ON(mm->scanned_blocks);
552 
553 	best = NULL;
554 	best_size = ~0UL;
555 
556 	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
557 			       flags & DRM_MM_SEARCH_BELOW) {
558 		u64 hole_size = adj_end - adj_start;
559 
560 		if (adj_start < start)
561 			adj_start = start;
562 		if (adj_end > end)
563 			adj_end = end;
564 
565 		if (mm->color_adjust) {
566 			mm->color_adjust(entry, color, &adj_start, &adj_end);
567 			if (adj_end <= adj_start)
568 				continue;
569 		}
570 
571 		if (!check_free_hole(adj_start, adj_end, size, alignment))
572 			continue;
573 
574 		if (!(flags & DRM_MM_SEARCH_BEST))
575 			return entry;
576 
577 		if (hole_size < best_size) {
578 			best = entry;
579 			best_size = hole_size;
580 		}
581 	}
582 
583 	return best;
584 }
585 
586 /**
587  * drm_mm_replace_node - move an allocation from @old to @new
588  * @old: drm_mm_node to remove from the allocator
589  * @new: drm_mm_node which should inherit @old's allocation
590  *
591  * This is useful when drivers embed the drm_mm_node structure and hence
592  * can't move allocations by reassigning pointers. It's a combination of remove
593  * and insert with the guarantee that the allocation start will match.
594  */
595 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
596 {
597 	list_replace(&old->node_list, &new->node_list);
598 	list_replace(&old->hole_stack, &new->hole_stack);
599 	new->hole_follows = old->hole_follows;
600 	new->mm = old->mm;
601 	new->start = old->start;
602 	new->size = old->size;
603 	new->color = old->color;
604 
605 	old->allocated = 0;
606 	new->allocated = 1;
607 }
608 EXPORT_SYMBOL(drm_mm_replace_node);
609 
610 /**
611  * DOC: lru scan roster
612  *
613  * Very often GPUs need to have contiguous allocations for a given object. When
614  * evicting objects to make space for a new one it is therefore not very
615  * efficient to simply select objects from the tail of an LRU until there is a
616  * suitable hole: especially for big objects or nodes that otherwise have
617  * special allocation constraints there's a good chance we evict lots of
618  * (smaller) objects unnecessarily.
619  *
620  * The DRM range allocator supports this use-case through the scanning
621  * interfaces. First a scan operation needs to be initialized with
622  * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
623  * objects to the roster (probably by walking an LRU list, but this can be
624  * freely implemented) until a suitable hole is found or there's no further
625  * evictable object.
626  *
627  * Then the driver must walk through all objects again in exactly the reverse
628  * order to restore the allocator state. Note that while the allocator is used
629  * in the scan mode no other operation is allowed.
630  *
631  * Finally the driver evicts all objects selected in the scan. Adding and
632  * removing an object is O(1), and since freeing a node is also O(1) the overall
633  * complexity is O(scanned_objects). So, like the free stack which needs to be
634  * walked before a scan operation even begins, this is linear in the number of
635  * objects. It doesn't seem to hurt badly.
636  */
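/*
 * A rough sketch of the eviction flow described above (illustrative only; the
 * lru list, the obj structure and its node/evict_link members are hypothetical
 * driver state, not part of this allocator):
 *
 *	LIST_HEAD(evict_list);
 *	struct my_obj *obj;
 *	bool found = false;
 *
 *	drm_mm_init_scan(mm, size, alignment, color);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 * Walking evict_list from its head now visits the objects in the reverse order
 * of addition (list_add() inserts at the head), which is the order required by
 * drm_mm_scan_remove_block(). Nodes for which it returns true are the ones to
 * evict with drm_mm_remove_node() once the scan list has been emptied; all
 * others are simply restored.
 */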
637 
638 /**
639  * drm_mm_init_scan - initialize lru scanning
640  * @mm: drm_mm to scan
641  * @size: size of the allocation
642  * @alignment: alignment of the allocation
643  * @color: opaque tag value to use for the allocation
644  *
645  * This simply sets up the scanning routines with the parameters for the desired
646  * hole. Note that there's no need to specify allocation flags, since they only
647  * change the place a node is allocated from within a suitable hole.
648  *
649  * Warning:
650  * As long as the scan list is non-empty, no other operations than
651  * adding/removing nodes to/from the scan list are allowed.
652  */
653 void drm_mm_init_scan(struct drm_mm *mm,
654 		      u64 size,
655 		      unsigned alignment,
656 		      unsigned long color)
657 {
658 	mm->scan_color = color;
659 	mm->scan_alignment = alignment;
660 	mm->scan_size = size;
661 	mm->scanned_blocks = 0;
662 	mm->scan_hit_start = 0;
663 	mm->scan_hit_end = 0;
664 	mm->scan_check_range = 0;
665 	mm->prev_scanned_node = NULL;
666 }
667 EXPORT_SYMBOL(drm_mm_init_scan);
668 
669 /**
670  * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
671  * @mm: drm_mm to scan
672  * @size: size of the allocation
673  * @alignment: alignment of the allocation
674  * @color: opaque tag value to use for the allocation
675  * @start: start of the allowed range for the allocation
676  * @end: end of the allowed range for the allocation
677  *
678  * This simply sets up the scanning routines with the parameters for the desired
679  * hole. Note that there's no need to specify allocation flags, since they only
680  * change the place a node is allocated from within a suitable hole.
681  *
682  * Warning:
683  * As long as the scan list is non-empty, no other operations than
684  * adding/removing nodes to/from the scan list are allowed.
685  */
686 void drm_mm_init_scan_with_range(struct drm_mm *mm,
687 				 u64 size,
688 				 unsigned alignment,
689 				 unsigned long color,
690 				 u64 start,
691 				 u64 end)
692 {
693 	mm->scan_color = color;
694 	mm->scan_alignment = alignment;
695 	mm->scan_size = size;
696 	mm->scanned_blocks = 0;
697 	mm->scan_hit_start = 0;
698 	mm->scan_hit_end = 0;
699 	mm->scan_start = start;
700 	mm->scan_end = end;
701 	mm->scan_check_range = 1;
702 	mm->prev_scanned_node = NULL;
703 }
704 EXPORT_SYMBOL(drm_mm_init_scan_with_range);
705 
706 /**
707  * drm_mm_scan_add_block - add a node to the scan list
708  * @node: drm_mm_node to add
709  *
710  * Add a node to the scan list that might be freed to make space for the desired
711  * hole.
712  *
713  * Returns:
714  * True if a hole has been found, false otherwise.
715  */
716 bool drm_mm_scan_add_block(struct drm_mm_node *node)
717 {
718 	struct drm_mm *mm = node->mm;
719 	struct drm_mm_node *prev_node;
720 	u64 hole_start, hole_end;
721 	u64 adj_start, adj_end;
722 
723 	mm->scanned_blocks++;
724 
725 	BUG_ON(node->scanned_block);
726 	node->scanned_block = 1;
727 
728 	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
729 			       node_list);
730 
731 	node->scanned_preceeds_hole = prev_node->hole_follows;
732 	prev_node->hole_follows = 1;
733 	list_del(&node->node_list);
734 	node->node_list.prev = &prev_node->node_list;
735 	node->node_list.next = &mm->prev_scanned_node->node_list;
736 	mm->prev_scanned_node = node;
737 
738 	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
739 	adj_end = hole_end = drm_mm_hole_node_end(prev_node);
740 
741 	if (mm->scan_check_range) {
742 		if (adj_start < mm->scan_start)
743 			adj_start = mm->scan_start;
744 		if (adj_end > mm->scan_end)
745 			adj_end = mm->scan_end;
746 	}
747 
748 	if (mm->color_adjust)
749 		mm->color_adjust(prev_node, mm->scan_color,
750 				 &adj_start, &adj_end);
751 
752 	if (check_free_hole(adj_start, adj_end,
753 			    mm->scan_size, mm->scan_alignment)) {
754 		mm->scan_hit_start = hole_start;
755 		mm->scan_hit_end = hole_end;
756 		return true;
757 	}
758 
759 	return false;
760 }
761 EXPORT_SYMBOL(drm_mm_scan_add_block);
762 
763 /**
764  * drm_mm_scan_remove_block - remove a node from the scan list
765  * @node: drm_mm_node to remove
766  *
767  * Nodes _must_ be removed from the scan list in exactly the reverse order in
768  * which they have been added, otherwise the internal state of the memory
769  * manager will be corrupted.
770  *
771  * When the scan list is empty, the selected memory nodes can be freed. An
772  * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
773  * return the just freed block (because it's at the top of the hole_stack list).
774  *
775  * Returns:
776  * True if this block should be evicted, false otherwise. Will always
777  * return false when no hole has been found.
778  */
779 bool drm_mm_scan_remove_block(struct drm_mm_node *node)
780 {
781 	struct drm_mm *mm = node->mm;
782 	struct drm_mm_node *prev_node;
783 
784 	mm->scanned_blocks--;
785 
786 	BUG_ON(!node->scanned_block);
787 	node->scanned_block = 0;
788 
789 	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
790 			       node_list);
791 
792 	prev_node->hole_follows = node->scanned_preceeds_hole;
793 	list_add(&node->node_list, &prev_node->node_list);
794 
795 	 return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
796 		 node->start < mm->scan_hit_end);
797 }
798 EXPORT_SYMBOL(drm_mm_scan_remove_block);
799 
800 /**
801  * drm_mm_clean - checks whether an allocator is clean
802  * @mm: drm_mm allocator to check
803  *
804  * Returns:
805  * True if the allocator is completely free, false if there's still a node
806  * allocated in it.
807  */
808 bool drm_mm_clean(struct drm_mm * mm)
809 {
810 	struct list_head *head = &mm->head_node.node_list;
811 
812 	return (head->next->next == head);
813 }
814 EXPORT_SYMBOL(drm_mm_clean);
815 
816 /**
817  * drm_mm_init - initialize a drm-mm allocator
818  * @mm: the drm_mm structure to initialize
819  * @start: start of the range managed by @mm
820  * @size: size of the range managed by @mm
821  *
822  * Note that @mm must be cleared to 0 before calling this function.
823  */
824 void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
825 {
826 	INIT_LIST_HEAD(&mm->hole_stack);
827 	INIT_LIST_HEAD(&mm->unused_nodes);
828 	mm->num_unused = 0;
829 	mm->scanned_blocks = 0;
830 
831 	/* Clever trick to avoid a special case in the free hole tracking. */
832 	INIT_LIST_HEAD(&mm->head_node.node_list);
833 	INIT_LIST_HEAD(&mm->head_node.hole_stack);
834 	mm->head_node.hole_follows = 1;
835 	mm->head_node.scanned_block = 0;
836 	mm->head_node.scanned_prev_free = 0;
837 	mm->head_node.scanned_next_free = 0;
838 	mm->head_node.mm = mm;
839 	mm->head_node.start = start + size;
840 	mm->head_node.size = start - mm->head_node.start;
841 	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
842 
843 	mm->color_adjust = NULL;
844 }
845 EXPORT_SYMBOL(drm_mm_init);
846 
847 /**
848  * drm_mm_takedown - clean up a drm_mm allocator
849  * @mm: drm_mm allocator to clean up
850  *
851  * Note that it is a bug to call this function on an allocator which is not
852  * clean.
853  */
854 void drm_mm_takedown(struct drm_mm * mm)
855 {
856 	WARN(!list_empty(&mm->head_node.node_list),
857 	     "Memory manager not clean during takedown.\n");
858 }
859 EXPORT_SYMBOL(drm_mm_takedown);
860 
861 static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
862 				     const char *prefix)
863 {
864 	u64 hole_start, hole_end, hole_size;
865 
866 	if (entry->hole_follows) {
867 		hole_start = drm_mm_hole_node_start(entry);
868 		hole_end = drm_mm_hole_node_end(entry);
869 		hole_size = hole_end - hole_start;
870 		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
871 			 hole_end, hole_size);
872 		return hole_size;
873 	}
874 
875 	return 0;
876 }
877 
878 /**
879  * drm_mm_debug_table - dump allocator state to dmesg
880  * @mm: drm_mm allocator to dump
881  * @prefix: prefix to use for dumping to dmesg
882  */
883 void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
884 {
885 	struct drm_mm_node *entry;
886 	u64 total_used = 0, total_free = 0, total = 0;
887 
888 	total_free += drm_mm_debug_hole(&mm->head_node, prefix);
889 
890 	drm_mm_for_each_node(entry, mm) {
891 		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
892 			 entry->start + entry->size, entry->size);
893 		total_used += entry->size;
894 		total_free += drm_mm_debug_hole(entry, prefix);
895 	}
896 	total = total_free + total_used;
897 
898 	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
899 		 total_used, total_free);
900 }
901 EXPORT_SYMBOL(drm_mm_debug_table);
902 
903 #if defined(CONFIG_DEBUG_FS)
904 static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
905 {
906 	u64 hole_start, hole_end, hole_size;
907 
908 	if (entry->hole_follows) {
909 		hole_start = drm_mm_hole_node_start(entry);
910 		hole_end = drm_mm_hole_node_end(entry);
911 		hole_size = hole_end - hole_start;
912 		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
913 			   hole_end, hole_size);
914 		return hole_size;
915 	}
916 
917 	return 0;
918 }
919 
920 /**
921  * drm_mm_dump_table - dump allocator state to a seq_file
922  * @m: seq_file to dump to
923  * @mm: drm_mm allocator to dump
924  */
925 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
926 {
927 	struct drm_mm_node *entry;
928 	u64 total_used = 0, total_free = 0, total = 0;
929 
930 	total_free += drm_mm_dump_hole(m, &mm->head_node);
931 
932 	drm_mm_for_each_node(entry, mm) {
933 		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
934 			   entry->start + entry->size, entry->size);
935 		total_used += entry->size;
936 		total_free += drm_mm_dump_hole(m, entry);
937 	}
938 	total = total_free + total_used;
939 
940 	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
941 		   total_used, total_free);
942 	return 0;
943 }
944 EXPORT_SYMBOL(drm_mm_dump_table);
945 #endif
946