xref: /openbsd-src/sys/dev/pci/drm/drm_mm.c (revision 4b70baf6e17fc8b27fc1f7fa7929335753fa94c3)
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the Linux core if it suits them; the upside of
 * drm_mm is that it lives in the DRM core, which means it is easier to extend
 * for some of the crazier special-purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * data structures. drm_mm itself will not do any memory allocations of its
 * own, so if drivers choose not to embed nodes they still need to allocate
 * them themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps avoid
 * looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic data structures seems to be a fairly decent approach to
 * clustering allocations and avoiding too much fragmentation. This means free
 * space searches are O(num_holes). Given all the fancy features drm_mm
 * supports, something better would be fairly complex, and since gfx thrashing
 * is a fairly steep performance cliff this is not a real concern. Removing a
 * node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided as are
 * some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe, drivers need to protect
 * modifications with their own locking. The idea behind this is that for a full
 * memory manager additional data needs to be protected anyway, hence internal
 * locking would be fully redundant.
 */
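
/*
 * Illustrative sketch (not part of the original file): minimal driver-side
 * usage with an embedded node. "my_buffer", "my_alloc" and "my_free" are
 * hypothetical names; locking and error handling are the caller's problem.
 *
 *	struct my_buffer {
 *		struct drm_mm_node node;	// embedded, drm_mm allocates nothing
 *	};
 *
 *	static int my_alloc(struct drm_mm *mm, struct my_buffer *buf, u64 size)
 *	{
 *		// Bottom-up first-fit search over the whole managed range.
 *		return drm_mm_insert_node_in_range_generic(mm, &buf->node,
 *							   size, 0, 0,
 *							   0, U64_MAX,
 *							   DRM_MM_SEARCH_DEFAULT,
 *							   DRM_MM_CREATE_DEFAULT);
 *	}
 *
 *	static void my_free(struct my_buffer *buf)
 *	{
 *		drm_mm_remove_node(&buf->node);	// O(1); node may be reused
 *	}
 */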

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						u64 alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags);

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = STACKDEPTH,
		.skip = 1
	};

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	/* May be called under spinlock, so avoid sleeping */
	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long entries[STACKDEPTH];
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		struct stack_trace trace = {
			.entries = entries,
			.max_entries = STACKDEPTH
		};

		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		depot_fetch_stack(node->stack, &trace);
		snprint_stack_trace(buf, BUFSZ, &trace, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

#ifdef __linux__
INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)
#else
struct drm_mm_node *
drm_mm_interval_tree_iter_first(struct rb_root *rb, u64 start, u64 last)
{
	struct drm_mm *mm = container_of(rb, typeof(*mm), interval_tree);
	struct drm_mm_node *node;

	drm_mm_for_each_node(node, mm) {
		if (LAST(node) >= start && START(node) <= last)
			return node;
	}
	return NULL;
}
#endif

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree,
					       start, last);
}
EXPORT_SYMBOL(__drm_mm_interval_first);

#ifdef __linux__
static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_node;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start)
			link = &parent->rb.rb_left;
		else
			link = &parent->rb.rb_right;
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented(&node->rb,
			    &mm->interval_tree,
			    &drm_mm_interval_tree_augment);
}
#endif

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, u64 alignment,
				 unsigned long color,
				 u64 range_start, u64 range_end,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node) || node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	adj_start = max(adj_start, range_start);
	adj_end = min(adj_end, range_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 rem;

		div64_u64_rem(adj_start, alignment, &rem);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

#ifdef __linux__
	drm_mm_interval_tree_add_node(hole_node, node);
#endif

	DRM_MM_BUG_ON(node->start < range_start);
	DRM_MM_BUG_ON(node->start < adj_start);
	DRM_MM_BUG_ON(node->start + node->size > adj_end);
	DRM_MM_BUG_ON(node->start + node->size > range_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set up before the range allocator can be
 * set up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
					       node->start, ~(u64)0);
	if (hole) {
		if (hole->start < end)
			return -ENOSPC;
	} else {
		hole = list_entry(drm_mm_nodes(mm), typeof(*hole), node_list);
	}

	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
	if (!drm_mm_hole_follows(hole))
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = __drm_mm_hole_node_end(hole);

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;
	node->allocated = 1;

	list_add(&node->node_list, &hole->node_list);

#ifdef __linux__
	drm_mm_interval_tree_add_node(hole, node);
#endif

	if (node->start == hole_start) {
		hole->hole_follows = 0;
		list_del(&hole->hole_stack);
	}

	node->hole_follows = 0;
	if (end != hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);

	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
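
/*
 * Illustrative sketch (not part of the original file): taking over a firmware
 * framebuffer at a fixed offset. "fb_base" and "fb_size" are hypothetical
 * values read out of the scanout hardware.
 *
 *	struct drm_mm_node fw_fb = {};		// all other fields must be 0
 *
 *	fw_fb.start = fb_base;			// placement is fixed, not searched
 *	fw_fb.size = fb_size;
 *	err = drm_mm_reserve_node(&mm, &fw_fb);	// -ENOSPC if the range is taken
 */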

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, u64 alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node,
			     size, alignment, color,
			     start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
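
/*
 * Illustrative sketch (values made up, reusing the hypothetical buf from the
 * sketch further above): a range-restricted, aligned, top-down allocation,
 * e.g. for an aperture where only the first 256 MiB are CPU mappable.
 * DRM_MM_SEARCH_BELOW walks the hole stack in reverse to pair with top-down
 * placement.
 *
 *	err = drm_mm_insert_node_in_range_generic(&mm, &buf->node,
 *						  size, 4096, 0,
 *						  0, 256ULL << 20,
 *						  DRM_MM_SEARCH_BELOW,
 *						  DRM_MM_CREATE_TOP);
 */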

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (drm_mm_hole_follows(node)) {
		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) ==
			      __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else {
		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) !=
			      __drm_mm_hole_node_end(node));
	}

	if (!drm_mm_hole_follows(prev_node)) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

#ifdef __linux__
	drm_mm_interval_tree_remove(node, &mm->interval_tree);
#endif
	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, u64 alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 rem;

		div64_u64_rem(start, alignment, &rem);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}
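
/*
 * Worked example for the alignment math above: with start = 0x1003 and
 * alignment = 0x1000, div64_u64_rem() gives rem = 3, so start is bumped to
 * 0x1003 + (0x1000 - 3) = 0x2000, and the hole only fits if
 * end >= 0x2000 + size.
 */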

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							u64 size,
							u64 alignment,
							unsigned long color,
							u64 start,
							u64 end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	DRM_MM_BUG_ON(mm->scan_active);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		adj_start = max(adj_start, start);
		adj_end = min(adj_end, end);

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	DRM_MM_BUG_ON(!old->allocated);

	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
#ifdef __linux__
	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
#endif
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;
	new->__subtree_last = old->__subtree_last;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not very
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So, like the free stack which needs to be walked before
 * a scan operation even begins, this is linear in the number of objects. It
 * doesn't seem to hurt too badly. A sketch of a typical eviction loop follows
 * below.
 */
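
/*
 * Illustrative eviction loop (hypothetical driver code, as referenced above):
 * "my_obj", "lru" and "my_evict" are made-up names; locking and the actual
 * allocation after the scan are omitted.
 *
 *	struct drm_mm_scan scan;
 *	struct my_obj *obj, *next;
 *	LIST_HEAD(eviction_list);
 *
 *	drm_mm_scan_init_with_range(&scan, &mm, size, 0, 0,
 *				    0, U64_MAX, 0);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		// list_add() prepends, so walking eviction_list forward
 *		// later visits nodes in reverse order of addition.
 *		list_add(&obj->eviction_link, &eviction_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node))
 *			break;			// suitable hole found
 *	}
 *
 *	list_for_each_entry_safe(obj, next, &eviction_list, eviction_link) {
 *		// Must be called for every added block, in reverse order.
 *		if (drm_mm_scan_remove_block(&scan, &obj->node))
 *			my_evict(obj);		// frees obj->node
 *	}
 */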

/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @flags: flags to specify how the allocation will be performed afterwards
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 unsigned int flags)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->flags = flags;

	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);
	node->scanned_block = true;
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->flags == DRM_MM_CREATE_TOP)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->flags != DRM_MM_CREATE_TOP)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range_generic() or one of the
 * simpler versions of that function with !DRM_MM_SEARCH_BEST will then return
 * the just freed block (because it's at the top of the free_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!node->scanned_block);
	node->scanned_block = false;

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scan_active = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.allocated = 0;
	mm->head_node.hole_follows = 1;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->interval_tree = RB_ROOT;

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);
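
/*
 * Illustrative sketch (values made up): managing a 1 GiB range starting at 0.
 * The structure must be zeroed before drm_mm_init(), e.g. by static storage
 * or kzalloc().
 *
 *	static struct drm_mm mm;		// static storage is zero-initialized
 *
 *	drm_mm_init(&mm, 0, 1ULL << 30);
 *	...
 *	drm_mm_takedown(&mm);			// all nodes must be removed first
 */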

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
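
/*
 * Illustrative sketch (hypothetical debugfs hook): dumping the allocator
 * state through a seq_file-backed printer.
 *
 *	static int my_mm_show(struct seq_file *m, void *data)
 *	{
 *		struct drm_printer p = drm_seq_file_printer(m);
 *
 *		drm_mm_print(&mm, &p);
 *		return 0;
 *	}
 */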
903