/*	$NetBSD: drm_mm.c,v 1.6 2020/02/14 14:34:57 maya Exp $	*/

/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree were used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_mm.c,v 1.6 2020/02/14 14:34:57 maya Exp $");

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the Linux core if it suits them; the upside of
 * drm_mm is that it lives in the DRM core, which makes it easier to extend
 * for some of the crazier special-purpose needs of GPUs.
 *
 * The main data struct is &drm_mm; allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable data
 * structures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
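 *
 * As a rough sketch of both points (embedding and reservation), a driver
 * might do something like the following. The hypo_vram_mgr structure and
 * the firmware-probe parameters are hypothetical driver code, not DRM API,
 * and the structure is assumed to be zeroed by its allocator:
 *
 *	struct hypo_vram_mgr {
 *		struct drm_mm mm;
 *		struct drm_mm_node fw_fb;
 *	};
 *
 *	static int hypo_vram_init(struct hypo_vram_mgr *mgr, u64 vram_size,
 *				  u64 fb_start, u64 fb_size)
 *	{
 *		drm_mm_init(&mgr->mm, 0, vram_size);
 *
 *		mgr->fw_fb.start = fb_start;
 *		mgr->fw_fb.size = fb_size;
 *		mgr->fw_fb.color = 0;
 *		return drm_mm_reserve_node(&mgr->mm, &mgr->fw_fb);
 *	}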
 *
 * drm_mm maintains a stack of the most recently freed holes, which of all
 * simplistic data structures seems to be a fairly decent approach to
 * clustering allocations and avoiding too much fragmentation. This means
 * free-space searches are O(num_holes). Given all the fancy features drm_mm
 * supports, something better would be fairly complex, and since gfx
 * thrashing is a fairly steep cliff this is not a real concern. Removing a
 * node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which, in conjunction with a driver callback, can
 * be used to implement sophisticated placement restrictions. The i915 DRM
 * driver uses this to implement guard pages between incompatible caching
 * domains in the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 */

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags);

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller.
 * This is useful to initialize the allocator with preallocated objects which
 * must be set up before the range allocator can be set up, e.g. when taking
 * over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 end = node->start + node->size;
	u64 hole_start;
	u64 hole_end;

	BUG_ON(node == NULL);

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, unsigned alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	adj_start = max(adj_start, start);
	adj_end = min(adj_end, end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, unsigned alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
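 *
 * A minimal allocate/free sketch using this file's own API; the hypo_mm
 * pointer and the surrounding driver code are hypothetical assumptions:
 *
 *	struct drm_mm_node *node;
 *	int ret;
 *
 *	node = kzalloc(sizeof(*node), GFP_KERNEL);
 *	if (!node)
 *		return -ENOMEM;
 *
 *	ret = drm_mm_insert_node_generic(hypo_mm, node, size, 0, 0,
 *					 DRM_MM_SEARCH_DEFAULT,
 *					 DRM_MM_CREATE_DEFAULT);
 *	if (ret) {
 *		kfree(node);
 *		return ret;
 *	}
 *
 *	... use node->start and node->size ...
 *
 *	drm_mm_remove_node(node);
 *	kfree(node);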
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 tmp = start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		adj_start = max(adj_start, start);
		adj_end = min(adj_end, end);

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence can't
 * move allocations by reassigning pointers. It's a combination of remove and
 * insert with the guarantee that the allocation start will match.
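 *
 * A short sketch; the hypo_obj structures embedding a drm_mm_node here are
 * hypothetical driver code, only drm_mm_replace_node() itself is this file's
 * API. After the call old_obj's node is unallocated and old_obj can be torn
 * down:
 *
 *	struct hypo_obj *new_obj = kzalloc(sizeof(*new_obj), GFP_KERNEL);
 *
 *	if (!new_obj)
 *		return -ENOMEM;
 *	drm_mm_replace_node(&old_obj->node, &new_obj->node);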
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object.
 * When evicting objects to make space for a new one it is therefore not very
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * Then the driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is
 * used in scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So like the free stack which
 * needs to be walked before a scan operation even begins, this is linear in
 * the number of objects. It doesn't seem to hurt badly.
 */

/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole.
 * Note that there's no need to specify allocation flags, since they only
 * change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
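 *
 * A rough eviction-loop sketch of the whole scan cycle described in the DOC
 * section above. The hypo_obj structure, the lru list and the hypo_evict()
 * helper (assumed to call drm_mm_remove_node() on the object's node) are
 * hypothetical driver code, not DRM API:
 *
 *	struct hypo_obj *obj, *tmp;
 *	LIST_HEAD(scan_list);
 *	LIST_HEAD(evict_list);
 *	bool found = false;
 *
 *	drm_mm_init_scan(mm, size, 0, 0);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add_tail(&obj->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	list_for_each_entry_safe_reverse(obj, tmp, &scan_list, scan_link) {
 *		bool evict = drm_mm_scan_remove_block(&obj->node);
 *
 *		list_del(&obj->scan_link);
 *		if (found && evict)
 *			list_add(&obj->evict_link, &evict_list);
 *	}
 *
 *	list_for_each_entry_safe(obj, tmp, &evict_list, evict_link) {
 *		list_del(&obj->evict_link);
 *		hypo_evict(obj);
 *	}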
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
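 *
 * A teardown sketch; the obj_list of driver objects embedding nodes is a
 * hypothetical assumption, while the drm_mm calls are this file's API:
 *
 *	list_for_each_entry_safe(obj, tmp, &obj_list, link) {
 *		drm_mm_remove_node(&obj->node);
 *		list_del(&obj->link);
 *		kfree(obj);
 *	}
 *	WARN_ON(!drm_mm_clean(mm));
 *	drm_mm_takedown(mm);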
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	WARN(!list_empty(&mm->head_node.node_list),
	     "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
			     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#"PRIx64"-%#"PRIx64": %"PRIu64": free\n", prefix,
			 hole_start, hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#"PRIx64"-%#"PRIx64": %"PRIu64": used\n", prefix,
			 entry->start, entry->start + entry->size,
			 entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %"PRIu64", used %"PRIu64" free %"PRIu64"\n",
		 prefix, total, total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif