/*	$OpenBSD: drm_mm.c,v 1.7 2015/09/23 23:12:11 kettenis Exp $	*/
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include "drm_mm.h"

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * drm_mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
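/*
 * Usage sketch (illustrative, not part of this file): drm_mm_pre_get() tops up
 * the cache of unused nodes with GFP_KERNEL allocations so that a later
 * drm_mm_get_block_generic() call made under a spinlock (atomic == 1) can fall
 * back to that cache.  The manager, lock and flag names below are assumptions
 * made for the sake of the example.
 *
 *	struct drm_mm_node *hole, *node = NULL;
 *
 *	drm_mm_pre_get(&dev_priv->vram_mm);
 *	spin_lock(&dev_priv->vram_lock);
 *	hole = drm_mm_search_free_generic(&dev_priv->vram_mm, size,
 *					  alignment, 0, DRM_MM_SEARCH_DEFAULT);
 *	if (hole)
 *		node = drm_mm_get_block_generic(hole, size, alignment, 0, 1);
 *	spin_unlock(&dev_priv->vram_lock);
 */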
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	unsigned long end = node->start + node->size;
	unsigned long hole_start;
	unsigned long hole_end;

	BUG_ON(node == NULL);

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
	     node->start, node->size);
	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     unsigned long color,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
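/*
 * Usage sketch (illustrative, not part of this file): drm_mm_reserve_node()
 * claims a range whose placement is already fixed, for example a region set
 * aside by firmware.  The caller fills in start and size in an otherwise
 * cleared node; the variable names below are assumptions.
 *
 *	struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *
 *	if (!node)
 *		return -ENOMEM;
 *	node->start = reserved_base;
 *	node->size = reserved_size;
 *	ret = drm_mm_reserve_node(&dev_priv->vram_mm, node);
 *	if (ret) {
 *		kfree(node);	// -ENOSPC: range not inside a free hole
 *		return ret;
 *	}
 */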
/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       unsigned long size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags flags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, flags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						   unsigned long size,
						   unsigned alignment,
						   unsigned long color,
						   unsigned long start,
						   unsigned long end,
						   int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
				   start, end);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					unsigned long size, unsigned alignment,
					unsigned long color,
					unsigned long start, unsigned long end,
					enum drm_mm_search_flags flags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, flags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
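/*
 * Usage sketch (illustrative, not part of this file): the embedded-node API
 * above lets a driver put the struct drm_mm_node inside its own object, so no
 * separate node allocation is needed.  The object layout, manager name and the
 * DRM_MM_SEARCH_DEFAULT flag from drm_mm.h are assumptions here.
 *
 *	struct my_buffer {
 *		struct drm_mm_node vma;		// must start out zeroed
 *		...
 *	};
 *
 *	ret = drm_mm_insert_node_generic(&dev_priv->vram_mm, &buf->vma,
 *					 size, alignment, 0,
 *					 DRM_MM_SEARCH_DEFAULT);
 *	if (ret)
 *		return ret;			// -ENOSPC: no hole big enough
 *	...
 *	drm_mm_remove_node(&buf->vma);		// on teardown; no kfree()
 */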
/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
	       || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);

static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			start += alignment - tmp;
	}

	return end >= start + size;
}

struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
					       unsigned long size,
					       unsigned alignment,
					       unsigned long color,
					       enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							 unsigned long size,
							 unsigned alignment,
							 unsigned long color,
							 unsigned long start,
							 unsigned long end,
							 enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
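/*
 * Usage sketch (illustrative, not part of this file): with the older two-step
 * API a search helper picks the hole and a get_block variant carves the node
 * out of it; such nodes are returned with drm_mm_put_block(), never kfree().
 * DRM_MM_SEARCH_BEST requests best fit instead of first fit.  The manager name
 * below is an assumption.
 *
 *	hole = drm_mm_search_free_generic(&dev_priv->vram_mm, size,
 *					  alignment, 0, DRM_MM_SEARCH_BEST);
 *	if (!hole)
 *		return -ENOSPC;
 *	node = drm_mm_get_block_generic(hole, size, alignment, 0, 0);
 *	...
 *	drm_mm_put_block(node);
 */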
/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
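/*
 * Usage sketch (illustrative, not part of this file): drm_mm_replace_node()
 * transfers the bookkeeping for an allocated range from one embedded node to
 * another, e.g. when the object embedding the node is being swapped out for a
 * new one.  The field names are assumptions.
 *
 *	drm_mm_replace_node(&old_buf->vma, &new_buf->vma);
 *	// new_buf->vma now owns the range; old_buf->vma is no longer allocated
 */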
/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the hole stack).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
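/*
 * Usage sketch (illustrative, not part of this file) of the eviction flow the
 * scan helpers support, roughly what a driver does when an allocation fails
 * and older buffers must be unbound to make room.  The object type, list
 * heads and my_obj_evict() are assumptions.
 *
 *	LIST_HEAD(scan_list);
 *	struct my_obj *obj, *tmp;
 *	int found = 0;
 *
 *	drm_mm_init_scan(&dev_priv->vram_mm, size, alignment, 0);
 *	list_for_each_entry(obj, &dev_priv->lru, lru_link) {
 *		list_add(&obj->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&obj->vma)) {
 *			found = 1;
 *			break;
 *		}
 *	}
 *
 *	// Every scanned block must leave the scan list again; walking
 *	// scan_list visits the most recently added block first because
 *	// list_add() prepends.  Evict those for which remove returns non-zero.
 *	list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
 *		int evict = drm_mm_scan_remove_block(&obj->vma);
 *		list_del(&obj->scan_link);
 *		if (found && evict)
 *			my_obj_evict(obj);	// ends in drm_mm_remove_node()
 *	}
 */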
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

void drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	mtx_init(&mm->unused_lock, IPL_NONE);

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm *mm)
{
	struct drm_mm_node *entry, *next;

	if (WARN(!list_empty(&mm->head_node.node_list),
		 "Memory manager not clean. Delaying takedown\n")) {
		return;
	}

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
				       const char *prefix)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
		       prefix, hole_start, hole_end,
		       hole_size);
		return hole_size;
	}

	return 0;
}

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
		       prefix, entry->start, entry->start + entry->size,
		       entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
	       total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
			   hole_start, hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
			   entry->start, entry->start + entry->size,
			   entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif