Lines Matching +full:dma +full:- +full:pool
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
27 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
28 * over the DMA pools:
29 * - Pool collects recently freed pages for reuse (and hooks up to
31 * - Tracks pages currently in use
32 * - Tracks whether the page is UC, WB or cached (and reverts to WB
39 #include <linux/dma-mapping.h>
77 * The pool structure. There are usually six pools:
78 * - generic (not restricted to DMA32):
79 * - write combined, uncached, cached.
80 * - dma32 (up to 2^32 - so up to 4GB):
81 * - write combined, uncached, cached.
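For orientation, the six pools are combinations of a caching attribute with an optional DMA32 restriction. A minimal sketch of how such type flags could be encoded as a bitmask (the names IS_UC, IS_WC and IS_CACHED do appear in the matched lines below; the exact enum values here are assumptions):

    enum pool_type {
            POOL_IS_UNDEFINED,
            IS_WC     = 1 << 1,   /* write combined */
            IS_UC     = 1 << 2,   /* uncached */
            IS_CACHED = 1 << 3,   /* cacheable (write-back) */
            IS_DMA32  = 1 << 4,   /* restricted to the first 4GB */
    };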
84 * @pools: The 'struct device->dma_pools' link.
85 * @type: Type of the pool
87 * used with irqsave/irqrestore variants because the pool allocator may be called
89 * @inuse_list: Pool of pages that are in use. The order is very important and
91 * @free_list: Pool of pages that are free to be used. No order requirements.
93 * @size: Size used during DMA allocation.
94 * @npages_free: Count of available pages for re-use.
96 * @nfrees: Stats when pool is shrinking.
97 * @nrefills: Stats when the pool is grown.
99 * @name: Name of the pool.
100 * @dev_name: Name derived from dev - similar to how dev_info works.
104 struct list_head pools; /* The 'struct device->dma_pools' link */
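Assembling the kernel-doc above, the pool descriptor looks roughly like this (field order, exact integer types and the name-buffer lengths are assumptions, not verbatim from the file):

    struct dma_pool {
            struct list_head pools;      /* on 'struct device->dma_pools' */
            enum pool_type type;
            spinlock_t lock;             /* taken with irqsave/irqrestore */
            struct list_head inuse_list; /* pages handed out */
            struct list_head free_list;  /* pages ready for reuse */
            struct device *dev;
            unsigned size;               /* size used during DMA allocation */
            unsigned npages_free;
            unsigned npages_in_use;
            unsigned long nfrees;        /* stats: pool shrinking */
            unsigned long nrefills;      /* stats: pool growing */
            gfp_t gfp_flags;
            char name[13];               /* e.g. "cached dma32" */
            char dev_name[64];           /* derived from dev, dev_info() style */
    };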
122 * the DMA address.
125 * @dma: The bus address of the page. If the page is not allocated
126 * via the DMA API, it will be -1.
132 dma_addr_t dma; member
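The per-page bookkeeping record, reassembled from the fragments above (field order assumed):

    struct dma_page {
            struct list_head page_list; /* link on free_list/inuse_list */
            void *vaddr;                /* CPU address from dma_alloc_coherent() */
            struct page *p;             /* the backing struct page */
            dma_addr_t dma;             /* bus address; -1 if not DMA-API backed */
    };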
136 * Limits for the pool. They are handled without locks because the only place where
149 * DMA pools. Guarded by _mutex->lock.
150 * @pools: The link to 'struct ttm_pool_manager->pools'
151 * @dev: The 'struct device' associated with the 'pool'
152 * @pool: The 'struct dma_pool' associated with the 'dev'
157 struct dma_pool *pool; member
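Likewise, the per-device entry kept on the global manager's list, sketched from the kernel-doc above:

    struct device_pools {
            struct list_head pools; /* link to 'struct ttm_pool_manager->pools' */
            struct device *dev;     /* the 'struct device' owning the pool */
            struct dma_pool *pool;  /* the 'struct dma_pool' for that 'dev' */
    };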
161 * struct ttm_pool_manager - Holds memory pools for fast allocation
165 * @options: Limits for the pool.
222 m->options.max_size = val; in ttm_pool_store()
224 m->options.small = val; in ttm_pool_store()
235 m->options.alloc_size = val; in ttm_pool_store()
249 val = m->options.max_size; in ttm_pool_show()
251 val = m->options.small; in ttm_pool_show()
253 val = m->options.alloc_size; in ttm_pool_show()
306 static int ttm_set_pages_caching(struct dma_pool *pool, in ttm_set_pages_caching() argument
311 if (pool->type & IS_UC) { in ttm_set_pages_caching()
315 pool->dev_name, cpages); in ttm_set_pages_caching()
317 if (pool->type & IS_WC) { in ttm_set_pages_caching()
321 pool->dev_name, cpages); in ttm_set_pages_caching()
326 static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page) in __ttm_dma_free_page() argument
328 dma_addr_t dma = d_page->dma; in __ttm_dma_free_page() local
329 dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma); in __ttm_dma_free_page()
334 static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool) in __ttm_dma_alloc_page() argument
342 d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size, in __ttm_dma_alloc_page()
343 &d_page->dma, in __ttm_dma_alloc_page()
344 pool->gfp_flags); in __ttm_dma_alloc_page()
345 if (d_page->vaddr) in __ttm_dma_alloc_page()
346 d_page->p = virt_to_page(d_page->vaddr); in __ttm_dma_alloc_page()
369 static void ttm_pool_update_free_locked(struct dma_pool *pool, in ttm_pool_update_free_locked() argument
372 pool->npages_free -= freed_pages; in ttm_pool_update_free_locked()
373 pool->nfrees += freed_pages; in ttm_pool_update_free_locked()
378 static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages, in ttm_dma_pages_put() argument
383 /* Don't set WB on WB page pool. */ in ttm_dma_pages_put()
384 if (npages && !(pool->type & IS_CACHED) && in ttm_dma_pages_put()
387 pool->dev_name, npages); in ttm_dma_pages_put()
390 list_del(&d_page->page_list); in ttm_dma_pages_put()
391 __ttm_dma_free_page(pool, d_page); in ttm_dma_pages_put()
395 static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page) in ttm_dma_page_put() argument
397 /* Don't set WB on WB page pool. */ in ttm_dma_page_put()
398 if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1)) in ttm_dma_page_put()
400 pool->dev_name, 1); in ttm_dma_page_put()
402 list_del(&d_page->page_list); in ttm_dma_page_put()
403 __ttm_dma_free_page(pool, d_page); in ttm_dma_page_put()
407 * Free pages from pool.
412 * @pool: to free the pages from
413 * @nr_free: number of pages to free; FREE_ALL_PAGES drains the whole pool
415 static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free) in ttm_dma_page_pool_free() argument
429 pool->dev_name, pool->name, current->pid, in ttm_dma_page_pool_free()
437 pr_err("%s: Failed to allocate memory for pool free operation\n", in ttm_dma_page_pool_free()
438 pool->dev_name); in ttm_dma_page_pool_free()
443 spin_lock_irqsave(&pool->lock, irq_flags); in ttm_dma_page_pool_free()
446 list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list, in ttm_dma_page_pool_free()
452 list_move(&dma_p->page_list, &d_pages); in ttm_dma_page_pool_free()
454 pages_to_free[freed_pages++] = dma_p->p; in ttm_dma_page_pool_free()
458 ttm_pool_update_free_locked(pool, freed_pages); in ttm_dma_page_pool_free()
461 * we unlock the pool to prevent stalling. in ttm_dma_page_pool_free()
463 spin_unlock_irqrestore(&pool->lock, irq_flags); in ttm_dma_page_pool_free()
465 ttm_dma_pages_put(pool, &d_pages, pages_to_free, in ttm_dma_page_pool_free()
471 nr_free -= freed_pages; in ttm_dma_page_pool_free()
493 /* remove range of pages from the pool */ in ttm_dma_page_pool_free()
495 ttm_pool_update_free_locked(pool, freed_pages); in ttm_dma_page_pool_free()
496 nr_free -= freed_pages; in ttm_dma_page_pool_free()
499 spin_unlock_irqrestore(&pool->lock, irq_flags); in ttm_dma_page_pool_free()
502 ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages); in ttm_dma_page_pool_free()
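The fragments above all belong to one free loop. Its essential shape, condensed for readability (declarations and the retry path are elided; a simplified sketch, not the verbatim function):

    spin_lock_irqsave(&pool->lock, irq_flags);
    list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list, page_list) {
            if (freed_pages >= npages_to_free)
                    break;
            /* detach pages under the lock ... */
            list_move(&dma_p->page_list, &d_pages);
            pages_to_free[freed_pages++] = dma_p->p;
    }
    ttm_pool_update_free_locked(pool, freed_pages);
    spin_unlock_irqrestore(&pool->lock, irq_flags);
    /* ... but do the caching reset and dma_free_coherent() with the
     * lock dropped, so concurrent allocators are not stalled. */
    ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);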
511 struct dma_pool *pool; in ttm_dma_free_pool() local
516 mutex_lock(&_manager->lock); in ttm_dma_free_pool()
517 list_for_each_entry_reverse(p, &_manager->pools, pools) { in ttm_dma_free_pool()
518 if (p->dev != dev) in ttm_dma_free_pool()
520 pool = p->pool; in ttm_dma_free_pool()
521 if (pool->type != type) in ttm_dma_free_pool()
524 list_del(&p->pools); in ttm_dma_free_pool()
526 _manager->npools--; in ttm_dma_free_pool()
529 list_for_each_entry_reverse(pool, &dev->dma_pools, pools) { in ttm_dma_free_pool()
530 if (pool->type != type) in ttm_dma_free_pool()
533 ttm_dma_page_pool_free(pool, FREE_ALL_PAGES); in ttm_dma_free_pool()
534 WARN_ON((pool->npages_in_use + pool->npages_free) != 0); in ttm_dma_free_pool()
536 * struct device has been dropped - so nobody should be in ttm_dma_free_pool()
539 list_del(&pool->pools); in ttm_dma_free_pool()
540 kfree(pool); in ttm_dma_free_pool()
543 mutex_unlock(&_manager->lock); in ttm_dma_free_pool()
547 * When the 'struct device' is freed, this destructor is run,
548 * though the pool might have already been freed earlier.
552 struct dma_pool *pool = *(struct dma_pool **)res; in ttm_dma_pool_release() local
554 if (pool) in ttm_dma_pool_release()
555 ttm_dma_free_pool(dev, pool->type); in ttm_dma_pool_release()
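The release hook above is wired up via devres inside ttm_dma_pool_init() below. A minimal sketch of that pattern, with error handling elided (devres_alloc/devres_add are the standard kernel API; the surrounding code is paraphrased):

    ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
    if (!ptr)
            return NULL;
    /* ... allocate and register the pool ... */
    *ptr = pool;          /* matches '*ptr = pool;' at source line 628 */
    devres_add(dev, ptr); /* ttm_dma_pool_release() runs on device teardown */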
569 struct dma_pool *pool = NULL, **ptr; in ttm_dma_pool_init() local
571 int ret = -ENODEV; in ttm_dma_pool_init()
581 ret = -ENOMEM; in ttm_dma_pool_init()
583 pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL, in ttm_dma_pool_init()
585 if (!pool) in ttm_dma_pool_init()
593 INIT_LIST_HEAD(&sec_pool->pools); in ttm_dma_pool_init()
594 sec_pool->dev = dev; in ttm_dma_pool_init()
595 sec_pool->pool = pool; in ttm_dma_pool_init()
597 INIT_LIST_HEAD(&pool->free_list); in ttm_dma_pool_init()
598 INIT_LIST_HEAD(&pool->inuse_list); in ttm_dma_pool_init()
599 INIT_LIST_HEAD(&pool->pools); in ttm_dma_pool_init()
600 spin_lock_init(&pool->lock); in ttm_dma_pool_init()
601 pool->dev = dev; in ttm_dma_pool_init()
602 pool->npages_free = pool->npages_in_use = 0; in ttm_dma_pool_init()
603 pool->nfrees = 0; in ttm_dma_pool_init()
604 pool->gfp_flags = flags; in ttm_dma_pool_init()
605 pool->size = PAGE_SIZE; in ttm_dma_pool_init()
606 pool->type = type; in ttm_dma_pool_init()
607 pool->nrefills = 0; in ttm_dma_pool_init()
608 p = pool->name; in ttm_dma_pool_init()
611 p += snprintf(p, sizeof(pool->name) - (p - pool->name), in ttm_dma_pool_init()
617 * - the kobj->name has already been deallocated. */ in ttm_dma_pool_init()
618 snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s", in ttm_dma_pool_init()
620 mutex_lock(&_manager->lock); in ttm_dma_pool_init()
622 list_add(&sec_pool->pools, &_manager->pools); in ttm_dma_pool_init()
623 _manager->npools++; in ttm_dma_pool_init()
625 list_add(&pool->pools, &dev->dma_pools); in ttm_dma_pool_init()
626 mutex_unlock(&_manager->lock); in ttm_dma_pool_init()
628 *ptr = pool; in ttm_dma_pool_init()
631 return pool; in ttm_dma_pool_init()
635 kfree(pool); in ttm_dma_pool_init()
642 struct dma_pool *pool, *tmp, *found = NULL; in ttm_dma_find_pool() local
649 * graphics driver loading - in drm_pci_init it calls either in ttm_dma_find_pool()
658 list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) { in ttm_dma_find_pool()
659 if (pool->type != type) in ttm_dma_find_pool()
661 found = pool; in ttm_dma_find_pool()
670 * pool.
672 static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool, in ttm_dma_handle_caching_state_failure() argument
686 if (d_page->p != p) in ttm_dma_handle_caching_state_failure()
689 list_del(&d_page->page_list); in ttm_dma_handle_caching_state_failure()
690 __ttm_dma_free_page(pool, d_page); in ttm_dma_handle_caching_state_failure()
705 static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool, in ttm_dma_pool_alloc_new_pages() argument
722 pool->dev_name); in ttm_dma_pool_alloc_new_pages()
723 return -ENOMEM; in ttm_dma_pool_alloc_new_pages()
728 pool->dev_name, pool->name, current->pid, count); in ttm_dma_pool_alloc_new_pages()
732 dma_p = __ttm_dma_alloc_page(pool); in ttm_dma_pool_alloc_new_pages()
735 pool->dev_name, i); in ttm_dma_pool_alloc_new_pages()
737 /* store already allocated pages in the pool after in ttm_dma_pool_alloc_new_pages()
740 r = ttm_set_pages_caching(pool, caching_array, in ttm_dma_pool_alloc_new_pages()
744 pool, d_pages, caching_array, in ttm_dma_pool_alloc_new_pages()
747 r = -ENOMEM; in ttm_dma_pool_alloc_new_pages()
750 p = dma_p->p; in ttm_dma_pool_alloc_new_pages()
761 r = ttm_set_pages_caching(pool, caching_array, in ttm_dma_pool_alloc_new_pages()
765 pool, d_pages, caching_array, in ttm_dma_pool_alloc_new_pages()
772 list_add(&dma_p->page_list, d_pages); in ttm_dma_pool_alloc_new_pages()
776 r = ttm_set_pages_caching(pool, caching_array, cpages); in ttm_dma_pool_alloc_new_pages()
778 ttm_dma_handle_caching_state_failure(pool, d_pages, in ttm_dma_pool_alloc_new_pages()
789 static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool, in ttm_dma_page_pool_fill_locked() argument
792 unsigned count = _manager->options.small; in ttm_dma_page_pool_fill_locked()
793 int r = pool->npages_free; in ttm_dma_page_pool_fill_locked()
795 if (count > pool->npages_free) { in ttm_dma_page_pool_fill_locked()
800 spin_unlock_irqrestore(&pool->lock, *irq_flags); in ttm_dma_page_pool_fill_locked()
804 r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count); in ttm_dma_page_pool_fill_locked()
806 spin_lock_irqsave(&pool->lock, *irq_flags); in ttm_dma_page_pool_fill_locked()
809 list_splice(&d_pages, &pool->free_list); in ttm_dma_page_pool_fill_locked()
810 ++pool->nrefills; in ttm_dma_page_pool_fill_locked()
811 pool->npages_free += count; in ttm_dma_page_pool_fill_locked()
817 pr_err("%s: Failed to fill %s pool (r:%d)!\n", in ttm_dma_page_pool_fill_locked()
818 pool->dev_name, pool->name, r); in ttm_dma_page_pool_fill_locked()
823 list_splice_tail(&d_pages, &pool->free_list); in ttm_dma_page_pool_fill_locked()
824 pool->npages_free += cpages; in ttm_dma_page_pool_fill_locked()
836 static int ttm_dma_pool_get_pages(struct dma_pool *pool, in ttm_dma_pool_get_pages() argument
841 struct ttm_tt *ttm = &ttm_dma->ttm; in ttm_dma_pool_get_pages()
843 int count, r = -ENOMEM; in ttm_dma_pool_get_pages()
845 spin_lock_irqsave(&pool->lock, irq_flags); in ttm_dma_pool_get_pages()
846 count = ttm_dma_page_pool_fill_locked(pool, &irq_flags); in ttm_dma_pool_get_pages()
848 d_page = list_first_entry(&pool->free_list, struct dma_page, page_list); in ttm_dma_pool_get_pages()
849 ttm->pages[index] = d_page->p; in ttm_dma_pool_get_pages()
850 ttm_dma->dma_address[index] = d_page->dma; in ttm_dma_pool_get_pages()
851 list_move_tail(&d_page->page_list, &ttm_dma->pages_list); in ttm_dma_pool_get_pages()
853 pool->npages_in_use += 1; in ttm_dma_pool_get_pages()
854 pool->npages_free -= 1; in ttm_dma_pool_get_pages()
856 spin_unlock_irqrestore(&pool->lock, irq_flags); in ttm_dma_pool_get_pages()
862 * cached pages. On failure it will hold the negative return value (-ENOMEM, etc.).
866 struct ttm_tt *ttm = &ttm_dma->ttm; in ttm_dma_populate()
867 struct ttm_mem_global *mem_glob = ttm->glob->mem_glob; in ttm_dma_populate()
868 struct dma_pool *pool; in ttm_dma_populate() local
874 if (ttm->state != tt_unpopulated) in ttm_dma_populate()
877 type = ttm_to_type(ttm->page_flags, ttm->caching_state); in ttm_dma_populate()
878 if (ttm->page_flags & TTM_PAGE_FLAG_DMA32) in ttm_dma_populate()
882 if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) in ttm_dma_populate()
885 pool = ttm_dma_find_pool(dev, type); in ttm_dma_populate()
886 if (!pool) { in ttm_dma_populate()
887 pool = ttm_dma_pool_init(dev, gfp_flags, type); in ttm_dma_populate()
888 if (IS_ERR_OR_NULL(pool)) { in ttm_dma_populate()
889 return -ENOMEM; in ttm_dma_populate()
893 INIT_LIST_HEAD(&ttm_dma->pages_list); in ttm_dma_populate()
894 for (i = 0; i < ttm->num_pages; ++i) { in ttm_dma_populate()
895 ret = ttm_dma_pool_get_pages(pool, ttm_dma, i); in ttm_dma_populate()
898 return -ENOMEM; in ttm_dma_populate()
901 ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], in ttm_dma_populate()
905 return -ENOMEM; in ttm_dma_populate()
909 if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) { in ttm_dma_populate()
917 ttm->state = tt_unbound; in ttm_dma_populate()
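A driver wires ttm_dma_populate() into its ttm_tt populate hook. A hypothetical backend might look like this (the foo_* names are made up for illustration; 'struct ttm_dma_tt' embeds 'struct ttm_tt' as its first member, so container_of() recovers it):

    static int foo_ttm_tt_populate(struct ttm_tt *ttm)
    {
            struct ttm_dma_tt *ttm_dma =
                    container_of(ttm, struct ttm_dma_tt, ttm);
            struct foo_device *fdev = foo_device_from_ttm(ttm);

            return ttm_dma_populate(ttm_dma, fdev->dev);
    }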
928 mutex_lock(&_manager->lock); in ttm_dma_pool_get_num_unused_pages()
929 list_for_each_entry(p, &_manager->pools, pools) in ttm_dma_pool_get_num_unused_pages()
930 total += p->pool->npages_free; in ttm_dma_pool_get_num_unused_pages()
931 mutex_unlock(&_manager->lock); in ttm_dma_pool_get_num_unused_pages()
935 /* Put all pages in the pages list back into the correct pool to await reuse */
938 struct ttm_tt *ttm = &ttm_dma->ttm; in ttm_dma_unpopulate()
939 struct dma_pool *pool; in ttm_dma_unpopulate() local
946 type = ttm_to_type(ttm->page_flags, ttm->caching_state); in ttm_dma_unpopulate()
947 pool = ttm_dma_find_pool(dev, type); in ttm_dma_unpopulate()
948 if (!pool) in ttm_dma_unpopulate()
951 is_cached = (ttm_dma_find_pool(pool->dev, in ttm_dma_unpopulate()
952 ttm_to_type(ttm->page_flags, tt_cached)) == pool); in ttm_dma_unpopulate()
955 list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) { in ttm_dma_unpopulate()
956 ttm->pages[count] = d_page->p; in ttm_dma_unpopulate()
960 spin_lock_irqsave(&pool->lock, irq_flags); in ttm_dma_unpopulate()
961 pool->npages_in_use -= count; in ttm_dma_unpopulate()
963 pool->nfrees += count; in ttm_dma_unpopulate()
965 pool->npages_free += count; in ttm_dma_unpopulate()
966 list_splice(&ttm_dma->pages_list, &pool->free_list); in ttm_dma_unpopulate()
968 if (pool->npages_free > _manager->options.max_size) { in ttm_dma_unpopulate()
969 npages = pool->npages_free - _manager->options.max_size; in ttm_dma_unpopulate()
976 spin_unlock_irqrestore(&pool->lock, irq_flags); in ttm_dma_unpopulate()
979 list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) { in ttm_dma_unpopulate()
980 ttm_mem_global_free_page(ttm->glob->mem_glob, in ttm_dma_unpopulate()
981 d_page->p); in ttm_dma_unpopulate()
982 ttm_dma_page_put(pool, d_page); in ttm_dma_unpopulate()
986 ttm_mem_global_free_page(ttm->glob->mem_glob, in ttm_dma_unpopulate()
987 ttm->pages[i]); in ttm_dma_unpopulate()
991 INIT_LIST_HEAD(&ttm_dma->pages_list); in ttm_dma_unpopulate()
992 for (i = 0; i < ttm->num_pages; i++) { in ttm_dma_unpopulate()
993 ttm->pages[i] = NULL; in ttm_dma_unpopulate()
994 ttm_dma->dma_address[i] = 0; in ttm_dma_unpopulate()
997 /* shrink pool if necessary (only on !is_cached pools) */ in ttm_dma_unpopulate()
999 ttm_dma_page_pool_free(pool, npages); in ttm_dma_unpopulate()
1000 ttm->state = tt_unpopulated; in ttm_dma_unpopulate()
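And the matching unpopulate hook hands the pages back to the pool (same hypothetical foo_* names as above):

    static void foo_ttm_tt_unpopulate(struct ttm_tt *ttm)
    {
            struct ttm_dma_tt *ttm_dma =
                    container_of(ttm, struct ttm_dma_tt, ttm);
            struct foo_device *fdev = foo_device_from_ttm(ttm);

            ttm_dma_unpopulate(ttm_dma, fdev->dev);
    }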
1005 * Callback for mm to request the pool to reduce the number of pages held.
1013 unsigned shrink_pages = sc->nr_to_scan; in ttm_dma_pool_mm_shrink()
1016 if (list_empty(&_manager->pools)) in ttm_dma_pool_mm_shrink()
1019 mutex_lock(&_manager->lock); in ttm_dma_pool_mm_shrink()
1020 pool_offset = pool_offset % _manager->npools; in ttm_dma_pool_mm_shrink()
1021 list_for_each_entry(p, &_manager->pools, pools) { in ttm_dma_pool_mm_shrink()
1024 if (!p->dev) in ttm_dma_pool_mm_shrink()
1028 /* Do it in round-robin fashion. */ in ttm_dma_pool_mm_shrink()
1032 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free); in ttm_dma_pool_mm_shrink()
1034 p->pool->dev_name, p->pool->name, current->pid, in ttm_dma_pool_mm_shrink()
1037 mutex_unlock(&_manager->lock); in ttm_dma_pool_mm_shrink()
1038 /* return estimated number of unused pages in pool */ in ttm_dma_pool_mm_shrink()
1044 manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink; in ttm_dma_pool_mm_shrink_init()
1045 manager->mm_shrink.seeks = 1; in ttm_dma_pool_mm_shrink_init()
1046 register_shrinker(&manager->mm_shrink); in ttm_dma_pool_mm_shrink_init()
1051 unregister_shrinker(&manager->mm_shrink); in ttm_dma_pool_mm_shrink_fini()
1056 int ret = -ENOMEM; in ttm_dma_page_alloc_init()
1060 pr_info("Initializing DMA pool allocator\n"); in ttm_dma_page_alloc_init()
1066 mutex_init(&_manager->lock); in ttm_dma_page_alloc_init()
1067 INIT_LIST_HEAD(&_manager->pools); in ttm_dma_page_alloc_init()
1069 _manager->options.max_size = max_pages; in ttm_dma_page_alloc_init()
1070 _manager->options.small = SMALL_ALLOCATION; in ttm_dma_page_alloc_init()
1071 _manager->options.alloc_size = NUM_PAGES_TO_ALLOC; in ttm_dma_page_alloc_init()
1073 /* This takes care of auto-freeing the _manager */ in ttm_dma_page_alloc_init()
1074 ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type, in ttm_dma_page_alloc_init()
1075 &glob->kobj, "dma_pool"); in ttm_dma_page_alloc_init()
1077 kobject_put(&_manager->kobj); in ttm_dma_page_alloc_init()
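The "auto-freeing" mentioned above relies on the kobj_type's release callback, which is not among the matched lines. Its assumed shape, consistent with the bare kobject_put() on the error path:

    static void ttm_pool_kobj_release(struct kobject *kobj)
    {
            struct ttm_pool_manager *m =
                    container_of(kobj, struct ttm_pool_manager, kobj);

            kfree(m);
    }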
1090 pr_info("Finalizing DMA pool allocator\n"); in ttm_dma_page_alloc_fini()
1093 list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) { in ttm_dma_page_alloc_fini()
1094 dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name, in ttm_dma_page_alloc_fini()
1095 current->pid); in ttm_dma_page_alloc_fini()
1096 WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release, in ttm_dma_page_alloc_fini()
1097 ttm_dma_pool_match, p->pool)); in ttm_dma_page_alloc_fini()
1098 ttm_dma_free_pool(p->dev, p->pool->type); in ttm_dma_page_alloc_fini()
1100 kobject_put(&_manager->kobj); in ttm_dma_page_alloc_fini()
1107 struct dma_pool *pool = NULL; in ttm_dma_page_alloc_debugfs() local
1108 char *h[] = {"pool", "refills", "pages freed", "inuse", "available", in ttm_dma_page_alloc_debugfs()
1112 seq_printf(m, "No pool allocator running.\n"); in ttm_dma_page_alloc_debugfs()
1117 mutex_lock(&_manager->lock); in ttm_dma_page_alloc_debugfs()
1118 list_for_each_entry(p, &_manager->pools, pools) { in ttm_dma_page_alloc_debugfs()
1119 struct device *dev = p->dev; in ttm_dma_page_alloc_debugfs()
1122 pool = p->pool; in ttm_dma_page_alloc_debugfs()
1124 pool->name, pool->nrefills, in ttm_dma_page_alloc_debugfs()
1125 pool->nfrees, pool->npages_in_use, in ttm_dma_page_alloc_debugfs()
1126 pool->npages_free, in ttm_dma_page_alloc_debugfs()
1127 pool->dev_name); in ttm_dma_page_alloc_debugfs()
1129 mutex_unlock(&_manager->lock); in ttm_dma_page_alloc_debugfs()