xref: /openbsd-src/sys/dev/pci/drm/ttm/ttm_pool.c (revision c1a45aed656e7d5627c30c92421893a76f370ccb)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2020 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors: Christian König
24  */
25 
26 /* Pooling of allocated pages is necessary because changing the caching
27  * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
28  * invalidate for those addresses.
29  *
30  * In addition, allocations from the DMA coherent API are pooled as well
31  * because they are rather slow compared to alloc_pages+map.
32  */
33 
34 #include <linux/module.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/highmem.h>
37 #include <linux/sched/mm.h>
38 #include <linux/seq_file.h>
39 
40 #ifdef CONFIG_X86
41 #include <asm/set_memory.h>
42 #endif
43 
44 #include <drm/ttm/ttm_pool.h>
45 #include <drm/ttm/ttm_bo_driver.h>
46 #include <drm/ttm/ttm_tt.h>
47 #include <drm/drm_legacy.h>
48 
49 #include "ttm_module.h"
50 
51 /**
52  * struct ttm_pool_dma - Helper object for coherent DMA mappings
53  *
54  * @addr: original DMA address returned for the mapping
55  * @vaddr: original vaddr returned for the mapping, with the allocation order stored in the lower bits
 * @dmah: drm_dmamem handle backing the coherent allocation (OpenBSD)
56  */
57 struct ttm_pool_dma {
58 	dma_addr_t addr;
59 	unsigned long vaddr;
60 	struct drm_dmamem *dmah;
61 };
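/*
 * The allocation order is packed into the low bits of @vaddr, which works
 * because the kernel virtual address of a coherent allocation is always page
 * aligned.  A minimal sketch of the encoding used further down in this file:
 *
 *	dma->vaddr = (unsigned long)vaddr | order;	   store the order
 *	order = dma->vaddr & ~LINUX_PAGE_MASK;		   recover the order
 *	vaddr = (void *)(dma->vaddr & LINUX_PAGE_MASK);	   recover the address
 */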
62 
63 static unsigned long page_pool_size;
64 
65 MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
66 module_param(page_pool_size, ulong, 0644);
67 
68 static atomic_long_t allocated_pages;
69 
70 static struct ttm_pool_type global_write_combined[MAX_ORDER];
71 static struct ttm_pool_type global_uncached[MAX_ORDER];
72 
73 static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
74 static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
75 
76 static struct rwlock shrinker_lock;
77 static struct list_head shrinker_list;
78 static struct shrinker mm_shrinker;
79 
80 /* Allocate pages of size 1 << order with the given gfp_flags */
81 static struct vm_page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
82 					unsigned int order, bus_dma_tag_t dmat)
83 {
84 	int flags = 0;
85 #ifdef __linux__
86 	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
87 #endif
88 	struct ttm_pool_dma *dma;
89 	struct vm_page *p;
90 	void *vaddr;
91 
92 	/* Don't set the __GFP_COMP flag for higher order allocations.
93 	 * Mapping pages directly into a userspace process and calling
94 	 * put_page() on a TTM allocated page is illegal.
95 	 */
96 	if (order)
97 		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
98 			__GFP_KSWAPD_RECLAIM;
99 
100 	if (!pool->use_dma_alloc) {
101 		p = alloc_pages(gfp_flags, order);
102 
103 		return p;
104 	}
105 
106 	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
107 	if (!dma)
108 		return NULL;
109 
110 #ifdef __linux__
111 	if (order)
112 		attr |= DMA_ATTR_NO_WARN;
113 
114 	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
115 				&dma->addr, gfp_flags, attr);
116 #else
117 	dma->dmah = drm_dmamem_alloc(dmat,
118 	    (1ULL << order) * PAGE_SIZE,
119 	    PAGE_SIZE, 1,
120 	    (1ULL << order) * PAGE_SIZE, flags, 0);
121 	if (dma->dmah == NULL)
122 		goto error_free;
123 	dma->addr = dma->dmah->map->dm_segs[0].ds_addr;
124 	vaddr = dma->dmah->kva;
125 #endif
126 	if (!vaddr)
127 		goto error_free;
128 
129 	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
130 	 * TTM page fault handling and extend the DMA API to clean this up.
131 	 */
132 	if (is_vmalloc_addr(vaddr))
133 		p = vmalloc_to_page(vaddr);
134 	else
135 		p = virt_to_page(vaddr);
136 
137 	dma->vaddr = (unsigned long)vaddr | order;
138 #ifdef notyet
139 	p->private = (unsigned long)dma;
140 #endif
141 	return p;
142 
143 error_free:
144 	kfree(dma);
145 	return NULL;
146 }
147 
148 /* Reset the caching of a page allocation of size 1 << order and free it */
149 static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
150 			       unsigned int order, struct vm_page *p)
151 {
152 #ifdef __linux__
153 	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
154 	struct ttm_pool_dma *dma;
155 	void *vaddr;
156 #endif
157 
158 #ifdef CONFIG_X86
159 	/* We don't care that set_pages_wb is inefficient here. This is only
160 	 * used when we have to shrink and CPU overhead is irrelevant then.
161 	 */
162 	if (caching != ttm_cached && !PageHighMem(p))
163 		set_pages_wb(p, 1 << order);
164 #endif
165 
166 	if (!pool || !pool->use_dma_alloc) {
167 		__free_pages(p, order);
168 		return;
169 	}
170 
171 #ifdef __linux__
172 	if (order)
173 		attr |= DMA_ATTR_NO_WARN;
174 
175 	dma = (void *)p->private;
176 	vaddr = (void *)(dma->vaddr & LINUX_PAGE_MASK);
177 	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
178 		       attr);
179 	kfree(dma);
180 #else
181 	STUB();
182 #endif
183 }
184 
185 /* Apply a new caching to an array of pages */
186 static int ttm_pool_apply_caching(struct vm_page **first, struct vm_page **last,
187 				  enum ttm_caching caching)
188 {
189 #ifdef CONFIG_X86
190 	unsigned int num_pages = last - first;
191 
192 	if (!num_pages)
193 		return 0;
194 
195 	switch (caching) {
196 	case ttm_cached:
197 		break;
198 	case ttm_write_combined:
199 		return set_pages_array_wc(first, num_pages);
200 	case ttm_uncached:
201 		return set_pages_array_uc(first, num_pages);
202 	}
203 #endif
204 	return 0;
205 }
206 
207 /* Map pages of 1 << order size and fill the DMA address array  */
208 static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
209 			struct vm_page *p, dma_addr_t **dma_addr)
210 {
211 	dma_addr_t addr;
212 	unsigned int i;
213 
214 	if (pool->use_dma_alloc) {
215 #ifdef notyet
216 		struct ttm_pool_dma *dma = (void *)p->private;
217 
218 		addr = dma->addr;
219 #else
220 		STUB();
221 		return -ENOSYS;
222 #endif
223 	} else {
224 		size_t size = (1ULL << order) * PAGE_SIZE;
225 
226 		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
227 		if (dma_mapping_error(pool->dev, addr))
228 			return -EFAULT;
229 	}
230 
231 	for (i = 1 << order; i ; --i) {
232 		*(*dma_addr)++ = addr;
233 		addr += PAGE_SIZE;
234 	}
235 
236 	return 0;
237 }
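/*
 * Worked example with hypothetical values: for order = 2 and a mapping that
 * starts at DMA address A, the loop above stores A, A + PAGE_SIZE,
 * A + 2 * PAGE_SIZE and A + 3 * PAGE_SIZE in the next four slots of the
 * caller's dma_addr array and leaves the cursor pointing past them.
 */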
238 
239 /* Unmap pages of 1 << order size */
240 static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
241 			   unsigned int num_pages)
242 {
243 	/* The mapping is torn down when the page itself is freed */
244 	if (pool->use_dma_alloc)
245 		return;
246 
247 	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
248 		       DMA_BIDIRECTIONAL);
249 }
250 
251 /* Give pages back to a specific pool_type */
252 static void ttm_pool_type_give(struct ttm_pool_type *pt, struct vm_page *p)
253 {
254 	unsigned int i, num_pages = 1 << pt->order;
255 	struct ttm_pool_type_lru *entry;
256 
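	/* Zero the pages before caching them in the pool; pages taken back
	 * out of the pool are handed to the caller without going through
	 * __GFP_ZERO again.
	 */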
257 	for (i = 0; i < num_pages; ++i) {
258 #ifdef notyet
259 		if (PageHighMem(p))
260 			clear_highpage(p + i);
261 		else
262 #endif
263 			pmap_zero_page(p + i);
264 	}
265 
266 	entry = malloc(sizeof(struct ttm_pool_type_lru), M_DRM, M_WAITOK);
267 	entry->pg = p;
268 	spin_lock(&pt->lock);
269 	LIST_INSERT_HEAD(&pt->lru, entry, entries);
270 	spin_unlock(&pt->lock);
271 	atomic_long_add(1 << pt->order, &allocated_pages);
272 }
273 
274 /* Take pages from a specific pool_type, return NULL when nothing available */
275 static struct vm_page *ttm_pool_type_take(struct ttm_pool_type *pt)
276 {
277 	struct vm_page *p = NULL;
278 	struct ttm_pool_type_lru *entry;
279 
280 	spin_lock(&pt->lock);
281 	if (!LIST_EMPTY(&pt->lru)) {
282 		entry = LIST_FIRST(&pt->lru);
283 		p = entry->pg;
284 		atomic_long_sub(1 << pt->order, &allocated_pages);
285 		LIST_REMOVE(entry, entries);
286 		free(entry, M_DRM, sizeof(struct ttm_pool_type_lru));
287 	}
288 	spin_unlock(&pt->lock);
289 
290 	return p;
291 }
292 
293 /* Initialize and add a pool type to the global shrinker list */
294 static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
295 			       enum ttm_caching caching, unsigned int order)
296 {
297 	pt->pool = pool;
298 	pt->caching = caching;
299 	pt->order = order;
300 	mtx_init(&pt->lock, IPL_NONE);
301 	INIT_LIST_HEAD(&pt->pages);
302 	LIST_INIT(&pt->lru);
303 
304 	mutex_lock(&shrinker_lock);
305 	list_add_tail(&pt->shrinker_list, &shrinker_list);
306 	mutex_unlock(&shrinker_lock);
307 }
308 
309 /* Remove a pool_type from the global shrinker list and free all pages */
310 static void ttm_pool_type_fini(struct ttm_pool_type *pt)
311 {
312 	struct vm_page *p;
313 	struct ttm_pool_type_lru *entry;
314 
315 	mutex_lock(&shrinker_lock);
316 	list_del(&pt->shrinker_list);
317 	mutex_unlock(&shrinker_lock);
318 
319 	while ((p = ttm_pool_type_take(pt)))
320 		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
321 
322 	while (!LIST_EMPTY(&pt->lru)) {
323 		entry = LIST_FIRST(&pt->lru);
324 		LIST_REMOVE(entry, entries);
325 		free(entry, M_DRM, sizeof(struct ttm_pool_type_lru));
326 	}
327 }
328 
329 /* Return the pool_type to use for the given caching and order */
330 static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
331 						  enum ttm_caching caching,
332 						  unsigned int order)
333 {
334 	if (pool->use_dma_alloc)
335 		return &pool->caching[caching].orders[order];
336 
337 #ifdef CONFIG_X86
338 	switch (caching) {
339 	case ttm_write_combined:
340 		if (pool->use_dma32)
341 			return &global_dma32_write_combined[order];
342 
343 		return &global_write_combined[order];
344 	case ttm_uncached:
345 		if (pool->use_dma32)
346 			return &global_dma32_uncached[order];
347 
348 		return &global_uncached[order];
349 	default:
350 		break;
351 	}
352 #endif
353 
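	/* Without use_dma_alloc, cached pages (and on non-x86 every caching
	 * type) are not pooled at all; a NULL return makes the callers
	 * allocate and free such pages directly.
	 */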
354 	return NULL;
355 }
356 
357 /* Free pages using the global shrinker list */
358 static unsigned int ttm_pool_shrink(void)
359 {
360 	struct ttm_pool_type *pt;
361 	unsigned int num_freed;
362 	struct vm_page *p;
363 
364 	mutex_lock(&shrinker_lock);
365 	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
366 
367 	p = ttm_pool_type_take(pt);
368 	if (p) {
369 		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
370 		num_freed = 1 << pt->order;
371 	} else {
372 		num_freed = 0;
373 	}
374 
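	/* Rotate this pool type to the tail so successive shrink calls spread
	 * the freeing round-robin over all registered pool types.
	 */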
375 	list_move_tail(&pt->shrinker_list, &shrinker_list);
376 	mutex_unlock(&shrinker_lock);
377 
378 	return num_freed;
379 }
380 
381 #ifdef notyet
382 
383 /* Return the allocation order for a page */
384 static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct vm_page *p)
385 {
386 	if (pool->use_dma_alloc) {
387 		struct ttm_pool_dma *dma = (void *)p->private;
388 
389 		return dma->vaddr & ~LINUX_PAGE_MASK;
390 	}
391 
392 	return p->private;
393 }
394 
395 #endif /* notyet */
396 
397 /**
398  * ttm_pool_alloc - Fill a ttm_tt object
399  *
400  * @pool: ttm_pool to use
401  * @tt: ttm_tt object to fill
402  * @ctx: operation context
403  *
404  * Fill the ttm_tt object with pages and also make sure to DMA map them when
405  * necessary.
406  *
407  * Returns: 0 on success, negative error code otherwise.
408  */
409 int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
410 		   struct ttm_operation_ctx *ctx)
411 {
412 	unsigned long num_pages = tt->num_pages;
413 	dma_addr_t *dma_addr = tt->dma_address;
414 	struct vm_page **caching = tt->pages;
415 	struct vm_page **pages = tt->pages;
416 	unsigned long *orders = tt->orders;
417 	gfp_t gfp_flags = GFP_USER;
418 	unsigned int i, order;
419 	struct vm_page *p;
420 	int r;
421 
422 	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
423 #ifdef __linux__
424 	WARN_ON(dma_addr && !pool->dev);
425 #endif
426 
427 	if (tt->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
428 		gfp_flags |= __GFP_ZERO;
429 
430 	if (ctx->gfp_retry_mayfail)
431 		gfp_flags |= __GFP_RETRY_MAYFAIL;
432 
433 	if (pool->use_dma32)
434 		gfp_flags |= GFP_DMA32;
435 	else
436 		gfp_flags |= GFP_HIGHUSER;
437 
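	/* Start with the largest order that still fits into the remaining
	 * page count and fall back to progressively smaller orders whenever
	 * an allocation of the current order fails.
	 */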
438 	for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
439 	     num_pages;
440 	     order = min_t(unsigned int, order, __fls(num_pages))) {
441 		bool apply_caching = false;
442 		struct ttm_pool_type *pt;
443 
444 		pt = ttm_pool_select_type(pool, tt->caching, order);
445 		p = pt ? ttm_pool_type_take(pt) : NULL;
446 		if (p) {
447 			apply_caching = true;
448 		} else {
449 			p = ttm_pool_alloc_page(pool, gfp_flags, order, tt->dmat);
450 			if (p && PageHighMem(p))
451 				apply_caching = true;
452 		}
453 
454 		if (!p) {
455 			if (order) {
456 				--order;
457 				continue;
458 			}
459 			r = -ENOMEM;
460 			goto error_free_all;
461 		}
462 
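		/* Pages taken from a pool already carry the requested caching
		 * and highmem pages are not in the linear mapping, so flush
		 * the pending batch of freshly allocated pages and start the
		 * next batch behind the current allocation.
		 */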
463 		if (apply_caching) {
464 			r = ttm_pool_apply_caching(caching, pages,
465 						   tt->caching);
466 			if (r)
467 				goto error_free_page;
468 			caching = pages + (1 << order);
469 		}
470 
471 		if (dma_addr) {
472 			r = ttm_pool_map(pool, order, p, &dma_addr);
473 			if (r)
474 				goto error_free_page;
475 		}
476 
477 		num_pages -= 1 << order;
478 		for (i = 1 << order; i; --i) {
479 			*(pages++) = p++;
480 			*(orders++) = order;
481 		}
482 	}
483 
484 	r = ttm_pool_apply_caching(caching, pages, tt->caching);
485 	if (r)
486 		goto error_free_all;
487 
488 	return 0;
489 
490 error_free_page:
491 	ttm_pool_free_page(pool, tt->caching, order, p);
492 
493 error_free_all:
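	/* Walk back over everything populated so far, one allocation chunk
	 * at a time, using the per-page order recorded in tt->orders.
	 */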
494 	num_pages = tt->num_pages - num_pages;
495 	for (i = 0; i < num_pages; ) {
496 		order = tt->orders[i];
497 		ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
498 		i += 1 << order;
499 	}
500 
501 	return r;
502 }
503 EXPORT_SYMBOL(ttm_pool_alloc);
504 
505 /**
506  * ttm_pool_free - Free the backing pages from a ttm_tt object
507  *
508  * @pool: Pool to give pages back to.
509  * @tt: ttm_tt object to unpopulate
510  *
511  * Give the backing pages back to a pool or free them
512  */
513 void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
514 {
515 	unsigned int i;
516 
517 	for (i = 0; i < tt->num_pages; ) {
518 		unsigned int order, num_pages;
519 		struct ttm_pool_type *pt;
520 
521 		order = tt->orders[i];
522 		num_pages = 1ULL << order;
523 		if (tt->dma_address)
524 			ttm_pool_unmap(pool, tt->dma_address[i], num_pages);
525 
526 		pt = ttm_pool_select_type(pool, tt->caching, order);
527 		if (pt)
528 			ttm_pool_type_give(pt, tt->pages[i]);
529 		else
530 			ttm_pool_free_page(pool, tt->caching, order,
531 					   tt->pages[i]);
532 
533 		i += num_pages;
534 	}
535 
536 	while (atomic_long_read(&allocated_pages) > page_pool_size)
537 		ttm_pool_shrink();
538 }
539 EXPORT_SYMBOL(ttm_pool_free);
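/*
 * Minimal usage sketch (mypool and ctx are illustrative names, not part of
 * this file): a driver fills a ttm_tt in its populate path and hands the
 * pages back in its unpopulate path:
 *
 *	int r = ttm_pool_alloc(&mypool, tt, &ctx);
 *	if (r)
 *		return r;
 *	...
 *	ttm_pool_free(&mypool, tt);
 *
 * where mypool was previously set up with ttm_pool_init() and ctx is the
 * ttm_operation_ctx of the current operation.
 */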
540 
541 /**
542  * ttm_pool_init - Initialize a pool
543  *
544  * @pool: the pool to initialize
545  * @dev: device for DMA allocations and mappings
546  * @use_dma_alloc: true if coherent DMA alloc should be used
547  * @use_dma32: true if GFP_DMA32 should be used
548  *
549  * Initialize the pool and its pool types.
550  */
551 void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
552 		   bool use_dma_alloc, bool use_dma32)
553 {
554 	unsigned int i, j;
555 
556 	WARN_ON(!dev && use_dma_alloc);
557 
558 	pool->dev = dev;
559 	pool->use_dma_alloc = use_dma_alloc;
560 	pool->use_dma32 = use_dma32;
561 
562 	if (use_dma_alloc) {
563 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
564 			for (j = 0; j < MAX_ORDER; ++j)
565 				ttm_pool_type_init(&pool->caching[i].orders[j],
566 						   pool, i, j);
567 	}
568 }
569 
570 /**
571  * ttm_pool_fini - Cleanup a pool
572  *
573  * @pool: the pool to clean up
574  *
575  * Free all pages in the pool and unregister the types from the global
576  * shrinker.
577  */
578 void ttm_pool_fini(struct ttm_pool *pool)
579 {
580 	unsigned int i, j;
581 
582 	if (pool->use_dma_alloc) {
583 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
584 			for (j = 0; j < MAX_ORDER; ++j)
585 				ttm_pool_type_fini(&pool->caching[i].orders[j]);
586 	}
587 }
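/*
 * Lifetime sketch (assumed driver-side usage, names are illustrative): a
 * per-device pool is typically set up once at device initialization and torn
 * down on removal:
 *
 *	ttm_pool_init(&mypool, dev, true, false);	coherent DMA, no DMA32
 *	...
 *	ttm_pool_fini(&mypool);
 */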
588 
589 /* As long as pages are available make sure to release at least one */
590 static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
591 					    struct shrink_control *sc)
592 {
593 	unsigned long num_freed = 0;
594 
595 	do
596 		num_freed += ttm_pool_shrink();
597 	while (!num_freed && atomic_long_read(&allocated_pages));
598 
599 	return num_freed;
600 }
601 
602 /* Return the number of pages available or SHRINK_EMPTY if we have none */
603 static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
604 					     struct shrink_control *sc)
605 {
606 #ifdef notyet
607 	unsigned long num_pages = atomic_long_read(&allocated_pages);
608 
609 	return num_pages ? num_pages : SHRINK_EMPTY;
610 #else
611 	STUB();
612 	unsigned long num_pages = atomic_long_read(&allocated_pages);
613 
614 	return num_pages ? num_pages : 0;
615 #endif
616 }
617 
618 #ifdef CONFIG_DEBUG_FS
619 /* Count the number of pages available in a pool_type */
620 static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
621 {
622 	unsigned int count = 0;
623 	struct ttm_pool_type_lru *entry;
624 
625 	spin_lock(&pt->lock);
626 	/* Only used for debugfs, the overhead doesn't matter */
627 	LIST_FOREACH(entry, &pt->lru, entries)
628 		++count;
629 	spin_unlock(&pt->lock);
630 
631 	return count;
632 }
633 
634 /* Print a nice header for the order columns */
635 static void ttm_pool_debugfs_header(struct seq_file *m)
636 {
637 	unsigned int i;
638 
639 	seq_puts(m, "\t ");
640 	for (i = 0; i < MAX_ORDER; ++i)
641 		seq_printf(m, " ---%2u---", i);
642 	seq_puts(m, "\n");
643 }
644 
645 /* Dump information about the different pool types */
646 static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
647 				    struct seq_file *m)
648 {
649 	unsigned int i;
650 
651 	for (i = 0; i < MAX_ORDER; ++i)
652 		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
653 	seq_puts(m, "\n");
654 }
655 
656 /* Dump the total amount of allocated pages */
657 static void ttm_pool_debugfs_footer(struct seq_file *m)
658 {
659 	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
660 		   atomic_long_read(&allocated_pages), page_pool_size);
661 }
662 
663 /* Dump the information for the global pools */
664 static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
665 {
666 	ttm_pool_debugfs_header(m);
667 
668 	mutex_lock(&shrinker_lock);
669 	seq_puts(m, "wc\t:");
670 	ttm_pool_debugfs_orders(global_write_combined, m);
671 	seq_puts(m, "uc\t:");
672 	ttm_pool_debugfs_orders(global_uncached, m);
673 	seq_puts(m, "wc 32\t:");
674 	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
675 	seq_puts(m, "uc 32\t:");
676 	ttm_pool_debugfs_orders(global_dma32_uncached, m);
677 	mutex_unlock(&shrinker_lock);
678 
679 	ttm_pool_debugfs_footer(m);
680 
681 	return 0;
682 }
683 DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);
684 
685 /**
686  * ttm_pool_debugfs - Debugfs dump function for a pool
687  *
688  * @pool: the pool to dump the information for
689  * @m: seq_file to dump to
690  *
691  * Make a debugfs dump with the per pool and global information.
692  */
693 int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
694 {
695 	unsigned int i;
696 
697 	if (!pool->use_dma_alloc) {
698 		seq_puts(m, "unused\n");
699 		return 0;
700 	}
701 
702 	ttm_pool_debugfs_header(m);
703 
704 	mutex_lock(&shrinker_lock);
705 	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
706 		seq_puts(m, "DMA ");
707 		switch (i) {
708 		case ttm_cached:
709 			seq_puts(m, "\t:");
710 			break;
711 		case ttm_write_combined:
712 			seq_puts(m, "wc\t:");
713 			break;
714 		case ttm_uncached:
715 			seq_puts(m, "uc\t:");
716 			break;
717 		}
718 		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
719 	}
720 	mutex_unlock(&shrinker_lock);
721 
722 	ttm_pool_debugfs_footer(m);
723 	return 0;
724 }
725 EXPORT_SYMBOL(ttm_pool_debugfs);
726 
727 /* Test the shrinker functions and dump the result */
728 static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
729 {
730 	struct shrink_control sc = { .gfp_mask = GFP_NOFS };
731 
732 	fs_reclaim_acquire(GFP_KERNEL);
733 	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
734 		   ttm_pool_shrinker_scan(&mm_shrinker, &sc));
735 	fs_reclaim_release(GFP_KERNEL);
736 
737 	return 0;
738 }
739 DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);
740 
741 #endif
742 
743 /**
744  * ttm_pool_mgr_init - Initialize globals
745  *
746  * @num_pages: default number of pages
747  *
748  * Initialize the global locks and lists for the MM shrinker.
749  */
750 int ttm_pool_mgr_init(unsigned long num_pages)
751 {
752 	unsigned int i;
753 
754 	if (!page_pool_size)
755 		page_pool_size = num_pages;
756 
757 	rw_init(&shrinker_lock, "ttmshrlk");
758 	INIT_LIST_HEAD(&shrinker_list);
759 
760 	for (i = 0; i < MAX_ORDER; ++i) {
761 		ttm_pool_type_init(&global_write_combined[i], NULL,
762 				   ttm_write_combined, i);
763 		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
764 
765 		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
766 				   ttm_write_combined, i);
767 		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
768 				   ttm_uncached, i);
769 	}
770 
771 #ifdef CONFIG_DEBUG_FS
772 	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
773 			    &ttm_pool_debugfs_globals_fops);
774 	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
775 			    &ttm_pool_debugfs_shrink_fops);
776 #endif
777 
778 	mm_shrinker.count_objects = ttm_pool_shrinker_count;
779 	mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
780 	mm_shrinker.seeks = 1;
781 	return register_shrinker(&mm_shrinker);
782 }
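/*
 * Sketch of the expected pairing (the call sites are outside this file):
 * ttm_pool_mgr_init() is called once with the default pool size limit when
 * TTM is brought up and ttm_pool_mgr_fini() when it is torn down:
 *
 *	if (ttm_pool_mgr_init(num_pages))
 *		... bail out ...
 *	...
 *	ttm_pool_mgr_fini();
 */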
783 
784 /**
785  * ttm_pool_mgr_fini - Finalize globals
786  *
787  * Cleanup the global pools and unregister the MM shrinker.
788  */
789 void ttm_pool_mgr_fini(void)
790 {
791 	unsigned int i;
792 
793 	for (i = 0; i < MAX_ORDER; ++i) {
794 		ttm_pool_type_fini(&global_write_combined[i]);
795 		ttm_pool_type_fini(&global_uncached[i]);
796 
797 		ttm_pool_type_fini(&global_dma32_write_combined[i]);
798 		ttm_pool_type_fini(&global_dma32_uncached[i]);
799 	}
800 
801 	unregister_shrinker(&mm_shrinker);
802 	WARN_ON(!list_empty(&shrinker_list));
803 }
804