xref: /openbsd-src/sys/dev/pci/drm/ttm/ttm_pool.c (revision 3374c67d44f9b75b98444cbf63020f777792342e)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2020 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors: Christian König
24  */
25 
26 /* Pooling of allocated pages is necessary because changing the caching
27  * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
28  * invalidate for those addresses.
29  *
30  * Allocations from the DMA coherent API are also pooled, because they are
31  * rather slow compared to alloc_pages+map.
32  */
33 
34 #include <linux/module.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/highmem.h>
37 #include <linux/sched/mm.h>
38 #include <linux/seq_file.h>
39 
40 #ifdef CONFIG_X86
41 #include <asm/set_memory.h>
42 #endif
43 
44 #include <drm/ttm/ttm_pool.h>
45 #include <drm/ttm/ttm_bo_driver.h>
46 #include <drm/ttm/ttm_tt.h>
47 #include <drm/drm_legacy.h>
48 
49 #include "ttm_module.h"
50 
51 /**
52  * struct ttm_pool_dma - Helper object for coherent DMA mappings
53  *
54  * @addr: original DMA address returned for the mapping
55  * @vaddr: original vaddr returned for the mapping and order in the lower bits
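 * @dmat: bus_dma tag used for the allocation (OpenBSD bus_dma path)
 * @map: bus_dma map backing the allocation
 * @seg: bus_dma segment describing the allocated memory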
56  */
57 struct ttm_pool_dma {
58 	dma_addr_t addr;
59 	unsigned long vaddr;
60 	bus_dma_tag_t dmat;
61 	bus_dmamap_t map;
62 	bus_dma_segment_t seg;
63 };
64 
65 static unsigned long page_pool_size;
66 
67 MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
68 module_param(page_pool_size, ulong, 0644);
69 
70 static atomic_long_t allocated_pages;
71 
72 static struct ttm_pool_type global_write_combined[MAX_ORDER];
73 static struct ttm_pool_type global_uncached[MAX_ORDER];
74 
75 static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
76 static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
77 
78 static spinlock_t shrinker_lock;
79 static struct list_head shrinker_list;
80 static struct shrinker mm_shrinker;
81 
82 #ifdef __linux__
83 
84 /* Allocate pages of size 1 << order with the given gfp_flags */
85 static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
86 					unsigned int order)
87 {
88 	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
89 	struct ttm_pool_dma *dma;
90 	struct page *p;
91 	void *vaddr;
92 
93 	/* Don't set the __GFP_COMP flag for higher order allocations.
94 	 * Mapping pages directly into a userspace process and calling
95 	 * put_page() on a TTM allocated page is illegal.
96 	 */
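	/* For higher orders, fail fast (no retries, no memalloc reserves, no
	 * allocation failure warning) so the caller can fall back to a
	 * smaller order instead.
	 */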
97 	if (order)
98 		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
99 			__GFP_KSWAPD_RECLAIM;
100 
101 	if (!pool->use_dma_alloc) {
102 		p = alloc_pages(gfp_flags, order);
103 		if (p)
104 			p->private = order;
105 
106 		return p;
107 	}
108 
109 	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
110 	if (!dma)
111 		return NULL;
112 
113 	if (order)
114 		attr |= DMA_ATTR_NO_WARN;
115 
116 	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
117 				&dma->addr, gfp_flags, attr);
118 	if (!vaddr)
119 		goto error_free;
120 
121 	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
122 	 * TTM page fault handling and extend the DMA API to clean this up.
123 	 */
124 	if (is_vmalloc_addr(vaddr))
125 		p = vmalloc_to_page(vaddr);
126 	else
127 		p = virt_to_page(vaddr);
128 
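	/* The allocation order is stashed in the low bits of the page-aligned
	 * vaddr so it can be recovered later (see ttm_pool_page_order()).
	 */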
129 	dma->vaddr = (unsigned long)vaddr | order;
130 	p->private = (unsigned long)dma;
131 	return p;
132 
133 error_free:
134 	kfree(dma);
135 	return NULL;
136 }
137 
138 /* Reset the caching and free the pages of size 1 << order */
139 static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
140 			       unsigned int order, struct page *p)
141 {
142 	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
143 	struct ttm_pool_dma *dma;
144 	void *vaddr;
145 
146 #ifdef CONFIG_X86
147 	/* We don't care that set_pages_wb is inefficient here. This is only
148 	 * used when we have to shrink and CPU overhead is irrelevant then.
149 	 */
150 	if (caching != ttm_cached && !PageHighMem(p))
151 		set_pages_wb(p, 1 << order);
152 #endif
153 
154 	if (!pool || !pool->use_dma_alloc) {
155 		__free_pages(p, order);
156 		return;
157 	}
158 
159 	if (order)
160 		attr |= DMA_ATTR_NO_WARN;
161 
162 	dma = (void *)p->private;
163 	vaddr = (void *)(dma->vaddr & PAGE_MASK);
164 	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
165 		       attr);
166 	kfree(dma);
167 }
168 
169 #else
170 
171 static struct vm_page *ttm_pool_alloc_page(struct ttm_pool *pool,
172 					   gfp_t gfp_flags, unsigned int order,
173 					   bus_dma_tag_t dmat)
174 {
175 	struct ttm_pool_dma *dma;
176 	struct vm_page *p;
177 	struct uvm_constraint_range *constraint = &no_constraint;
178 	int flags = (gfp_flags & M_NOWAIT) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
179 	int dmaflags = BUS_DMA_64BIT;
180 	int nsegs;
181 
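	/* For dma32 pools, use the platform's DMA-reachable constraint and
	 * drop BUS_DMA_64BIT so the backing memory lands in the low range
	 * (typically below 4GB).
	 */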
182 	if (pool->use_dma32) {
183 		constraint = &dma_constraint;
184 		dmaflags &= ~BUS_DMA_64BIT;
185 	}
186 
187 	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
188 	if (!dma)
189 		return NULL;
190 
191 	if (bus_dmamap_create(dmat, (1ULL << order) * PAGE_SIZE, 1,
192 	    (1ULL << order) * PAGE_SIZE, 0, flags | dmaflags, &dma->map))
193 		goto error_free;
194 #ifdef bus_dmamem_alloc_range
195 	if (bus_dmamem_alloc_range(dmat, (1ULL << order) * PAGE_SIZE,
196 	    PAGE_SIZE, 0, &dma->seg, 1, &nsegs, flags | BUS_DMA_ZERO,
197 	    constraint->ucr_low, constraint->ucr_high)) {
198 		bus_dmamap_destroy(dmat, dma->map);
199 		goto error_free;
200 	}
201 #else
202 	if (bus_dmamem_alloc(dmat, (1ULL << order) * PAGE_SIZE,
203 	    PAGE_SIZE, 0, &dma->seg, 1, &nsegs, flags | BUS_DMA_ZERO)) {
204 		bus_dmamap_destroy(dmat, dma->map);
205 		goto error_free;
206 	}
207 #endif
208 	if (bus_dmamap_load_raw(dmat, dma->map, &dma->seg, 1,
209 	    (1ULL << order) * PAGE_SIZE, flags)) {
210 		bus_dmamem_free(dmat, &dma->seg, 1);
211 		bus_dmamap_destroy(dmat, dma->map);
212 		goto error_free;
213 	}
214 	dma->dmat = dmat;
215 	dma->addr = dma->map->dm_segs[0].ds_addr;
216 
217 #ifndef __sparc64__
218 	p = PHYS_TO_VM_PAGE(dma->seg.ds_addr);
219 #else
220 	p = TAILQ_FIRST((struct pglist *)dma->seg._ds_mlist);
221 #endif
222 
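	/* Stash the DMA bookkeeping in the page's objt.rbt_parent pointer,
	 * which is unused for these pages, mirroring p->private in the
	 * Linux path.
	 */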
223 	p->objt.rbt_parent = (struct rb_entry *)dma;
224 	return p;
225 
226 error_free:
227 	kfree(dma);
228 	return NULL;
229 }
230 
231 static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
232 			       unsigned int order, struct vm_page *p)
233 {
234 	struct ttm_pool_dma *dma;
235 
236 #ifdef CONFIG_X86
237 	/* We don't care that set_pages_wb is inefficient here. This is only
238 	 * used when we have to shrink and CPU overhead is irrelevant then.
239 	 */
240 	if (caching != ttm_cached && !PageHighMem(p))
241 		set_pages_wb(p, 1 << order);
242 #endif
243 
244 	dma = (struct ttm_pool_dma *)p->objt.rbt_parent;
245 	bus_dmamap_unload(dma->dmat, dma->map);
246 	bus_dmamem_free(dma->dmat, &dma->seg, 1);
247 	bus_dmamap_destroy(dma->dmat, dma->map);
248 	kfree(dma);
249 }
250 
251 #endif
252 
253 /* Apply a new caching to an array of pages */
254 static int ttm_pool_apply_caching(struct vm_page **first, struct vm_page **last,
255 				  enum ttm_caching caching)
256 {
257 #ifdef CONFIG_X86
258 	unsigned int num_pages = last - first;
259 
260 	if (!num_pages)
261 		return 0;
262 
263 	switch (caching) {
264 	case ttm_cached:
265 		break;
266 	case ttm_write_combined:
267 		return set_pages_array_wc(first, num_pages);
268 	case ttm_uncached:
269 		return set_pages_array_uc(first, num_pages);
270 	}
271 #endif
272 	return 0;
273 }
274 
275 #ifdef __linux__
276 
277 /* Map pages of 1 << order size and fill the DMA address array  */
278 static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
279 			struct vm_page *p, dma_addr_t **dma_addr)
280 {
281 	dma_addr_t addr;
282 	unsigned int i;
283 
284 	if (pool->use_dma_alloc) {
285 		struct ttm_pool_dma *dma = (void *)p->private;
286 
287 		addr = dma->addr;
288 	} else {
289 		size_t size = (1ULL << order) * PAGE_SIZE;
290 
291 		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
292 		if (dma_mapping_error(pool->dev, addr))
293 			return -EFAULT;
294 	}
295 
296 	for (i = 1 << order; i ; --i) {
297 		*(*dma_addr)++ = addr;
298 		addr += PAGE_SIZE;
299 	}
300 
301 	return 0;
302 }
303 
304 /* Unmap pages of 1 << order size */
305 static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
306 			   unsigned int num_pages)
307 {
308 	/* Unmapped while freeing the page */
309 	if (pool->use_dma_alloc)
310 		return;
311 
312 	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
313 		       DMA_BIDIRECTIONAL);
314 }
315 
316 #else
317 
318 static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
319 			struct vm_page *p, dma_addr_t **dma_addr)
320 {
321 	struct ttm_pool_dma *dma;
322 	dma_addr_t addr;
323 	unsigned int i;
324 
325 	dma = (struct ttm_pool_dma *)p->objt.rbt_parent;
326 	addr = dma->addr;
327 
328 	for (i = 1 << order; i ; --i) {
329 		*(*dma_addr)++ = addr;
330 		addr += PAGE_SIZE;
331 	}
332 
333 	return 0;
334 }
335 
336 static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
337 			   unsigned int num_pages)
338 {
339 }
340 
341 #endif
342 
343 /* Give pages back to a specific pool_type */
344 static void ttm_pool_type_give(struct ttm_pool_type *pt, struct vm_page *p)
345 {
346 	unsigned int i, num_pages = 1 << pt->order;
347 	struct ttm_pool_type_lru *entry;
348 
349 	for (i = 0; i < num_pages; ++i) {
350 #ifdef notyet
351 		if (PageHighMem(p))
352 			clear_highpage(p + i);
353 		else
354 #endif
355 			pmap_zero_page(p + i);
356 	}
357 
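	/* struct vm_page has no lru list_head, so pooled pages are tracked
	 * through separately allocated ttm_pool_type_lru entries instead.
	 */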
358 	entry = malloc(sizeof(struct ttm_pool_type_lru), M_DRM, M_WAITOK);
359 	entry->pg = p;
360 	spin_lock(&pt->lock);
361 	LIST_INSERT_HEAD(&pt->lru, entry, entries);
362 	spin_unlock(&pt->lock);
363 	atomic_long_add(1 << pt->order, &allocated_pages);
364 }
365 
366 /* Take pages from a specific pool_type, return NULL when nothing is available */
367 static struct vm_page *ttm_pool_type_take(struct ttm_pool_type *pt)
368 {
369 	struct vm_page *p = NULL;
370 	struct ttm_pool_type_lru *entry;
371 
372 	spin_lock(&pt->lock);
373 	if (!LIST_EMPTY(&pt->lru)) {
374 		entry = LIST_FIRST(&pt->lru);
375 		p = entry->pg;
376 		atomic_long_sub(1 << pt->order, &allocated_pages);
377 		LIST_REMOVE(entry, entries);
378 		free(entry, M_DRM, sizeof(struct ttm_pool_type_lru));
379 	}
380 	spin_unlock(&pt->lock);
381 
382 	return p;
383 }
384 
385 /* Initialize and add a pool type to the global shrinker list */
386 static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
387 			       enum ttm_caching caching, unsigned int order)
388 {
389 	pt->pool = pool;
390 	pt->caching = caching;
391 	pt->order = order;
392 	mtx_init(&pt->lock, IPL_NONE);
393 	INIT_LIST_HEAD(&pt->pages);
394 	LIST_INIT(&pt->lru);
395 
396 	spin_lock(&shrinker_lock);
397 	list_add_tail(&pt->shrinker_list, &shrinker_list);
398 	spin_unlock(&shrinker_lock);
399 }
400 
401 /* Remove a pool_type from the global shrinker list and free all pages */
402 static void ttm_pool_type_fini(struct ttm_pool_type *pt)
403 {
404 	struct vm_page *p;
405 	struct ttm_pool_type_lru *entry;
406 
407 	spin_lock(&shrinker_lock);
408 	list_del(&pt->shrinker_list);
409 	spin_unlock(&shrinker_lock);
410 
411 	while ((p = ttm_pool_type_take(pt)))
412 		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
413 
414 	while (!LIST_EMPTY(&pt->lru)) {
415 		entry = LIST_FIRST(&pt->lru);
416 		LIST_REMOVE(entry, entries);
417 		free(entry, M_DRM, sizeof(struct ttm_pool_type_lru));
418 	}
419 }
420 
421 /* Return the pool_type to use for the given caching and order */
422 static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
423 						  enum ttm_caching caching,
424 						  unsigned int order)
425 {
426 	if (pool->use_dma_alloc)
427 		return &pool->caching[caching].orders[order];
428 
429 #ifdef CONFIG_X86
430 	switch (caching) {
431 	case ttm_write_combined:
432 		if (pool->use_dma32)
433 			return &global_dma32_write_combined[order];
434 
435 		return &global_write_combined[order];
436 	case ttm_uncached:
437 		if (pool->use_dma32)
438 			return &global_dma32_uncached[order];
439 
440 		return &global_uncached[order];
441 	default:
442 		break;
443 	}
444 #endif
445 
446 	return NULL;
447 }
448 
449 /* Free pages using the global shrinker list */
450 static unsigned int ttm_pool_shrink(void)
451 {
452 	struct ttm_pool_type *pt;
453 	unsigned int num_pages;
454 	struct vm_page *p;
455 
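	/* Round-robin: shrink the pool type at the head of the list and
	 * rotate it to the tail so repeated calls spread across all pools.
	 */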
456 	spin_lock(&shrinker_lock);
457 	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
458 	list_move_tail(&pt->shrinker_list, &shrinker_list);
459 	spin_unlock(&shrinker_lock);
460 
461 	p = ttm_pool_type_take(pt);
462 	if (p) {
463 		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
464 		num_pages = 1 << pt->order;
465 	} else {
466 		num_pages = 0;
467 	}
468 
469 	return num_pages;
470 }
471 
472 #ifdef notyet
473 
474 /* Return the allocation order for a page */
475 static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct vm_page *p)
476 {
477 	if (pool->use_dma_alloc) {
478 		struct ttm_pool_dma *dma = (void *)p->private;
479 
480 		return dma->vaddr & ~LINUX_PAGE_MASK;
481 	}
482 
483 	return p->private;
484 }
485 
486 #endif /* notyet */
487 
488 /**
489  * ttm_pool_alloc - Fill a ttm_tt object
490  *
491  * @pool: ttm_pool to use
492  * @tt: ttm_tt object to fill
493  * @ctx: operation context
494  *
495  * Fill the ttm_tt object with pages and also make sure to DMA map them when
496  * necessary.
497  *
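 *
 * A minimal usage sketch (the bdev name is illustrative): a driver's
 * populate hook calls ttm_pool_alloc(&bdev->pool, tt, ctx), and the
 * matching unpopulate hook calls ttm_pool_free(&bdev->pool, tt).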
498  * Returns: 0 on success, negative error code otherwise.
499  */
500 int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
501 		   struct ttm_operation_ctx *ctx)
502 {
503 	unsigned long num_pages = tt->num_pages;
504 	dma_addr_t *dma_addr = tt->dma_address;
505 	struct vm_page **caching = tt->pages;
506 	struct vm_page **pages = tt->pages;
507 	unsigned long *orders = tt->orders;
508 	gfp_t gfp_flags = GFP_USER;
509 	unsigned int i, order;
510 	struct vm_page *p;
511 	int r;
512 
513 	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
514 #ifdef __linux__
515 	WARN_ON(dma_addr && !pool->dev);
516 #endif
517 
518 	if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
519 		gfp_flags |= __GFP_ZERO;
520 
521 	if (ctx->gfp_retry_mayfail)
522 		gfp_flags |= __GFP_RETRY_MAYFAIL;
523 
524 	if (pool->use_dma32)
525 		gfp_flags |= GFP_DMA32;
526 	else
527 		gfp_flags |= GFP_HIGHUSER;
528 
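	/* Allocate in the largest chunks possible: start at the highest order
	 * that fits num_pages and fall back to smaller orders on failure.
	 */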
529 	for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
530 	     num_pages;
531 	     order = min_t(unsigned int, order, __fls(num_pages))) {
532 		bool apply_caching = false;
533 		struct ttm_pool_type *pt;
534 
535 		pt = ttm_pool_select_type(pool, tt->caching, order);
536 		p = pt ? ttm_pool_type_take(pt) : NULL;
537 		if (p) {
538 			apply_caching = true;
539 		} else {
540 			p = ttm_pool_alloc_page(pool, gfp_flags, order, tt->dmat);
541 			if (p && PageHighMem(p))
542 				apply_caching = true;
543 		}
544 
545 		if (!p) {
546 			if (order) {
547 				--order;
548 				continue;
549 			}
550 			r = -ENOMEM;
551 			goto error_free_all;
552 		}
553 
554 		if (apply_caching) {
555 			r = ttm_pool_apply_caching(caching, pages,
556 						   tt->caching);
557 			if (r)
558 				goto error_free_page;
559 			caching = pages + (1 << order);
560 		}
561 
562 		if (dma_addr) {
563 			r = ttm_pool_map(pool, order, p, &dma_addr);
564 			if (r)
565 				goto error_free_page;
566 		}
567 
568 		num_pages -= 1 << order;
569 		for (i = 1 << order; i; --i) {
570 			*(pages++) = p++;
571 			*(orders++) = order;
572 		}
573 	}
574 
575 	r = ttm_pool_apply_caching(caching, pages, tt->caching);
576 	if (r)
577 		goto error_free_all;
578 
579 	return 0;
580 
581 error_free_page:
582 	ttm_pool_free_page(pool, tt->caching, order, p);
583 
584 error_free_all:
585 	num_pages = tt->num_pages - num_pages;
586 	for (i = 0; i < num_pages; ) {
587 		order = tt->orders[i];
588 		ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
589 		i += 1 << order;
590 	}
591 
592 	return r;
593 }
594 EXPORT_SYMBOL(ttm_pool_alloc);
595 
596 /**
597  * ttm_pool_free - Free the backing pages from a ttm_tt object
598  *
599  * @pool: Pool to give pages back to.
600  * @tt: ttm_tt object to unpopulate
601  *
602  * Give the backing pages back to a pool or free them
603  */
604 void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
605 {
606 	unsigned int i;
607 
608 	for (i = 0; i < tt->num_pages; ) {
609 		unsigned int order, num_pages;
610 		struct ttm_pool_type *pt;
611 
612 		order = tt->orders[i];
613 		num_pages = 1ULL << order;
614 		if (tt->dma_address)
615 			ttm_pool_unmap(pool, tt->dma_address[i], num_pages);
616 
617 		pt = ttm_pool_select_type(pool, tt->caching, order);
618 		if (pt)
619 			ttm_pool_type_give(pt, tt->pages[i]);
620 		else
621 			ttm_pool_free_page(pool, tt->caching, order,
622 					   tt->pages[i]);
623 
624 		i += num_pages;
625 	}
626 
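	/* Trim the global pools back down to the page_pool_size limit. */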
627 	while (atomic_long_read(&allocated_pages) > page_pool_size)
628 		ttm_pool_shrink();
629 }
630 EXPORT_SYMBOL(ttm_pool_free);
631 
632 /**
633  * ttm_pool_init - Initialize a pool
634  *
635  * @pool: the pool to initialize
636  * @dev: device for DMA allocations and mappings
637  * @use_dma_alloc: true if coherent DMA alloc should be used
638  * @use_dma32: true if GFP_DMA32 should be used
639  *
640  * Initialize the pool and its pool types.
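 *
 * A minimal sketch (the bdev name is illustrative): a driver typically calls
 * ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32) at device init
 * and ttm_pool_fini(&bdev->pool) on teardown.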
641  */
642 void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
643 		   bool use_dma_alloc, bool use_dma32)
644 {
645 	unsigned int i, j;
646 
647 	WARN_ON(!dev && use_dma_alloc);
648 
649 	pool->dev = dev;
650 	pool->use_dma_alloc = use_dma_alloc;
651 	pool->use_dma32 = use_dma32;
652 
653 	if (use_dma_alloc) {
654 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
655 			for (j = 0; j < MAX_ORDER; ++j)
656 				ttm_pool_type_init(&pool->caching[i].orders[j],
657 						   pool, i, j);
658 	}
659 }
660 
661 /**
662  * ttm_pool_fini - Cleanup a pool
663  *
664  * @pool: the pool to clean up
665  *
666  * Free all pages in the pool and unregister the types from the global
667  * shrinker.
668  */
669 void ttm_pool_fini(struct ttm_pool *pool)
670 {
671 	unsigned int i, j;
672 
673 	if (pool->use_dma_alloc) {
674 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
675 			for (j = 0; j < MAX_ORDER; ++j)
676 				ttm_pool_type_fini(&pool->caching[i].orders[j]);
677 	}
678 
679 	/* We removed the pool types from the LRU, but we need to also make sure
680 	 * that no shrinker is concurrently freeing pages from the pool.
681 	 */
682 	synchronize_shrinkers();
683 }
684 
685 /* As long as pages are available make sure to release at least one */
686 static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
687 					    struct shrink_control *sc)
688 {
689 	unsigned long num_freed = 0;
690 
691 	do
692 		num_freed += ttm_pool_shrink();
693 	while (!num_freed && atomic_long_read(&allocated_pages));
694 
695 	return num_freed;
696 }
697 
698 /* Return the number of pages available or SHRINK_EMPTY if we have none */
699 static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
700 					     struct shrink_control *sc)
701 {
702 #ifdef notyet
703 	unsigned long num_pages = atomic_long_read(&allocated_pages);
704 
705 	return num_pages ? num_pages : SHRINK_EMPTY;
706 #else
707 	STUB();
708 	unsigned long num_pages = atomic_long_read(&allocated_pages);
709 
710 	return num_pages ? num_pages : 0;
711 #endif
712 }
713 
714 #ifdef CONFIG_DEBUG_FS
715 /* Count the number of pages available in a pool_type */
716 static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
717 {
718 	unsigned int count = 0;
719 	struct ttm_pool_type_lru *entry;
720 
721 	spin_lock(&pt->lock);
722 	/* Only used for debugfs, the overhead doesn't matter */
723 	LIST_FOREACH(entry, &pt->lru, entries)
724 		++count;
725 	spin_unlock(&pt->lock);
726 
727 	return count;
728 }
729 
730 /* Print a nice header for the order */
731 static void ttm_pool_debugfs_header(struct seq_file *m)
732 {
733 	unsigned int i;
734 
735 	seq_puts(m, "\t ");
736 	for (i = 0; i < MAX_ORDER; ++i)
737 		seq_printf(m, " ---%2u---", i);
738 	seq_puts(m, "\n");
739 }
740 
741 /* Dump information about the different pool types */
742 static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
743 				    struct seq_file *m)
744 {
745 	unsigned int i;
746 
747 	for (i = 0; i < MAX_ORDER; ++i)
748 		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
749 	seq_puts(m, "\n");
750 }
751 
752 /* Dump the total number of allocated pages */
753 static void ttm_pool_debugfs_footer(struct seq_file *m)
754 {
755 	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
756 		   atomic_long_read(&allocated_pages), page_pool_size);
757 }
758 
759 /* Dump the information for the global pools */
760 static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
761 {
762 	ttm_pool_debugfs_header(m);
763 
764 	spin_lock(&shrinker_lock);
765 	seq_puts(m, "wc\t:");
766 	ttm_pool_debugfs_orders(global_write_combined, m);
767 	seq_puts(m, "uc\t:");
768 	ttm_pool_debugfs_orders(global_uncached, m);
769 	seq_puts(m, "wc 32\t:");
770 	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
771 	seq_puts(m, "uc 32\t:");
772 	ttm_pool_debugfs_orders(global_dma32_uncached, m);
773 	spin_unlock(&shrinker_lock);
774 
775 	ttm_pool_debugfs_footer(m);
776 
777 	return 0;
778 }
779 DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);
780 
781 /**
782  * ttm_pool_debugfs - Debugfs dump function for a pool
783  *
784  * @pool: the pool to dump the information for
785  * @m: seq_file to dump to
786  *
787  * Make a debugfs dump with the per pool and global information.
788  */
789 int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
790 {
791 	unsigned int i;
792 
793 	if (!pool->use_dma_alloc) {
794 		seq_puts(m, "unused\n");
795 		return 0;
796 	}
797 
798 	ttm_pool_debugfs_header(m);
799 
800 	spin_lock(&shrinker_lock);
801 	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
802 		seq_puts(m, "DMA ");
803 		switch (i) {
804 		case ttm_cached:
805 			seq_puts(m, "\t:");
806 			break;
807 		case ttm_write_combined:
808 			seq_puts(m, "wc\t:");
809 			break;
810 		case ttm_uncached:
811 			seq_puts(m, "uc\t:");
812 			break;
813 		}
814 		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
815 	}
816 	spin_unlock(&shrinker_lock);
817 
818 	ttm_pool_debugfs_footer(m);
819 	return 0;
820 }
821 EXPORT_SYMBOL(ttm_pool_debugfs);
822 
823 /* Test the shrinker functions and dump the result */
824 static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
825 {
826 	struct shrink_control sc = { .gfp_mask = GFP_NOFS };
827 
828 	fs_reclaim_acquire(GFP_KERNEL);
829 	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
830 		   ttm_pool_shrinker_scan(&mm_shrinker, &sc));
831 	fs_reclaim_release(GFP_KERNEL);
832 
833 	return 0;
834 }
835 DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);
836 
837 #endif
838 
839 /**
840  * ttm_pool_mgr_init - Initialize globals
841  *
842  * @num_pages: default number of pages
843  *
844  * Initialize the global locks and lists for the MM shrinker.
845  */
846 int ttm_pool_mgr_init(unsigned long num_pages)
847 {
848 	unsigned int i;
849 
850 	if (!page_pool_size)
851 		page_pool_size = num_pages;
852 
853 	mtx_init(&shrinker_lock, IPL_NONE);
854 	INIT_LIST_HEAD(&shrinker_list);
855 
856 	for (i = 0; i < MAX_ORDER; ++i) {
857 		ttm_pool_type_init(&global_write_combined[i], NULL,
858 				   ttm_write_combined, i);
859 		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
860 
861 		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
862 				   ttm_write_combined, i);
863 		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
864 				   ttm_uncached, i);
865 	}
866 
867 #ifdef CONFIG_DEBUG_FS
868 	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
869 			    &ttm_pool_debugfs_globals_fops);
870 	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
871 			    &ttm_pool_debugfs_shrink_fops);
872 #endif
873 
874 	mm_shrinker.count_objects = ttm_pool_shrinker_count;
875 	mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
876 	mm_shrinker.seeks = 1;
877 	return register_shrinker(&mm_shrinker, "drm-ttm_pool");
878 }
879 
880 /**
881  * ttm_pool_mgr_fini - Finalize globals
882  *
883  * Cleanup the global pools and unregister the MM shrinker.
884  */
885 void ttm_pool_mgr_fini(void)
886 {
887 	unsigned int i;
888 
889 	for (i = 0; i < MAX_ORDER; ++i) {
890 		ttm_pool_type_fini(&global_write_combined[i]);
891 		ttm_pool_type_fini(&global_uncached[i]);
892 
893 		ttm_pool_type_fini(&global_dma32_write_combined[i]);
894 		ttm_pool_type_fini(&global_dma32_uncached[i]);
895 	}
896 
897 	unregister_shrinker(&mm_shrinker);
898 	WARN_ON(!list_empty(&shrinker_list));
899 }
900