xref: /openbsd-src/sys/dev/pci/drm/ttm/ttm_pool.c (revision fc405d53b73a2d73393cb97f684863d17b583e38)
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pooling of allocated pages is necessary because changing the caching
 * attributes on x86 of the linear mapping requires a costly cross CPU TLB
 * invalidate for those addresses.
 *
 * In addition to that, allocations from the DMA coherent API are pooled as
 * well because they are rather slow compared to alloc_pages+map.
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_legacy.h>

#include "ttm_module.h"

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping, with the allocation
 *	order stored in the lower bits
 * @dmat: bus_dma(9) tag used for the mapping (OpenBSD)
 * @map: bus_dma(9) map backing the allocation (OpenBSD)
 * @seg: bus_dma(9) segment describing the physical memory (OpenBSD)
 */
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
	bus_dma_tag_t dmat;
	bus_dmamap_t map;
	bus_dma_segment_t seg;
};
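
/* Illustrative sketch (not part of the original file): allocations are at
 * least PAGE_SIZE aligned, so the low bits of @vaddr are free to carry the
 * allocation order.  With Linux PAGE_MASK semantics, the packing used by
 * ttm_pool_alloc_page()/ttm_pool_free_page() below amounts to:
 *
 *	dma->vaddr = (unsigned long)vaddr | order;
 *	vaddr = (void *)(dma->vaddr & PAGE_MASK);
 *	order = dma->vaddr & ~PAGE_MASK;
 *
 * The OpenBSD path instead stashes the struct ttm_pool_dma pointer in the
 * page's objt.rbt_parent field, see ttm_pool_alloc_page() further down.
 */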

static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[MAX_ORDER];
static struct ttm_pool_type global_uncached[MAX_ORDER];

static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;

#ifdef __linux__

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;

	/* Don't set the __GFP_COMP flag for higher order allocations.
	 * Mapping pages directly into a userspace process and calling
	 * put_page() on a TTM allocated page is illegal.
	 */
	if (order)
		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
			__GFP_KSWAPD_RECLAIM;

	if (!pool->use_dma_alloc) {
		p = alloc_pages(gfp_flags, order);
		if (p)
			p->private = order;

		return p;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;

	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);

	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}

/* Reset the caching and pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	if (!pool || !pool->use_dma_alloc) {
		__free_pages(p, order);
		return;
	}

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}

#else

static struct vm_page *ttm_pool_alloc_page(struct ttm_pool *pool,
					   gfp_t gfp_flags, unsigned int order,
					   bus_dma_tag_t dmat)
{
	struct ttm_pool_dma *dma;
	struct vm_page *p;
	struct uvm_constraint_range *constraint = &no_constraint;
	int flags = (gfp_flags & M_NOWAIT) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
	int dmaflags = BUS_DMA_64BIT;
	int nsegs;

	if (pool->use_dma32) {
		constraint = &dma_constraint;
		dmaflags &= ~BUS_DMA_64BIT;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (bus_dmamap_create(dmat, (1ULL << order) * PAGE_SIZE, 1,
	    (1ULL << order) * PAGE_SIZE, 0, flags | dmaflags, &dma->map))
		goto error_free;
#ifdef bus_dmamem_alloc_range
	if (bus_dmamem_alloc_range(dmat, (1ULL << order) * PAGE_SIZE,
	    PAGE_SIZE, 0, &dma->seg, 1, &nsegs, flags | BUS_DMA_ZERO,
	    constraint->ucr_low, constraint->ucr_high)) {
		bus_dmamap_destroy(dmat, dma->map);
		goto error_free;
	}
#else
	if (bus_dmamem_alloc(dmat, (1ULL << order) * PAGE_SIZE,
	    PAGE_SIZE, 0, &dma->seg, 1, &nsegs, flags | BUS_DMA_ZERO)) {
		bus_dmamap_destroy(dmat, dma->map);
		goto error_free;
	}
#endif
	if (bus_dmamap_load_raw(dmat, dma->map, &dma->seg, 1,
	    (1ULL << order) * PAGE_SIZE, flags)) {
		bus_dmamem_free(dmat, &dma->seg, 1);
		bus_dmamap_destroy(dmat, dma->map);
		goto error_free;
	}
	dma->dmat = dmat;
	dma->addr = dma->map->dm_segs[0].ds_addr;

#ifndef __sparc64__
	p = PHYS_TO_VM_PAGE(dma->seg.ds_addr);
#else
	p = TAILQ_FIRST((struct pglist *)dma->seg._ds_mlist);
#endif

	p->objt.rbt_parent = (struct rb_entry *)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}
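
/* Illustrative note (not from the original file): the sequence above is the
 * bus_dma(9) counterpart of dma_alloc_attrs() in the Linux path, roughly:
 *
 *	bus_dmamap_create():   reserve a map for one contiguous segment
 *	bus_dmamem_alloc():    allocate zeroed, physically contiguous pages
 *	bus_dmamap_load_raw(): obtain the device-visible DMA address
 *
 * The struct ttm_pool_dma pointer is then stashed in p->objt.rbt_parent,
 * playing the role that page->private plays on Linux.
 */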

static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct vm_page *p)
{
	struct ttm_pool_dma *dma;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	dma = (struct ttm_pool_dma *)p->objt.rbt_parent;
	bus_dmamap_unload(dma->dmat, dma->map);
	bus_dmamem_free(dma->dmat, &dma->seg, 1);
	bus_dmamap_destroy(dma->dmat, dma->map);
	kfree(dma);
}

#endif

/* Apply a new caching to an array of pages */
static int ttm_pool_apply_caching(struct vm_page **first, struct vm_page **last,
				  enum ttm_caching caching)
{
#ifdef CONFIG_X86
	unsigned int num_pages = last - first;

	if (!num_pages)
		return 0;

	switch (caching) {
	case ttm_cached:
		break;
	case ttm_write_combined:
		return set_pages_array_wc(first, num_pages);
	case ttm_uncached:
		return set_pages_array_uc(first, num_pages);
	}
#endif
	return 0;
}
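
/* Illustrative note (not from the original file): without CONFIG_X86 the
 * function above compiles to a no-op returning 0, since only x86 needs the
 * linear-mapping attribute changes described at the top of this file.  The
 * range [first, last) is half-open, so passing the same pointer for both
 * skips the update entirely, which is how ttm_pool_alloc() tracks the
 * already-converted prefix of the page array.
 */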

#ifdef __linux__

/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct vm_page *p, dma_addr_t **dma_addr)
{
	dma_addr_t addr;
	unsigned int i;

	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;

		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, addr))
			return -EFAULT;
	}

	for (i = 1 << order; i ; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}

/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (pool->use_dma_alloc)
		return;

	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}

#else

static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct vm_page *p, dma_addr_t **dma_addr)
{
	struct ttm_pool_dma *dma;
	dma_addr_t addr;
	unsigned int i;

	dma = (struct ttm_pool_dma *)p->objt.rbt_parent;
	addr = dma->addr;

	for (i = 1 << order; i ; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}

static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Nothing to do here: the bus_dma mapping is torn down in
	 * ttm_pool_free_page() via bus_dmamap_unload().
	 */
}

#endif

/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct vm_page *p)
{
	unsigned int i, num_pages = 1 << pt->order;
	struct ttm_pool_type_lru *entry;

	for (i = 0; i < num_pages; ++i) {
#ifdef notyet
		if (PageHighMem(p))
			clear_highpage(p + i);
		else
#endif
			pmap_zero_page(p + i);
	}

	entry = malloc(sizeof(struct ttm_pool_type_lru), M_DRM, M_WAITOK);
	entry->pg = p;
	spin_lock(&pt->lock);
	LIST_INSERT_HEAD(&pt->lru, entry, entries);
	spin_unlock(&pt->lock);
	atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct vm_page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct vm_page *p = NULL;
	struct ttm_pool_type_lru *entry;

	spin_lock(&pt->lock);
	if (!LIST_EMPTY(&pt->lru)) {
		entry = LIST_FIRST(&pt->lru);
		p = entry->pg;
		atomic_long_sub(1 << pt->order, &allocated_pages);
		LIST_REMOVE(entry, entries);
		free(entry, M_DRM, sizeof(struct ttm_pool_type_lru));
	}
	spin_unlock(&pt->lock);

	return p;
}
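
/* Minimal usage sketch (illustrative only, assuming a struct ttm_pool_type
 * *pt set up with ttm_pool_type_init()): pages given back are zeroed and
 * can later be recycled without a fresh bus_dma allocation:
 *
 *	ttm_pool_type_give(pt, p);	zero p and put it on the LRU
 *	p = ttm_pool_type_take(pt);	recycle it, or NULL if drained
 *
 * allocated_pages is adjusted by 1 << pt->order on both sides.
 */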

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool = pool;
	pt->caching = caching;
	pt->order = order;
	mtx_init(&pt->lock, IPL_NONE);
	INIT_LIST_HEAD(&pt->pages);
	LIST_INIT(&pt->lru);

	spin_lock(&shrinker_lock);
	list_add_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	struct vm_page *p;
	struct ttm_pool_type_lru *entry;

	spin_lock(&shrinker_lock);
	list_del(&pt->shrinker_list);
	spin_unlock(&shrinker_lock);

	while ((p = ttm_pool_type_take(pt)))
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);

	while (!LIST_EMPTY(&pt->lru)) {
		entry = LIST_FIRST(&pt->lru);
		LIST_REMOVE(entry, entries);
		free(entry, M_DRM, sizeof(struct ttm_pool_type_lru));
	}
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	if (pool->use_dma_alloc)
		return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
	switch (caching) {
	case ttm_write_combined:
		if (pool->use_dma32)
			return &global_dma32_write_combined[order];

		return &global_write_combined[order];
	case ttm_uncached:
		if (pool->use_dma32)
			return &global_dma32_uncached[order];

		return &global_uncached[order];
	default:
		break;
	}
#endif

	return NULL;
}

/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	unsigned int num_pages;
	struct vm_page *p;

	spin_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
	list_move_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);

	p = ttm_pool_type_take(pt);
	if (p) {
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_pages = 1 << pt->order;
	} else {
		num_pages = 0;
	}

	return num_pages;
}
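
/* Illustrative note (not from the original file): each call frees at most
 * one pool entry, i.e. 1 << pt->order pages, and rotates that pool type to
 * the list tail so the pools are shrunk round-robin.  Callers loop when
 * they need to release more, as ttm_pool_free() does below:
 *
 *	while (atomic_long_read(&allocated_pages) > page_pool_size)
 *		ttm_pool_shrink();
 */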

#ifdef notyet

/* Return the allocation order for a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct vm_page *p)
{
	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		return dma->vaddr & ~LINUX_PAGE_MASK;
	}

	return p->private;
}

#endif /* notyet */

/* Called when we got a page, either from a pool or newly allocated */
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
				   struct vm_page *p, dma_addr_t **dma_addr,
				   unsigned long *num_pages,
				   struct vm_page ***pages,
				   unsigned long **orders)
{
	unsigned int i;
	int r;

	if (*dma_addr) {
		r = ttm_pool_map(pool, order, p, dma_addr);
		if (r)
			return r;
	}

	*num_pages -= 1 << order;
	for (i = 1 << order; i; --i, ++(*pages), ++p, ++(*orders)) {
		**pages = p;
		**orders = order;
	}

	return 0;
}
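
/* Illustrative note (not from the original file): for an order-2 block the
 * function above consumes 4 from *num_pages, points four consecutive
 * *pages slots at p, p + 1, p + 2, p + 3, stores the order in the matching
 * *orders slots and, when a DMA address array is present, ttm_pool_map()
 * has written four PAGE_SIZE-spaced addresses into *dma_addr.
 */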

/**
 * ttm_pool_free_range() - Free a range of TTM pages
 * @pool: The pool used for allocating.
 * @tt: The struct ttm_tt holding the page pointers.
 * @caching: The page caching mode used by the range.
 * @start_page: index for first page to free.
 * @end_page: index for last page to free + 1.
 *
 * During allocation the ttm_tt page-vector may be populated with ranges of
 * pages with different attributes if allocation hit an error without being
 * able to completely fulfill the allocation. This function can be used
 * to free these individual ranges.
 */
static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
				enum ttm_caching caching,
				pgoff_t start_page, pgoff_t end_page)
{
	struct vm_page **pages = &tt->pages[start_page];
	unsigned int order;
	pgoff_t i, nr;

	for (i = start_page; i < end_page; i += nr, pages += nr) {
		struct ttm_pool_type *pt = NULL;

		order = tt->orders[i];
		nr = (1UL << order);
		if (tt->dma_address)
			ttm_pool_unmap(pool, tt->dma_address[i], nr);

		pt = ttm_pool_select_type(pool, caching, order);
		if (pt)
			ttm_pool_type_give(pt, *pages);
		else
			ttm_pool_free_page(pool, caching, order, *pages);
	}
}

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	pgoff_t num_pages = tt->num_pages;
	dma_addr_t *dma_addr = tt->dma_address;
	struct vm_page **caching = tt->pages;
	struct vm_page **pages = tt->pages;
	enum ttm_caching page_caching;
	gfp_t gfp_flags = GFP_USER;
	pgoff_t caching_divide;
	unsigned int order;
	struct vm_page *p;
	int r;
	unsigned long *orders = tt->orders;

	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
#ifdef __linux__
	WARN_ON(dma_addr && !pool->dev);
#endif

	if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (ctx->gfp_retry_mayfail)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	if (pool->use_dma32)
		gfp_flags |= GFP_DMA32;
	else
		gfp_flags |= GFP_HIGHUSER;

	for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
	     num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		struct ttm_pool_type *pt;

		page_caching = tt->caching;
		pt = ttm_pool_select_type(pool, tt->caching, order);
		p = pt ? ttm_pool_type_take(pt) : NULL;
		if (p) {
			r = ttm_pool_apply_caching(caching, pages,
						   tt->caching);
			if (r)
				goto error_free_page;

			caching = pages;
			do {
				r = ttm_pool_page_allocated(pool, order, p,
							    &dma_addr,
							    &num_pages,
							    &pages, &orders);
				if (r)
					goto error_free_page;

				caching = pages;
				if (num_pages < (1 << order))
					break;

				p = ttm_pool_type_take(pt);
			} while (p);
		}

		page_caching = ttm_cached;
		while (num_pages >= (1 << order) &&
		       (p = ttm_pool_alloc_page(pool, gfp_flags, order, tt->dmat))) {

			if (PageHighMem(p)) {
				r = ttm_pool_apply_caching(caching, pages,
							   tt->caching);
				if (r)
					goto error_free_page;
				caching = pages;
			}
			r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
						    &num_pages, &pages, &orders);
			if (r)
				goto error_free_page;
			if (PageHighMem(p))
				caching = pages;
		}

		if (!p) {
			if (order) {
				--order;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}
	}

	r = ttm_pool_apply_caching(caching, pages, tt->caching);
	if (r)
		goto error_free_all;

	return 0;

error_free_page:
	ttm_pool_free_page(pool, page_caching, order, p);

error_free_all:
	num_pages = tt->num_pages - num_pages;
	caching_divide = caching - tt->pages;
	ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
	ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);

	return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);
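
/* Worked example (illustrative, not from the original file): for
 * num_pages == 7 the loop above starts at order = __fls(7) = 2 and fills
 * the ttm_tt with one order-2 block (4 pages), one order-1 block (2 pages)
 * and one order-0 page, stepping the order down whenever fewer than
 * 1 << order pages remain or a higher-order allocation fails.
 */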

/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);

	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);

/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   bool use_dma_alloc, bool use_dma32)
{
	unsigned int i, j;

	WARN_ON(!dev && use_dma_alloc);

	pool->dev = dev;
	pool->use_dma_alloc = use_dma_alloc;
	pool->use_dma32 = use_dma32;

	if (use_dma_alloc) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j < MAX_ORDER; ++j)
				ttm_pool_type_init(&pool->caching[i].orders[j],
						   pool, i, j);
	}
}
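
/* Minimal lifecycle sketch (illustrative only; error handling omitted and
 * the surrounding driver objects dev, tt and ctx are assumed):
 *
 *	struct ttm_pool pool;
 *
 *	ttm_pool_init(&pool, dev, true, false);
 *	r = ttm_pool_alloc(&pool, tt, &ctx);	populate a ttm_tt
 *	...
 *	ttm_pool_free(&pool, tt);		give the pages back
 *	ttm_pool_fini(&pool);			drain and unregister
 */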

/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;

	if (pool->use_dma_alloc) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j < MAX_ORDER; ++j)
				ttm_pool_type_fini(&pool->caching[i].orders[j]);
	}

	/* We removed the pool types from the LRU, but we need to also make sure
	 * that no shrinker is concurrently freeing pages from the pool.
	 */
	synchronize_shrinkers();
}

/* As long as pages are available make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	do
		num_freed += ttm_pool_shrink();
	while (!num_freed && atomic_long_read(&allocated_pages));

	return num_freed;
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
#ifdef notyet
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : SHRINK_EMPTY;
#else
	STUB();
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : 0;
#endif
}

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int count = 0;
	struct ttm_pool_type_lru *entry;

	spin_lock(&pt->lock);
	/* Only used for debugfs, the overhead doesn't matter */
	LIST_FOREACH(entry, &pt->lru, entries)
		++count;
	spin_unlock(&pt->lock);

	return count;
}

/* Print a nice header for the order */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
	unsigned int i;

	seq_puts(m, "\t ");
	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}

/* Dump the total amount of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);
}

/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);
	seq_puts(m, "wc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
	seq_puts(m, "uc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_uncached, m);
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;

	if (!pool->use_dma_alloc) {
		seq_puts(m, "unused\n");
		return 0;
	}

	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		seq_puts(m, "DMA ");
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);
	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct shrink_control sc = { .gfp_mask = GFP_NOFS };

	fs_reclaim_acquire(GFP_KERNEL);
	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
		   ttm_pool_shrinker_scan(&mm_shrinker, &sc));
	fs_reclaim_release(GFP_KERNEL);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);

#endif

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	if (!page_pool_size)
		page_pool_size = num_pages;

	mtx_init(&shrinker_lock, IPL_NONE);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
				   ttm_uncached, i);
	}

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_globals_fops);
	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_shrink_fops);
#endif

	mm_shrinker.count_objects = ttm_pool_shrinker_count;
	mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker.seeks = 1;
	return register_shrinker(&mm_shrinker, "drm-ttm_pool");
}

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);

		ttm_pool_type_fini(&global_dma32_write_combined[i]);
		ttm_pool_type_fini(&global_dma32_uncached[i]);
	}

	unregister_shrinker(&mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
}
957