1 /*	$NetBSD: ttm_page_alloc_dma.c,v 1.3 2021/12/18 23:45:44 riastradh Exp $	*/
2 
3 /*
4  * Copyright 2011 (c) Oracle Corp.
5 
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sub license,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the
14  * next paragraph) shall be included in all copies or substantial portions
15  * of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23  * DEALINGS IN THE SOFTWARE.
24  *
25  * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
26  */
27 
28 /*
29  * A simple DMA pool loosely based on dmapool.c. It has certain advantages
30  * over the DMA pools:
31  * - Pool collects recently freed pages for reuse (and hooks up to
32  *   the shrinker).
33  * - Tracks pages that are currently in use.
34  * - Tracks whether the page is UC, WC or cached (and reverts to WB
35  *   when freed).
36  */
37 
38 #include <sys/cdefs.h>
39 __KERNEL_RCSID(0, "$NetBSD: ttm_page_alloc_dma.c,v 1.3 2021/12/18 23:45:44 riastradh Exp $");
40 
41 #define pr_fmt(fmt) "[TTM] " fmt
42 
43 #include <linux/dma-mapping.h>
44 #include <linux/list.h>
45 #include <linux/seq_file.h> /* for seq_printf */
46 #include <linux/slab.h>
47 #include <linux/spinlock.h>
48 #include <linux/highmem.h>
49 #include <linux/mm_types.h>
50 #include <linux/module.h>
51 #include <linux/mm.h>
52 #include <linux/atomic.h>
53 #include <linux/device.h>
54 #include <linux/kthread.h>
55 #include <drm/ttm/ttm_bo_driver.h>
56 #include <drm/ttm/ttm_page_alloc.h>
57 #include <drm/ttm/ttm_set_memory.h>
58 
59 #define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
60 #define SMALL_ALLOCATION		4
61 #define FREE_ALL_PAGES			(~0U)
62 #define VADDR_FLAG_HUGE_POOL		1UL
63 #define VADDR_FLAG_UPDATED_COUNT	2UL
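
/*
 * NUM_PAGES_TO_ALLOC is the number of 'struct page' pointers that fit in a
 * single page, so the per-call bookkeeping arrays never exceed one page.
 * The VADDR_FLAG_* bits are stashed in the low bits of dma_page.vaddr,
 * which is page aligned and therefore has those bits free.
 */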
64 
65 enum pool_type {
66 	IS_UNDEFINED	= 0,
67 	IS_WC		= 1 << 1,
68 	IS_UC		= 1 << 2,
69 	IS_CACHED	= 1 << 3,
70 	IS_DMA32	= 1 << 4,
71 	IS_HUGE		= 1 << 5
72 };
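
/*
 * The pool types above are bit flags; ttm_to_type() ORs them together, so a
 * write-combined pool restricted to the DMA32 zone has type IS_WC | IS_DMA32.
 */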
73 
74 /*
75  * The pool structure. There are up to nine pools:
76  *  - generic (not restricted to DMA32):
77  *      - write combined, uncached, cached.
78  *  - dma32 (up to 2^32, so up to 4GB):
79  *      - write combined, uncached, cached.
80  *  - huge (not restricted to DMA32):
81  *      - write combined, uncached, cached.
82  * for each 'struct device'. The 'cached' pool is for pages that are actively used.
83  * The other ones can be shrunk by the shrinker API if necessary.
84  * @pools: The 'struct device->dma_pools' link.
85  * @type: Type of the pool
86  * @lock: Protects the free_list from concurrent access. Must be
87  * used with the irqsave/irqrestore variants because the pool allocator may be
88  * called from delayed work.
89  * @free_list: Pool of pages that are free to be used. No order requirements.
90  * @dev: The device that is associated with these pools.
91  * @size: Size used during DMA allocation.
92  * @npages_free: Count of available pages for re-use.
93  * @npages_in_use: Count of pages that are in use.
94  * @nfrees: Stats when pool is shrinking.
95  * @nrefills: Stats when the pool is grown.
96  * @gfp_flags: Flags to pass for alloc_page.
97  * @name: Name of the pool.
98  * @dev_name: Name derived from dev - similar to how dev_info works.
99  *   Used during shutdown as the dev_info during release is unavailable.
100  */
101 struct dma_pool {
102 	struct list_head pools; /* The 'struct device->dma_pools' link */
103 	enum pool_type type;
104 	spinlock_t lock;
105 	struct list_head free_list;
106 	struct device *dev;
107 	unsigned size;
108 	unsigned npages_free;
109 	unsigned npages_in_use;
110 	unsigned long nfrees; /* Stats when shrunk. */
111 	unsigned long nrefills; /* Stats when grown. */
112 	gfp_t gfp_flags;
113 	char name[13]; /* "cached dma32" */
114 	char dev_name[64]; /* Constructed from dev */
115 };
116 
117 /*
118  * The accounting structure that keeps track of the allocated page along
119  * with its DMA address.
120  * @page_list: The link used to queue the page on the pool's 'free_list'
121  *   (or the tt's 'pages_list' while in use).
122  * @vaddr: The virtual address of the page, plus a flag if it is from a huge pool.
123  * @dma: The bus address of the page. If the page is not allocated
124  *   via the DMA API, it will be -1.
125  */
126 struct dma_page {
127 	struct list_head page_list;
128 	unsigned long vaddr;
129 	struct page *p;
130 	dma_addr_t dma;
131 };
132 
133 /*
134  * Limits for the pool. They are handled without locks because the only place
135  * where they may change is the sysfs store. They won't have an immediate effect
136  * anyway, so forcing serialization to access them is pointless.
137  */
138 
139 struct ttm_pool_opts {
140 	unsigned	alloc_size;
141 	unsigned	max_size;
142 	unsigned	small;
143 };
144 
145 /*
146  * Contains the list of all of the 'struct device' and their corresponding
147  * DMA pools. Guarded by _manager->lock.
148  * @pools: The link to 'struct ttm_pool_manager->pools'
149  * @dev: The 'struct device' associated with the 'pool'
150  * @pool: The 'struct dma_pool' associated with the 'dev'
151  */
152 struct device_pools {
153 	struct list_head pools;
154 	struct device *dev;
155 	struct dma_pool *pool;
156 };
157 
158 /*
159  * struct ttm_pool_manager - Holds memory pools for fast allocation
160  *
161  * @lock: Lock used when adding/removing from pools
162  * @pools: List of 'struct device' and 'struct dma_pool' tuples.
163  * @options: Limits for the pool.
164  * @npools: Total number of pools in existence.
165  * @mm_shrink: The structure used by [un]register_shrinker.
166  */
167 struct ttm_pool_manager {
168 	struct mutex		lock;
169 	struct list_head	pools;
170 	struct ttm_pool_opts	options;
171 	unsigned		npools;
172 	struct shrinker		mm_shrink;
173 	struct kobject		kobj;
174 };
175 
176 static struct ttm_pool_manager *_manager;
177 
178 static struct attribute ttm_page_pool_max = {
179 	.name = "pool_max_size",
180 	.mode = S_IRUGO | S_IWUSR
181 };
182 static struct attribute ttm_page_pool_small = {
183 	.name = "pool_small_allocation",
184 	.mode = S_IRUGO | S_IWUSR
185 };
186 static struct attribute ttm_page_pool_alloc_size = {
187 	.name = "pool_allocation_size",
188 	.mode = S_IRUGO | S_IWUSR
189 };
190 
191 static struct attribute *ttm_pool_attrs[] = {
192 	&ttm_page_pool_max,
193 	&ttm_page_pool_small,
194 	&ttm_page_pool_alloc_size,
195 	NULL
196 };
197 
198 static void ttm_pool_kobj_release(struct kobject *kobj)
199 {
200 	struct ttm_pool_manager *m =
201 		container_of(kobj, struct ttm_pool_manager, kobj);
202 	kfree(m);
203 }
204 
205 static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
206 			      const char *buffer, size_t size)
207 {
208 	struct ttm_pool_manager *m =
209 		container_of(kobj, struct ttm_pool_manager, kobj);
210 	int chars;
211 	unsigned val;
212 
213 	chars = sscanf(buffer, "%u", &val);
214 	if (chars == 0)
215 		return size;
216 
217 	/* Convert kb to number of pages */
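	/* For example, with 4 KiB pages PAGE_SIZE >> 10 is 4, so writing
	 * 1024 (KiB) through sysfs selects 256 pages. */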
218 	val = val / (PAGE_SIZE >> 10);
219 
220 	if (attr == &ttm_page_pool_max) {
221 		m->options.max_size = val;
222 	} else if (attr == &ttm_page_pool_small) {
223 		m->options.small = val;
224 	} else if (attr == &ttm_page_pool_alloc_size) {
225 		if (val > NUM_PAGES_TO_ALLOC*8) {
226 			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
227 			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
228 			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
229 			return size;
230 		} else if (val > NUM_PAGES_TO_ALLOC) {
231 			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
232 				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
233 		}
234 		m->options.alloc_size = val;
235 	}
236 
237 	return size;
238 }
239 
240 static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
241 			     char *buffer)
242 {
243 	struct ttm_pool_manager *m =
244 		container_of(kobj, struct ttm_pool_manager, kobj);
245 	unsigned val = 0;
246 
247 	if (attr == &ttm_page_pool_max)
248 		val = m->options.max_size;
249 	else if (attr == &ttm_page_pool_small)
250 		val = m->options.small;
251 	else if (attr == &ttm_page_pool_alloc_size)
252 		val = m->options.alloc_size;
253 
254 	val = val * (PAGE_SIZE >> 10);
255 
256 	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
257 }
258 
259 static const struct sysfs_ops ttm_pool_sysfs_ops = {
260 	.show = &ttm_pool_show,
261 	.store = &ttm_pool_store,
262 };
263 
264 static struct kobj_type ttm_pool_kobj_type = {
265 	.release = &ttm_pool_kobj_release,
266 	.sysfs_ops = &ttm_pool_sysfs_ops,
267 	.default_attrs = ttm_pool_attrs,
268 };
269 
270 static int ttm_set_pages_caching(struct dma_pool *pool,
271 				 struct page **pages, unsigned cpages)
272 {
273 	int r = 0;
274 	/* Set page caching */
275 	if (pool->type & IS_UC) {
276 		r = ttm_set_pages_array_uc(pages, cpages);
277 		if (r)
278 			pr_err("%s: Failed to set %d pages to uc!\n",
279 			       pool->dev_name, cpages);
280 	}
281 	if (pool->type & IS_WC) {
282 		r = ttm_set_pages_array_wc(pages, cpages);
283 		if (r)
284 			pr_err("%s: Failed to set %d pages to wc!\n",
285 			       pool->dev_name, cpages);
286 	}
287 	return r;
288 }
289 
290 static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
291 {
292 	unsigned long attrs = 0;
293 	dma_addr_t dma = d_page->dma;
294 	d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
295 	if (pool->type & IS_HUGE)
296 		attrs = DMA_ATTR_NO_WARN;
297 
298 	dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs);
299 
300 	kfree(d_page);
301 	d_page = NULL;
302 }
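/*
 * Allocate one pool-sized DMA buffer, remember its bus address and backing
 * 'struct page', and tag the vaddr when the allocation came from a huge pool.
 */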
303 static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
304 {
305 	struct dma_page *d_page;
306 	unsigned long attrs = 0;
307 	void *vaddr;
308 
309 	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
310 	if (!d_page)
311 		return NULL;
312 
313 	if (pool->type & IS_HUGE)
314 		attrs = DMA_ATTR_NO_WARN;
315 
316 	vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma,
317 				pool->gfp_flags, attrs);
318 	if (vaddr) {
319 		if (is_vmalloc_addr(vaddr))
320 			d_page->p = vmalloc_to_page(vaddr);
321 		else
322 			d_page->p = virt_to_page(vaddr);
323 		d_page->vaddr = (unsigned long)vaddr;
324 		if (pool->type & IS_HUGE)
325 			d_page->vaddr |= VADDR_FLAG_HUGE_POOL;
326 	} else {
327 		kfree(d_page);
328 		d_page = NULL;
329 	}
330 	return d_page;
331 }
332 static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
333 {
334 	enum pool_type type = IS_UNDEFINED;
335 
336 	if (flags & TTM_PAGE_FLAG_DMA32)
337 		type |= IS_DMA32;
338 	if (cstate == tt_cached)
339 		type |= IS_CACHED;
340 	else if (cstate == tt_uncached)
341 		type |= IS_UC;
342 	else
343 		type |= IS_WC;
344 
345 	return type;
346 }
347 
348 static void ttm_pool_update_free_locked(struct dma_pool *pool,
349 					unsigned freed_pages)
350 {
351 	pool->npages_free -= freed_pages;
352 	pool->nfrees += freed_pages;
353 
354 }
355 
356 /* set memory back to wb and free the pages. */
357 static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
358 {
359 	struct page *page = d_page->p;
360 	unsigned num_pages;
361 
362 	/* Don't set WB on WB page pool. */
363 	if (!(pool->type & IS_CACHED)) {
364 		num_pages = pool->size / PAGE_SIZE;
365 		if (ttm_set_pages_wb(page, num_pages))
366 			pr_err("%s: Failed to set %d pages to wb!\n",
367 			       pool->dev_name, num_pages);
368 	}
369 
370 	list_del(&d_page->page_list);
371 	__ttm_dma_free_page(pool, d_page);
372 }
373 
374 static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
375 			      struct page *pages[], unsigned npages)
376 {
377 	struct dma_page *d_page, *tmp;
378 
379 	if (pool->type & IS_HUGE) {
380 		list_for_each_entry_safe(d_page, tmp, d_pages, page_list)
381 			ttm_dma_page_put(pool, d_page);
382 
383 		return;
384 	}
385 
386 	/* Don't set WB on WB page pool. */
387 	if (npages && !(pool->type & IS_CACHED) &&
388 	    ttm_set_pages_array_wb(pages, npages))
389 		pr_err("%s: Failed to set %d pages to wb!\n",
390 		       pool->dev_name, npages);
391 
392 	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
393 		list_del(&d_page->page_list);
394 		__ttm_dma_free_page(pool, d_page);
395 	}
396 }
397 
398 /*
399  * Free pages from pool.
400  *
401  * To prevent hogging the ttm_swap process we free at most NUM_PAGES_TO_ALLOC
402  * pages in one go.
403  *
404  * @pool: The pool to free the pages from.
405  * @nr_free: The number of pages to free; FREE_ALL_PAGES frees every page.
406  * @use_static: Safe to use the static buffer.
407  **/
408 static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
409 				       bool use_static)
410 {
411 	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
412 	unsigned long irq_flags;
413 	struct dma_page *dma_p, *tmp;
414 	struct page **pages_to_free;
415 	struct list_head d_pages;
416 	unsigned freed_pages = 0,
417 		 npages_to_free = nr_free;
418 
419 	if (NUM_PAGES_TO_ALLOC < nr_free)
420 		npages_to_free = NUM_PAGES_TO_ALLOC;
421 
422 	if (use_static)
423 		pages_to_free = static_buf;
424 	else
425 		pages_to_free = kmalloc_array(npages_to_free,
426 					      sizeof(struct page *),
427 					      GFP_KERNEL);
428 
429 	if (!pages_to_free) {
430 		pr_debug("%s: Failed to allocate memory for pool free operation\n",
431 		       pool->dev_name);
432 		return 0;
433 	}
434 	INIT_LIST_HEAD(&d_pages);
435 restart:
436 	spin_lock_irqsave(&pool->lock, irq_flags);
437 
438 	/* We pick the oldest ones off the list */
439 	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
440 					 page_list) {
441 		if (freed_pages >= npages_to_free)
442 			break;
443 
444 		/* Move the dma_page from one list to another. */
445 		list_move(&dma_p->page_list, &d_pages);
446 
447 		pages_to_free[freed_pages++] = dma_p->p;
448 		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
449 		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
450 
451 			ttm_pool_update_free_locked(pool, freed_pages);
452 			/*
453 			 * Because changing page caching is costly,
454 			 * we unlock the pool to prevent stalling.
455 			 */
456 			spin_unlock_irqrestore(&pool->lock, irq_flags);
457 
458 			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
459 					  freed_pages);
460 
461 			INIT_LIST_HEAD(&d_pages);
462 
463 			if (likely(nr_free != FREE_ALL_PAGES))
464 				nr_free -= freed_pages;
465 
466 			if (NUM_PAGES_TO_ALLOC >= nr_free)
467 				npages_to_free = nr_free;
468 			else
469 				npages_to_free = NUM_PAGES_TO_ALLOC;
470 
471 			freed_pages = 0;
472 
473 			/* free all so restart the processing */
474 			if (nr_free)
475 				goto restart;
476 
477 			/* Not allowed to fall through or break because the
478 			 * following code runs with the spinlock held, while
479 			 * we have already dropped it here.
480 			 */
481 			goto out;
482 
483 		}
484 	}
485 
486 	/* remove range of pages from the pool */
487 	if (freed_pages) {
488 		ttm_pool_update_free_locked(pool, freed_pages);
489 		nr_free -= freed_pages;
490 	}
491 
492 	spin_unlock_irqrestore(&pool->lock, irq_flags);
493 
494 	if (freed_pages)
495 		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
496 out:
497 	if (pages_to_free != static_buf)
498 		kfree(pages_to_free);
499 	return nr_free;
500 }
501 
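/*
 * Drop the pool of the given type for this device: free every page it still
 * holds, unlink it from both the global manager list and dev->dma_pools, and
 * free the bookkeeping structures.
 */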
502 static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
503 {
504 	struct device_pools *p;
505 	struct dma_pool *pool;
506 
507 	if (!dev)
508 		return;
509 
510 	mutex_lock(&_manager->lock);
511 	list_for_each_entry_reverse(p, &_manager->pools, pools) {
512 		if (p->dev != dev)
513 			continue;
514 		pool = p->pool;
515 		if (pool->type != type)
516 			continue;
517 
518 		list_del(&p->pools);
519 		kfree(p);
520 		_manager->npools--;
521 		break;
522 	}
523 	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
524 		if (pool->type != type)
525 			continue;
526 		/* Takes a spinlock.. */
527 		/* OK to use static buffer since global mutex is held. */
528 		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
529 		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
530 		/* This code path is called after _all_ references to the
531 		 * struct device have been dropped - so nobody should be
532 		 * touching it. In case somebody is trying to _add_ we are
533 		 * guarded by the mutex. */
534 		list_del(&pool->pools);
535 		kfree(pool);
536 		break;
537 	}
538 	mutex_unlock(&_manager->lock);
539 }
540 
541 /*
542  * This destructor is run when the 'struct device' is freed,
543  * albeit the pool might have already been freed earlier.
544  */
545 static void ttm_dma_pool_release(struct device *dev, void *res)
546 {
547 	struct dma_pool *pool = *(struct dma_pool **)res;
548 
549 	if (pool)
550 		ttm_dma_free_pool(dev, pool->type);
551 }
552 
553 static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
554 {
555 	return *(struct dma_pool **)res == match_data;
556 }
557 
558 static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
559 					  enum pool_type type)
560 {
561 	const char *n[] = {"wc", "uc", "cached", " dma32", "huge"};
562 	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE};
563 	struct device_pools *sec_pool = NULL;
564 	struct dma_pool *pool = NULL, **ptr;
565 	unsigned i;
566 	int ret = -ENODEV;
567 	char *p;
568 
569 	if (!dev)
570 		return NULL;
571 
572 	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
573 	if (!ptr)
574 		return NULL;
575 
576 	ret = -ENOMEM;
577 
578 	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
579 			    dev_to_node(dev));
580 	if (!pool)
581 		goto err_mem;
582 
583 	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
584 				dev_to_node(dev));
585 	if (!sec_pool)
586 		goto err_mem;
587 
588 	INIT_LIST_HEAD(&sec_pool->pools);
589 	sec_pool->dev = dev;
590 	sec_pool->pool =  pool;
591 
592 	INIT_LIST_HEAD(&pool->free_list);
593 	INIT_LIST_HEAD(&pool->pools);
594 	spin_lock_init(&pool->lock);
595 	pool->dev = dev;
596 	pool->npages_free = pool->npages_in_use = 0;
597 	pool->nfrees = 0;
598 	pool->gfp_flags = flags;
599 	if (type & IS_HUGE)
600 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
601 		pool->size = HPAGE_PMD_SIZE;
602 #else
603 		BUG();
604 #endif
605 	else
606 		pool->size = PAGE_SIZE;
607 	pool->type = type;
608 	pool->nrefills = 0;
609 	p = pool->name;
610 	for (i = 0; i < ARRAY_SIZE(t); i++) {
611 		if (type & t[i]) {
612 			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
613 				      "%s", n[i]);
614 		}
615 	}
616 	*p = 0;
617 	/* We copy the name for pr_* calls because by the time dma_pool_destroy
618 	 * is called the kobj->name has already been deallocated. */
619 	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
620 		 dev_driver_string(dev), dev_name(dev));
621 	mutex_lock(&_manager->lock);
622 	/* You can get the dma_pool from either the global: */
623 	list_add(&sec_pool->pools, &_manager->pools);
624 	_manager->npools++;
625 	/* or from 'struct device': */
626 	list_add(&pool->pools, &dev->dma_pools);
627 	mutex_unlock(&_manager->lock);
628 
629 	*ptr = pool;
630 	devres_add(dev, ptr);
631 
632 	return pool;
633 err_mem:
634 	devres_free(ptr);
635 	kfree(sec_pool);
636 	kfree(pool);
637 	return ERR_PTR(ret);
638 }
639 
640 static struct dma_pool *ttm_dma_find_pool(struct device *dev,
641 					  enum pool_type type)
642 {
643 	struct dma_pool *pool, *tmp;
644 
645 	if (type == IS_UNDEFINED)
646 		return NULL;
647 
648 	/* NB: We iterate on the 'struct device' which has no spinlock, but
649 	 * it does have a kref which we have taken. The kref is taken during
650 	 * graphic driver loading - in the drm_pci_init it calls either
651 	 * pci_dev_get or pci_register_driver which both end up taking a kref
652 	 * on 'struct device'.
653 	 *
654 	 * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
655  * and call the dev_res destructors: ttm_dma_pool_release. The nice
656  * thing is that at that point in time there are no pages associated with the
657  * driver, so this function will not be called.
658 	 */
659 	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools)
660 		if (pool->type == type)
661 			return pool;
662 	return NULL;
663 }
664 
665 /*
666  * Free the pages that failed to change their caching state. Pages that did
667  * change their caching state are left on 'd_pages' so the caller can still
668  * add them to the pool.
669  */
670 static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
671 						 struct list_head *d_pages,
672 						 struct page **failed_pages,
673 						 unsigned cpages)
674 {
675 	struct dma_page *d_page, *tmp;
676 	struct page *p;
677 	unsigned i = 0;
678 
679 	p = failed_pages[0];
680 	if (!p)
681 		return;
682 	/* Find the failed page. */
683 	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
684 		if (d_page->p != p)
685 			continue;
686 		/* .. and then progress over the full list. */
687 		list_del(&d_page->page_list);
688 		__ttm_dma_free_page(pool, d_page);
689 		if (++i < cpages)
690 			p = failed_pages[i];
691 		else
692 			break;
693 	}
694 
695 }
696 
697 /*
698  * Allocate 'count' pages with the pool's caching state, link them onto
699  * 'd_pages' and record their DMA addresses in each 'struct dma_page'.
700  * The full list of pages ends up on 'd_pages'.
701  * We return zero for success, and negative numbers as errors.
702  */
703 static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
704 					struct list_head *d_pages,
705 					unsigned count)
706 {
707 	struct page **caching_array;
708 	struct dma_page *dma_p;
709 	struct page *p;
710 	int r = 0;
711 	unsigned i, j, npages, cpages;
712 	unsigned max_cpages = min(count,
713 			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
714 
715 	/* allocate array for page caching change */
716 	caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
717 				      GFP_KERNEL);
718 
719 	if (!caching_array) {
720 		pr_debug("%s: Unable to allocate table for new pages\n",
721 		       pool->dev_name);
722 		return -ENOMEM;
723 	}
724 
725 	if (count > 1)
726 		pr_debug("%s: (%s:%d) Getting %d pages\n",
727 			 pool->dev_name, pool->name, current->pid, count);
728 
729 	for (i = 0, cpages = 0; i < count; ++i) {
730 		dma_p = __ttm_dma_alloc_page(pool);
731 		if (!dma_p) {
732 			pr_debug("%s: Unable to get page %u\n",
733 				 pool->dev_name, i);
734 
735 			/* store already allocated pages in the pool after
736 			 * setting the caching state */
737 			if (cpages) {
738 				r = ttm_set_pages_caching(pool, caching_array,
739 							  cpages);
740 				if (r)
741 					ttm_dma_handle_caching_state_failure(
742 						pool, d_pages, caching_array,
743 						cpages);
744 			}
745 			r = -ENOMEM;
746 			goto out;
747 		}
748 		p = dma_p->p;
749 		list_add(&dma_p->page_list, d_pages);
750 
751 #ifdef CONFIG_HIGHMEM
752 		/* gfp flags of a highmem page should never be dma32, so
753 		 * we should be fine in such a case
754 		 */
755 		if (PageHighMem(p))
756 			continue;
757 #endif
758 
759 		npages = pool->size / PAGE_SIZE;
760 		for (j = 0; j < npages; ++j) {
761 			caching_array[cpages++] = p + j;
762 			if (cpages == max_cpages) {
763 				/* Note: Cannot hold the spinlock */
764 				r = ttm_set_pages_caching(pool, caching_array,
765 							  cpages);
766 				if (r) {
767 					ttm_dma_handle_caching_state_failure(
768 					     pool, d_pages, caching_array,
769 					     cpages);
770 					goto out;
771 				}
772 				cpages = 0;
773 			}
774 		}
775 	}
776 
777 	if (cpages) {
778 		r = ttm_set_pages_caching(pool, caching_array, cpages);
779 		if (r)
780 			ttm_dma_handle_caching_state_failure(pool, d_pages,
781 					caching_array, cpages);
782 	}
783 out:
784 	kfree(caching_array);
785 	return r;
786 }
787 
788 /*
789  * @return the number of pages available on the pool's free list after refilling.
790  */
791 static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
792 					 unsigned long *irq_flags)
793 {
794 	unsigned count = _manager->options.small;
795 	int r = pool->npages_free;
796 
797 	if (count > pool->npages_free) {
798 		struct list_head d_pages;
799 
800 		INIT_LIST_HEAD(&d_pages);
801 
802 		spin_unlock_irqrestore(&pool->lock, *irq_flags);
803 
804 		/* Returns zero on success, or a negative error code if the
805 		 * allocation failed. */
806 		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
807 
808 		spin_lock_irqsave(&pool->lock, *irq_flags);
809 		if (!r) {
810 			/* Add the freshly allocated pages. */
811 			list_splice(&d_pages, &pool->free_list);
812 			++pool->nrefills;
813 			pool->npages_free += count;
814 			r = count;
815 		} else {
816 			struct dma_page *d_page;
817 			unsigned cpages = 0;
818 
819 			pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
820 				 pool->dev_name, pool->name, r);
821 
822 			list_for_each_entry(d_page, &d_pages, page_list) {
823 				cpages++;
824 			}
825 			list_splice_tail(&d_pages, &pool->free_list);
826 			pool->npages_free += cpages;
827 			r = cpages;
828 		}
829 	}
830 	return r;
831 }
832 
833 /*
834  * The populate list is actually a stack (not that it matters, as TTM
835  * allocates one page at a time).
836  * Return a dma_page pointer on success, otherwise NULL.
837  */
838 static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
839 				  struct ttm_dma_tt *ttm_dma,
840 				  unsigned index)
841 {
842 	struct dma_page *d_page = NULL;
843 	struct ttm_tt *ttm = &ttm_dma->ttm;
844 	unsigned long irq_flags;
845 	int count;
846 
847 	spin_lock_irqsave(&pool->lock, irq_flags);
848 	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
849 	if (count) {
850 		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
851 		ttm->pages[index] = d_page->p;
852 		ttm_dma->dma_address[index] = d_page->dma;
853 		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
854 		pool->npages_in_use += 1;
855 		pool->npages_free -= 1;
856 	}
857 	spin_unlock_irqrestore(&pool->lock, irq_flags);
858 	return d_page;
859 }
860 
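/*
 * Pick the GFP flags for a tt: GFP_DMA32 when the tt is restricted to the
 * DMA32 zone, zeroed pages when requested, and for huge pools a light
 * transparent-hugepage attempt with __GFP_COMP and __GFP_MOVABLE cleared.
 */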
861 static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
862 {
863 	struct ttm_tt *ttm = &ttm_dma->ttm;
864 	gfp_t gfp_flags;
865 
866 	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
867 		gfp_flags = GFP_USER | GFP_DMA32;
868 	else
869 		gfp_flags = GFP_HIGHUSER;
870 	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
871 		gfp_flags |= __GFP_ZERO;
872 
873 	if (huge) {
874 		gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
875 			__GFP_KSWAPD_RECLAIM;
876 		gfp_flags &= ~__GFP_MOVABLE;
877 		gfp_flags &= ~__GFP_COMP;
878 	}
879 
880 	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
881 		gfp_flags |= __GFP_RETRY_MAYFAIL;
882 
883 	return gfp_flags;
884 }
885 
886 /*
887  * On success the pages list will hold the requested number of correctly
888  * cached pages. On failure a negative error code (-ENOMEM, etc.) is returned.
889  */
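/*
 * Populate first tries the huge pool (when transparent hugepages are
 * configured and the tt is not DMA32-restricted) and then falls back to the
 * regular pool for the remaining pages, accounting each page with
 * ttm_mem_global_alloc_page() as it goes.
 */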
890 int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
891 			struct ttm_operation_ctx *ctx)
892 {
893 	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
894 	struct ttm_tt *ttm = &ttm_dma->ttm;
895 	unsigned long num_pages = ttm->num_pages;
896 	struct dma_pool *pool;
897 	struct dma_page *d_page;
898 	enum pool_type type;
899 	unsigned i;
900 	int ret;
901 
902 	if (ttm->state != tt_unpopulated)
903 		return 0;
904 
905 	if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
906 		return -ENOMEM;
907 
908 	INIT_LIST_HEAD(&ttm_dma->pages_list);
909 	i = 0;
910 
911 	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
912 
913 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
914 	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
915 		goto skip_huge;
916 
917 	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
918 	if (!pool) {
919 		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);
920 
921 		pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
922 		if (IS_ERR_OR_NULL(pool))
923 			goto skip_huge;
924 	}
925 
926 	while (num_pages >= HPAGE_PMD_NR) {
927 		unsigned j;
928 
929 		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
930 		if (!d_page)
931 			break;
932 
933 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
934 						pool->size, ctx);
935 		if (unlikely(ret != 0)) {
936 			ttm_dma_unpopulate(ttm_dma, dev);
937 			return -ENOMEM;
938 		}
939 
940 		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
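		/* The huge allocation is treated as physically contiguous, so
		 * the remaining HPAGE_PMD_NR - 1 slots can be filled by
		 * stepping one PAGE_SIZE at a time from the first page. */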
941 		for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
942 			ttm->pages[j] = ttm->pages[j - 1] + 1;
943 			ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
944 				PAGE_SIZE;
945 		}
946 
947 		i += HPAGE_PMD_NR;
948 		num_pages -= HPAGE_PMD_NR;
949 	}
950 
951 skip_huge:
952 #endif
953 
954 	pool = ttm_dma_find_pool(dev, type);
955 	if (!pool) {
956 		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);
957 
958 		pool = ttm_dma_pool_init(dev, gfp_flags, type);
959 		if (IS_ERR_OR_NULL(pool))
960 			return -ENOMEM;
961 	}
962 
963 	while (num_pages) {
964 		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
965 		if (!d_page) {
966 			ttm_dma_unpopulate(ttm_dma, dev);
967 			return -ENOMEM;
968 		}
969 
970 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
971 						pool->size, ctx);
972 		if (unlikely(ret != 0)) {
973 			ttm_dma_unpopulate(ttm_dma, dev);
974 			return -ENOMEM;
975 		}
976 
977 		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
978 		++i;
979 		--num_pages;
980 	}
981 
982 	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
983 		ret = ttm_tt_swapin(ttm);
984 		if (unlikely(ret != 0)) {
985 			ttm_dma_unpopulate(ttm_dma, dev);
986 			return ret;
987 		}
988 	}
989 
990 	ttm->state = tt_unbound;
991 	return 0;
992 }
993 EXPORT_SYMBOL_GPL(ttm_dma_populate);
994 
995 /* Put all pages in pages list to correct pool to wait for reuse */
996 void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
997 {
998 	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
999 	struct ttm_tt *ttm = &ttm_dma->ttm;
1000 	struct dma_pool *pool;
1001 	struct dma_page *d_page, *next;
1002 	enum pool_type type;
1003 	bool is_cached = false;
1004 	unsigned count, i, npages = 0;
1005 	unsigned long irq_flags;
1006 
1007 	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
1008 
1009 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1010 	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
1011 	if (pool) {
1012 		count = 0;
1013 		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
1014 					 page_list) {
1015 			if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
1016 				continue;
1017 
1018 			count++;
1019 			if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
1020 				ttm_mem_global_free_page(mem_glob, d_page->p,
1021 							 pool->size);
1022 				d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
1023 			}
1024 			ttm_dma_page_put(pool, d_page);
1025 		}
1026 
1027 		spin_lock_irqsave(&pool->lock, irq_flags);
1028 		pool->npages_in_use -= count;
1029 		pool->nfrees += count;
1030 		spin_unlock_irqrestore(&pool->lock, irq_flags);
1031 	}
1032 #endif
1033 
1034 	pool = ttm_dma_find_pool(dev, type);
1035 	if (!pool)
1036 		return;
1037 
1038 	is_cached = (ttm_dma_find_pool(pool->dev,
1039 		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);
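	/* Pages from the cached pool are handed straight back to the DMA
	 * allocator below; pages from the other pools are kept on the pool's
	 * free_list for reuse. */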
1040 
1041 	/* make sure pages array match list and count number of pages */
1042 	count = 0;
1043 	list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
1044 				 page_list) {
1045 		ttm->pages[count] = d_page->p;
1046 		count++;
1047 
1048 		if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
1049 			ttm_mem_global_free_page(mem_glob, d_page->p,
1050 						 pool->size);
1051 			d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
1052 		}
1053 
1054 		if (is_cached)
1055 			ttm_dma_page_put(pool, d_page);
1056 	}
1057 
1058 	spin_lock_irqsave(&pool->lock, irq_flags);
1059 	pool->npages_in_use -= count;
1060 	if (is_cached) {
1061 		pool->nfrees += count;
1062 	} else {
1063 		pool->npages_free += count;
1064 		list_splice(&ttm_dma->pages_list, &pool->free_list);
1065 		/*
1066 		 * Wait until we have at least NUM_PAGES_TO_ALLOC pages
1067 		 * to free in order to minimize calls to set_memory_wb().
1068 		 */
1069 		if (pool->npages_free >= (_manager->options.max_size +
1070 					  NUM_PAGES_TO_ALLOC))
1071 			npages = pool->npages_free - _manager->options.max_size;
1072 	}
1073 	spin_unlock_irqrestore(&pool->lock, irq_flags);
1074 
1075 	INIT_LIST_HEAD(&ttm_dma->pages_list);
1076 	for (i = 0; i < ttm->num_pages; i++) {
1077 		ttm->pages[i] = NULL;
1078 		ttm_dma->dma_address[i] = 0;
1079 	}
1080 
1081 	/* shrink the pool if necessary (only on !is_cached pools) */
1082 	if (npages)
1083 		ttm_dma_page_pool_free(pool, npages, false);
1084 	ttm->state = tt_unpopulated;
1085 }
1086 EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
1087 
1088 /**
1089  * Callback for mm to request the pool to reduce the number of pages held.
1090  *
1091  * XXX: (dchinner) Deadlock warning!
1092  *
1093  * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
1094  * shrinkers
1095  */
1096 static unsigned long
1097 ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1098 {
1099 	static unsigned start_pool;
1100 	unsigned idx = 0;
1101 	unsigned pool_offset;
1102 	unsigned shrink_pages = sc->nr_to_scan;
1103 	struct device_pools *p;
1104 	unsigned long freed = 0;
1105 
1106 	if (list_empty(&_manager->pools))
1107 		return SHRINK_STOP;
1108 
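	/* Use trylock so the shrinker backs off instead of blocking if the
	 * pool manager lock is already held elsewhere. */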
1109 	if (!mutex_trylock(&_manager->lock))
1110 		return SHRINK_STOP;
1111 	if (!_manager->npools)
1112 		goto out;
1113 	pool_offset = ++start_pool % _manager->npools;
1114 	list_for_each_entry(p, &_manager->pools, pools) {
1115 		unsigned nr_free;
1116 
1117 		if (!p->dev)
1118 			continue;
1119 		if (shrink_pages == 0)
1120 			break;
1121 		/* Do it in round-robin fashion. */
1122 		if (++idx < pool_offset)
1123 			continue;
1124 		nr_free = shrink_pages;
1125 		/* OK to use static buffer since global mutex is held. */
1126 		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
1127 		freed += nr_free - shrink_pages;
1128 
1129 		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
1130 			 p->pool->dev_name, p->pool->name, current->pid,
1131 			 nr_free, shrink_pages);
1132 	}
1133 out:
1134 	mutex_unlock(&_manager->lock);
1135 	return freed;
1136 }
1137 
1138 static unsigned long
1139 ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1140 {
1141 	struct device_pools *p;
1142 	unsigned long count = 0;
1143 
1144 	if (!mutex_trylock(&_manager->lock))
1145 		return 0;
1146 	list_for_each_entry(p, &_manager->pools, pools)
1147 		count += p->pool->npages_free;
1148 	mutex_unlock(&_manager->lock);
1149 	return count;
1150 }
1151 
1152 static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
1153 {
1154 	manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
1155 	manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
1156 	manager->mm_shrink.seeks = 1;
1157 	return register_shrinker(&manager->mm_shrink);
1158 }
1159 
1160 static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
1161 {
1162 	unregister_shrinker(&manager->mm_shrink);
1163 }
1164 
1165 int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
1166 {
1167 	int ret;
1168 
1169 	WARN_ON(_manager);
1170 
1171 	pr_info("Initializing DMA pool allocator\n");
1172 
1173 	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
1174 	if (!_manager)
1175 		return -ENOMEM;
1176 
1177 	mutex_init(&_manager->lock);
1178 	INIT_LIST_HEAD(&_manager->pools);
1179 
1180 	_manager->options.max_size = max_pages;
1181 	_manager->options.small = SMALL_ALLOCATION;
1182 	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
1183 
1184 	/* This takes care of auto-freeing the _manager */
1185 	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
1186 				   &glob->kobj, "dma_pool");
1187 	if (unlikely(ret != 0))
1188 		goto error;
1189 
1190 	ret = ttm_dma_pool_mm_shrink_init(_manager);
1191 	if (unlikely(ret != 0))
1192 		goto error;
1193 	return 0;
1194 
1195 error:
1196 	kobject_put(&_manager->kobj);
1197 	_manager = NULL;
1198 	return ret;
1199 }
1200 
1201 void ttm_dma_page_alloc_fini(void)
1202 {
1203 	struct device_pools *p, *t;
1204 
1205 	pr_info("Finalizing DMA pool allocator\n");
1206 	ttm_dma_pool_mm_shrink_fini(_manager);
1207 
1208 	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
1209 		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
1210 			current->pid);
1211 		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
1212 			ttm_dma_pool_match, p->pool));
1213 		ttm_dma_free_pool(p->dev, p->pool->type);
1214 	}
1215 	kobject_put(&_manager->kobj);
1216 	_manager = NULL;
1217 }
1218 
1219 int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
1220 {
1221 	struct device_pools *p;
1222 	struct dma_pool *pool = NULL;
1223 
1224 	if (!_manager) {
1225 		seq_printf(m, "No pool allocator running.\n");
1226 		return 0;
1227 	}
1228 	seq_printf(m, "         pool      refills   pages freed    inuse available     name\n");
1229 	mutex_lock(&_manager->lock);
1230 	list_for_each_entry(p, &_manager->pools, pools) {
1231 		struct device *dev = p->dev;
1232 		if (!dev)
1233 			continue;
1234 		pool = p->pool;
1235 		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
1236 				pool->name, pool->nrefills,
1237 				pool->nfrees, pool->npages_in_use,
1238 				pool->npages_free,
1239 				pool->dev_name);
1240 	}
1241 	mutex_unlock(&_manager->lock);
1242 	return 0;
1243 }
1244 EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
1245