xref: /dflybsd-src/sys/dev/drm/ttm/ttm_page_alloc.c (revision 747e2961e929a839fd1f570e8b2c2fce73f0fe5c)
1 /*
2  * Copyright (c) Red Hat Inc.
3 
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sub license,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the
12  * next paragraph) shall be included in all copies or substantial portions
13  * of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors: Dave Airlie <airlied@redhat.com>
24  *          Jerome Glisse <jglisse@redhat.com>
25  *          Pauli Nieminen <suokkos@gmail.com>
26  */
27 /*
28  * Copyright (c) 2013 The FreeBSD Foundation
29  * All rights reserved.
30  *
31  * Portions of this software were developed by Konstantin Belousov
32  * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
33  */
34 
35 /* simple list based uncached page pool
36  * - Pool collects recently freed pages for reuse
37  * - Use page->lru to keep a free list
38  * - doesn't track currently in use pages
39  */
40 
41 #define pr_fmt(fmt) "[TTM] " fmt
42 
43 #include <linux/list.h>
44 #include <linux/spinlock.h>
45 #include <linux/highmem.h>
46 #include <linux/mm_types.h>
47 #include <linux/module.h>
48 #include <linux/mm.h>
49 #include <linux/seq_file.h> /* for seq_printf */
50 #include <linux/dma-mapping.h>
51 
52 #include <linux/atomic.h>
53 
54 #include <drm/ttm/ttm_bo_driver.h>
55 #include <drm/ttm/ttm_page_alloc.h>
56 
57 #include <sys/eventhandler.h>
58 
59 #ifdef TTM_HAS_AGP
60 #include <asm/agp.h>
61 #endif
62 
63 #define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
64 #define SMALL_ALLOCATION		16
65 #define FREE_ALL_PAGES			(~0U)
66 /* times are in msecs */
67 #define PAGE_FREE_INTERVAL		1000
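
/*
 * Illustrative note (not part of the original source): on a common LP64
 * configuration with 4 KiB pages and 8-byte pointers the batch size above
 * works out to
 *
 *	NUM_PAGES_TO_ALLOC = PAGE_SIZE / sizeof(struct page *)
 *	                   = 4096 / 8 = 512 pages per batch,
 *
 * i.e. the temporary array of page pointers used for allocating and
 * freeing batches fits in exactly one page.  The value is platform
 * dependent.
 */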
68 
69 /**
70  * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
71  *
72  * @lock: Protects the shared pool from concurrent access. Must be used with
73  * irqsave/irqrestore variants because the pool allocator may be called from
74  * delayed work.
75  * @fill_lock: Prevent concurrent calls to fill.
76  * @list: Pool of free uc/wc pages for fast reuse.
77  * @gfp_flags: Flags to pass for alloc_page.
78  * @npages: Number of pages in pool.
79  */
80 struct ttm_page_pool {
81 	struct lock		lock;
82 	bool			fill_lock;
83 	struct pglist		list;
84 	gfp_t			gfp_flags;
85 	unsigned		npages;
86 	char			*name;
87 	unsigned long		nfrees;
88 	unsigned long		nrefills;
89 };
90 
91 /**
92  * Limits for the pool. They are handled without locks because the only place
93  * where they may change is the sysfs store. They won't have an immediate
94  * effect anyway, so forcing serialized access to them is pointless.
95  */
96 
97 struct ttm_pool_opts {
98 	unsigned	alloc_size;
99 	unsigned	max_size;
100 	unsigned	small;
101 };
102 
103 #define NUM_POOLS 4
104 
105 /**
106  * struct ttm_pool_manager - Holds memory pools for fast allocation
107  *
108  * Manager is a read-only object for the pool code, so it doesn't need locking.
109  *
110  * @free_interval: minimum number of jiffies between freeing pages from pool.
111  * @page_alloc_inited: reference counting for pool allocation.
112  * @work: Work that is used to shrink the pool. Work is only run when there
113  * are pages to free.
114  * @small_allocation: Limit, in pages, below which an allocation counts as small.
115  *
116  * @pools: All pool objects in use.
117  **/
118 struct ttm_pool_manager {
119 	struct kobject		kobj;
120 	struct shrinker		mm_shrink;
121 	eventhandler_tag lowmem_handler;
122 	struct ttm_pool_opts	options;
123 
124 	union {
125 		struct ttm_page_pool	pools[NUM_POOLS];
126 		struct {
127 			struct ttm_page_pool	wc_pool;
128 			struct ttm_page_pool	uc_pool;
129 			struct ttm_page_pool	wc_pool_dma32;
130 			struct ttm_page_pool	uc_pool_dma32;
131 		} ;
132 	};
133 };
134 
135 static struct attribute ttm_page_pool_max = {
136 	.name = "pool_max_size",
137 	.mode = S_IRUGO | S_IWUSR
138 };
139 static struct attribute ttm_page_pool_small = {
140 	.name = "pool_small_allocation",
141 	.mode = S_IRUGO | S_IWUSR
142 };
143 static struct attribute ttm_page_pool_alloc_size = {
144 	.name = "pool_allocation_size",
145 	.mode = S_IRUGO | S_IWUSR
146 };
147 
148 static struct attribute *ttm_pool_attrs[] = {
149 	&ttm_page_pool_max,
150 	&ttm_page_pool_small,
151 	&ttm_page_pool_alloc_size,
152 	NULL
153 };
154 
155 static void ttm_pool_kobj_release(struct kobject *kobj)
156 {
157 	struct ttm_pool_manager *m =
158 		container_of(kobj, struct ttm_pool_manager, kobj);
159 	kfree(m);
160 }
161 
162 static ssize_t ttm_pool_store(struct kobject *kobj,
163 		struct attribute *attr, const char *buffer, size_t size)
164 {
165 	struct ttm_pool_manager *m =
166 		container_of(kobj, struct ttm_pool_manager, kobj);
167 	int chars;
168 	unsigned val;
169 	chars = ksscanf(buffer, "%u", &val);
170 	if (chars == 0)
171 		return size;
172 
173 	/* Convert kb to number of pages */
174 	val = val / (PAGE_SIZE >> 10);
175 
176 	if (attr == &ttm_page_pool_max)
177 		m->options.max_size = val;
178 	else if (attr == &ttm_page_pool_small)
179 		m->options.small = val;
180 	else if (attr == &ttm_page_pool_alloc_size) {
181 		if (val > NUM_PAGES_TO_ALLOC*8) {
182 			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
183 			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
184 			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
185 			return size;
186 		} else if (val > NUM_PAGES_TO_ALLOC) {
187 			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
188 				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
189 		}
190 		m->options.alloc_size = val;
191 	}
192 
193 	return size;
194 }
195 
196 static ssize_t ttm_pool_show(struct kobject *kobj,
197 		struct attribute *attr, char *buffer)
198 {
199 	struct ttm_pool_manager *m =
200 		container_of(kobj, struct ttm_pool_manager, kobj);
201 	unsigned val = 0;
202 
203 	if (attr == &ttm_page_pool_max)
204 		val = m->options.max_size;
205 	else if (attr == &ttm_page_pool_small)
206 		val = m->options.small;
207 	else if (attr == &ttm_page_pool_alloc_size)
208 		val = m->options.alloc_size;
209 
210 	val = val * (PAGE_SIZE >> 10);
211 
212 	return ksnprintf(buffer, PAGE_SIZE, "%u\n", val);
213 }
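
/*
 * Illustrative example (not part of the original source), assuming 4 KiB
 * pages: writing "8192" (KiB) to pool_max_size stores
 *
 *	val = 8192 / (PAGE_SIZE >> 10) = 8192 / 4 = 2048 pages
 *
 * in options.max_size, and reading the attribute back converts it to
 * 2048 * 4 = 8192 KiB again.  Values smaller than (PAGE_SIZE >> 10) KiB
 * round down to zero pages.
 */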
214 
215 static const struct sysfs_ops ttm_pool_sysfs_ops = {
216 	.show = &ttm_pool_show,
217 	.store = &ttm_pool_store,
218 };
219 
220 static struct kobj_type ttm_pool_kobj_type = {
221 	.release = &ttm_pool_kobj_release,
222 	.sysfs_ops = &ttm_pool_sysfs_ops,
223 	.default_attrs = ttm_pool_attrs,
224 };
225 
226 static struct ttm_pool_manager *_manager;
227 
228 #ifndef CONFIG_X86
229 static int set_pages_array_wb(struct page **pages, int addrinarray)
230 {
231 #ifdef TTM_HAS_AGP
232 	int i;
233 
234 	for (i = 0; i < addrinarray; i++)
235 		unmap_page_from_agp(pages[i]);
236 #endif
237 	return 0;
238 }
239 
240 static int set_pages_array_wc(struct page **pages, int addrinarray)
241 {
242 #ifdef TTM_HAS_AGP
243 	int i;
244 
245 	for (i = 0; i < addrinarray; i++)
246 		map_page_into_agp(pages[i]);
247 #endif
248 	return 0;
249 }
250 
251 static int set_pages_array_uc(struct page **pages, int addrinarray)
252 {
253 #ifdef TTM_HAS_AGP
254 	int i;
255 
256 	for (i = 0; i < addrinarray; i++)
257 		map_page_into_agp(pages[i]);
258 #endif
259 	return 0;
260 }
261 #endif
262 
263 /**
264  * Select the right pool for the requested caching state and ttm flags. */
265 static struct ttm_page_pool *ttm_get_pool(int flags,
266 		enum ttm_caching_state cstate)
267 {
268 	int pool_index;
269 
270 	if (cstate == tt_cached)
271 		return NULL;
272 
273 	if (cstate == tt_wc)
274 		pool_index = 0x0;
275 	else
276 		pool_index = 0x1;
277 
278 	if (flags & TTM_PAGE_FLAG_DMA32)
279 		pool_index |= 0x2;
280 
281 	return &_manager->pools[pool_index];
282 }
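
/*
 * Illustrative summary of the mapping above (not part of the original
 * source); it mirrors the pools[]/named-pool union in struct
 * ttm_pool_manager:
 *
 *	tt_wc                                -> pools[0] (wc_pool)
 *	tt_uncached                          -> pools[1] (uc_pool)
 *	tt_wc       + TTM_PAGE_FLAG_DMA32    -> pools[2] (wc_pool_dma32)
 *	tt_uncached + TTM_PAGE_FLAG_DMA32    -> pools[3] (uc_pool_dma32)
 *	tt_cached                            -> NULL (cached pages are not pooled)
 */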
283 
284 /* set memory back to wb and free the pages. */
285 static void ttm_pages_put(struct page *pages[], unsigned npages)
286 {
287 	unsigned i;
288 	if (set_pages_array_wb(pages, npages))
289 		pr_err("Failed to set %d pages to wb!\n", npages);
290 	for (i = 0; i < npages; ++i)
291 		__free_page(pages[i]);
292 }
293 
294 static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
295 		unsigned freed_pages)
296 {
297 	pool->npages -= freed_pages;
298 	pool->nfrees += freed_pages;
299 }
300 
301 /**
302  * Free pages from pool.
303  *
304  * To prevent hogging the ttm_swap process we free at most
305  * NUM_PAGES_TO_ALLOC pages in one go.
306  *
307  * @pool: pool to free the pages from
308  * @nr_free: number of pages to free; FREE_ALL_PAGES frees everything in the pool
309  * @gfp: GFP flags for the temporary page array allocation.
310  **/
311 static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
312 			      gfp_t gfp)
313 {
314 	unsigned long irq_flags;
315 	struct vm_page *p, *p1;
316 	struct page **pages_to_free;
317 	unsigned freed_pages = 0,
318 		 npages_to_free = nr_free;
319 	unsigned i;
320 
321 	if (NUM_PAGES_TO_ALLOC < nr_free)
322 		npages_to_free = NUM_PAGES_TO_ALLOC;
323 
324 	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), M_DRM, gfp);
325 	if (!pages_to_free) {
326 		pr_err("Failed to allocate memory for pool free operation\n");
327 		return 0;
328 	}
329 
330 restart:
331 	spin_lock_irqsave(&pool->lock, irq_flags);
332 
333 	TAILQ_FOREACH_REVERSE_MUTABLE(p, &pool->list, pglist, pageq, p1) {
334 		if (freed_pages >= npages_to_free)
335 			break;
336 
337 		pages_to_free[freed_pages++] = (struct page *)p;
338 		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
339 		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
340 			/* remove range of pages from the pool */
341 			for (i = 0; i < freed_pages; i++)
342 				TAILQ_REMOVE(&pool->list, (struct vm_page *)pages_to_free[i], pageq);
343 
344 			ttm_pool_update_free_locked(pool, freed_pages);
345 			/**
346 			 * Because changing page caching is costly
347 			 * we unlock the pool to prevent stalling.
348 			 */
349 			spin_unlock_irqrestore(&pool->lock, irq_flags);
350 
351 			ttm_pages_put(pages_to_free, freed_pages);
352 			if (likely(nr_free != FREE_ALL_PAGES))
353 				nr_free -= freed_pages;
354 
355 			if (NUM_PAGES_TO_ALLOC >= nr_free)
356 				npages_to_free = nr_free;
357 			else
358 				npages_to_free = NUM_PAGES_TO_ALLOC;
359 
360 			freed_pages = 0;
361 
362 			/* free all so restart the processing */
363 			if (nr_free)
364 				goto restart;
365 
366 			/* Not allowed to fall through or break because
367 			 * the code following this loop runs under the spinlock
368 			 * while we are outside it here.
369 			 */
370 			goto out;
371 
372 		}
373 	}
374 
375 	/* remove range of pages from the pool */
376 	if (freed_pages) {
377 		for (i = 0; i < freed_pages; i++)
378 			TAILQ_REMOVE(&pool->list, (struct vm_page *)pages_to_free[i], pageq);
379 
380 		ttm_pool_update_free_locked(pool, freed_pages);
381 		nr_free -= freed_pages;
382 	}
383 
384 	spin_unlock_irqrestore(&pool->lock, irq_flags);
385 
386 	if (freed_pages)
387 		ttm_pages_put(pages_to_free, freed_pages);
388 out:
389 	kfree(pages_to_free);
390 	return nr_free;
391 }
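
/*
 * Illustrative sketch (not part of the original source; the helper name is
 * hypothetical): draining a pool completely, as ttm_page_alloc_fini() does
 * below, and trimming it by a fixed amount.  The return value is the number
 * of requested pages that could not be freed.
 */
#if 0
static void ttm_page_pool_free_example(struct ttm_page_pool *pool)
{
	unsigned not_freed;

	/* Free everything the pool currently holds. */
	ttm_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);

	/* Ask for 64 pages; not_freed tells us how many of those 64 the
	 * pool could not supply because it ran empty first. */
	not_freed = ttm_page_pool_free(pool, 64, GFP_KERNEL);
	(void)not_freed;
}
#endif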
392 
393 /**
394  * Callback for mm to request that the pool reduce the number of pages held.
395  *
396  * XXX: (dchinner) Deadlock warning!
397  *
398  * We need to pass sc->gfp_mask to ttm_page_pool_free().
399  *
400  * This code is crying out for a shrinker per pool....
401  */
402 static unsigned long
403 ttm_pool_shrink_scan(void *arg)
404 {
405 #ifdef __DragonFly__
406 	static struct shrink_control __sc;
407 	struct shrink_control *sc = &__sc;
408 #endif
409 	static DEFINE_MUTEX(lock);
410 	static unsigned start_pool;
411 	unsigned i;
412 	unsigned pool_offset;
413 	struct ttm_page_pool *pool;
414 	int shrink_pages = 100; /* XXXKIB */
415 	unsigned long freed = 0;
416 
417 #ifdef __DragonFly__
418 	sc->gfp_mask = M_WAITOK;
419 #endif
420 
421 	if (!mutex_trylock(&lock))
422 		return SHRINK_STOP;
423 	pool_offset = ++start_pool % NUM_POOLS;
424 	/* select start pool in round robin fashion */
425 	for (i = 0; i < NUM_POOLS; ++i) {
426 		unsigned nr_free = shrink_pages;
427 		if (shrink_pages == 0)
428 			break;
429 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
430 		shrink_pages = ttm_page_pool_free(pool, nr_free,
431 						  sc->gfp_mask);
432 		freed += nr_free - shrink_pages;
433 	}
434 	mutex_unlock(&lock);
435 	return freed;
436 }
437 
438 
439 static unsigned long
440 ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
441 {
442 	unsigned i;
443 	unsigned long count = 0;
444 
445 	for (i = 0; i < NUM_POOLS; ++i)
446 		count += _manager->pools[i].npages;
447 
448 	return count;
449 }
450 
451 static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
452 {
453 	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
454 	manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
455 	    ttm_pool_shrink_scan, manager, EVENTHANDLER_PRI_ANY);
456 }
457 
458 static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
459 {
460 	EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
461 }
462 
463 static int ttm_set_pages_caching(struct page **pages,
464 		enum ttm_caching_state cstate, unsigned cpages)
465 {
466 	int r = 0;
467 	/* Set page caching */
468 	switch (cstate) {
469 	case tt_uncached:
470 		r = set_pages_array_uc(pages, cpages);
471 		if (r)
472 			pr_err("Failed to set %d pages to uc!\n", cpages);
473 		break;
474 	case tt_wc:
475 		r = set_pages_array_wc(pages, cpages);
476 		if (r)
477 			pr_err("Failed to set %d pages to wc!\n", cpages);
478 		break;
479 	default:
480 		break;
481 	}
482 	return r;
483 }
484 
485 /**
486  * Free the pages that failed to change their caching state. If there are
487  * any pages that have already changed their caching state, put them back
488  * into the pool.
489  */
490 static void ttm_handle_caching_state_failure(struct pglist *pages,
491 		int ttm_flags, enum ttm_caching_state cstate,
492 		struct page **failed_pages, unsigned cpages)
493 {
494 	unsigned i;
495 	/* Failed pages have to be freed */
496 	for (i = 0; i < cpages; ++i) {
497 		TAILQ_REMOVE(pages, (struct vm_page *)failed_pages[i], pageq);
498 		__free_page(failed_pages[i]);
499 	}
500 }
501 
502 /**
503  * Allocate new pages with correct caching.
504  *
505  * This function is reentrant if the caller updates count depending on the
506  * number of pages returned in the pages array.
507  */
508 static int ttm_alloc_new_pages(struct pglist *pages, gfp_t gfp_flags,
509 		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
510 {
511 	struct page **caching_array;
512 	struct page *p;
513 	int r = 0;
514 	unsigned i, cpages;
515 	unsigned max_cpages = min(count,
516 			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
517 
518 	/* allocate array for page caching change */
519 	caching_array = kmalloc(max_cpages*sizeof(struct page *), M_DRM, M_WAITOK);
520 
521 	if (!caching_array) {
522 		pr_err("Unable to allocate table for new pages\n");
523 		return -ENOMEM;
524 	}
525 
526 	for (i = 0, cpages = 0; i < count; ++i) {
527 		p = alloc_page(gfp_flags);
528 
529 		if (!p) {
530 			pr_err("Unable to get page %u\n", i);
531 
532 			/* store already allocated pages in the pool after
533 			 * setting the caching state */
534 			if (cpages) {
535 				r = ttm_set_pages_caching(caching_array,
536 							  cstate, cpages);
537 				if (r)
538 					ttm_handle_caching_state_failure(pages,
539 						ttm_flags, cstate,
540 						caching_array, cpages);
541 			}
542 			r = -ENOMEM;
543 			goto out;
544 		}
545 		((struct vm_page *)p)->flags |= PG_FICTITIOUS;
546 
547 #ifdef CONFIG_HIGHMEM
548 		/* gfp flags of highmem pages should never include dma32,
549 		 * so we should be fine in that case
550 		 */
551 		if (!PageHighMem(p))
552 #endif
553 		{
554 			caching_array[cpages++] = p;
555 			if (cpages == max_cpages) {
556 
557 				r = ttm_set_pages_caching(caching_array,
558 						cstate, cpages);
559 				if (r) {
560 					ttm_handle_caching_state_failure(pages,
561 						ttm_flags, cstate,
562 						caching_array, cpages);
563 					goto out;
564 				}
565 				cpages = 0;
566 			}
567 		}
568 
569 		TAILQ_INSERT_HEAD(pages, (struct vm_page *)p, pageq);
570 	}
571 
572 	if (cpages) {
573 		r = ttm_set_pages_caching(caching_array, cstate, cpages);
574 		if (r)
575 			ttm_handle_caching_state_failure(pages,
576 					ttm_flags, cstate,
577 					caching_array, cpages);
578 	}
579 out:
580 	kfree(caching_array);
581 
582 	return r;
583 }
584 
585 /**
586  * Fill the given pool if there aren't enough pages and the requested number of
587  * pages is small.
588  */
589 static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
590 		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
591 		unsigned long *irq_flags)
592 {
593 	vm_page_t p;
594 	int r;
595 	unsigned cpages = 0;
596 	/**
597 	 * Only allow one pool fill operation at a time.
598 	 * If the pool doesn't have enough pages for the allocation, new
599 	 * pages are allocated from outside of the pool.
600 	 */
601 	if (pool->fill_lock)
602 		return;
603 
604 	pool->fill_lock = true;
605 
606 	/* If the allocation request is small and there are not enough
607 	 * pages in the pool, fill the pool up first. */
608 	if (count < _manager->options.small
609 		&& count > pool->npages) {
610 		struct pglist new_pages;
611 		unsigned alloc_size = _manager->options.alloc_size;
612 
613 		/**
614 		 * Can't change page caching if in irqsave context. We have to
615 		 * drop the pool->lock.
616 		 */
617 		spin_unlock_irqrestore(&pool->lock, *irq_flags);
618 
619 		TAILQ_INIT(&new_pages);
620 		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
621 				cstate,	alloc_size);
622 		spin_lock_irqsave(&pool->lock, *irq_flags);
623 
624 		if (!r) {
625 			TAILQ_CONCAT(&pool->list, &new_pages, pageq);
626 			++pool->nrefills;
627 			pool->npages += alloc_size;
628 		} else {
629 			pr_err("Failed to fill pool (%p)\n", pool);
630 			/* Put the pages allocated before the failure into the pool. */
631 			TAILQ_FOREACH(p, &new_pages, pageq) {
632 				++cpages;
633 			}
634 			TAILQ_CONCAT(&pool->list, &new_pages, pageq);
635 			pool->npages += cpages;
636 		}
637 
638 	}
639 	pool->fill_lock = false;
640 }
641 
642 /**
643  * Cut 'count' number of pages from the pool and put them on the return list.
644  *
645  * @return count of pages still required to fulfill the request.
646  */
647 static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
648 					struct pglist *pages,
649 					int ttm_flags,
650 					enum ttm_caching_state cstate,
651 					unsigned count)
652 {
653 	unsigned long irq_flags;
654 	vm_page_t p;
655 	unsigned i;
656 
657 	spin_lock_irqsave(&pool->lock, irq_flags);
658 	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
659 
660 	if (count >= pool->npages) {
661 		/* take all pages from the pool */
662 		TAILQ_CONCAT(pages, &pool->list, pageq);
663 		count -= pool->npages;
664 		pool->npages = 0;
665 		goto out;
666 	}
667 	for (i = 0; i < count; i++) {
668 		p = TAILQ_FIRST(&pool->list);
669 		TAILQ_REMOVE(&pool->list, p, pageq);
670 		TAILQ_INSERT_TAIL(pages, p, pageq);
671 	}
672 	pool->npages -= count;
673 	count = 0;
674 out:
675 	spin_unlock_irqrestore(&pool->lock, irq_flags);
676 	return count;
677 }
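
/*
 * Illustrative example (not part of the original source): if a caller asks
 * ttm_page_pool_get_pages() for 100 pages while the pool holds only 30 (and
 * the fill path above could not top it up), all 30 pages are moved onto the
 * caller's list and 70 is returned, i.e. the number of pages the caller
 * still has to allocate itself.
 */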
678 
679 /* Put all pages in the pages array back into the correct pool to await reuse */
680 static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
681 			  enum ttm_caching_state cstate)
682 {
683 	unsigned long irq_flags;
684 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
685 	unsigned i;
686 	struct vm_page *page;
687 
688 	if (pool == NULL) {
689 		/* No pool for this memory type so free the pages */
690 		for (i = 0; i < npages; i++) {
691 			if (pages[i]) {
692 #if 0
693 				if (page_count(pages[i]) != 1)
694 					pr_err("Erroneous page count. Leaking pages.\n");
695 #endif
696 				__free_page(pages[i]);
697 				pages[i] = NULL;
698 			}
699 		}
700 		return;
701 	}
702 
703 	spin_lock_irqsave(&pool->lock, irq_flags);
704 	for (i = 0; i < npages; i++) {
705 		if (pages[i]) {
706 			page = (struct vm_page *)pages[i];
707 			TAILQ_INSERT_TAIL(&pool->list, page, pageq);
708 			pages[i] = NULL;
709 			pool->npages++;
710 		}
711 	}
712 	/* Check that we don't go over the pool limit */
713 	npages = 0;
714 	if (pool->npages > _manager->options.max_size) {
715 		npages = pool->npages - _manager->options.max_size;
716 		/* free at least NUM_PAGES_TO_ALLOC pages
717 		 * to reduce calls to set_memory_wb */
718 		if (npages < NUM_PAGES_TO_ALLOC)
719 			npages = NUM_PAGES_TO_ALLOC;
720 	}
721 	spin_unlock_irqrestore(&pool->lock, irq_flags);
722 	if (npages)
723 		ttm_page_pool_free(pool, npages, GFP_KERNEL);
724 }
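
/*
 * Illustrative example (not part of the original source): with
 * options.max_size = 2048 and NUM_PAGES_TO_ALLOC = 512, returning pages
 * that push the pool to 2100 entries triggers a trim.  The overshoot is
 * only 52 pages, so the request is rounded up to 512 pages to avoid many
 * small, expensive caching transitions in ttm_pages_put().
 */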
725 
726 /*
727  * On success the pages array will hold npages correctly
728  * cached pages.
729  */
730 static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
731 			 enum ttm_caching_state cstate)
732 {
733 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
734 	struct pglist plist;
735 	struct vm_page *p = NULL;
736 	gfp_t gfp_flags = GFP_USER;
737 	unsigned count;
738 	int r;
739 
740 	/* set zero flag for page allocation if required */
741 	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
742 		gfp_flags |= __GFP_ZERO;
743 
744 	/* No pool for cached pages */
745 	if (pool == NULL) {
746 		if (flags & TTM_PAGE_FLAG_DMA32)
747 			gfp_flags |= GFP_DMA32;
748 		else
749 			gfp_flags |= GFP_HIGHUSER;
750 
751 		for (r = 0; r < npages; ++r) {
752 			p = (struct vm_page *)alloc_page(gfp_flags);
753 			if (!p) {
754 
755 				pr_err("Unable to allocate page\n");
756 				return -ENOMEM;
757 			}
758 			p->flags |= PG_FICTITIOUS;
759 
760 			pages[r] = (struct page *)p;
761 		}
762 		return 0;
763 	}
764 
765 	/* combine zero flag to pool flags */
766 	gfp_flags |= pool->gfp_flags;
767 
768 	/* First we take pages from the pool */
769 	TAILQ_INIT(&plist);
770 	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
771 	count = 0;
772 	TAILQ_FOREACH(p, &plist, pageq) {
773 		pages[count++] = (struct page *)p;
774 	}
775 
776 	/* clear the pages coming from the pool if requested */
777 	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
778 		TAILQ_FOREACH(p, &plist, pageq) {
779 			pmap_zero_page(VM_PAGE_TO_PHYS(p));
780 		}
781 	}
782 
783 	/* If the pool didn't have enough pages, allocate new ones. */
784 	if (npages > 0) {
785 		/* ttm_alloc_new_pages doesn't reference pool so we can run
786 		 * multiple requests in parallel.
787 		 **/
788 		TAILQ_INIT(&plist);
789 		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
790 		TAILQ_FOREACH(p, &plist, pageq) {
791 			pages[count++] = (struct page *)p;
792 		}
793 		if (r) {
794 			/* If there are any pages in the list, put them back
795 			 * into the pool. */
796 			pr_err("Failed to allocate extra pages for large request\n");
797 			ttm_put_pages(pages, count, flags, cstate);
798 			return r;
799 		}
800 	}
801 
802 	return 0;
803 }
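
/*
 * Illustrative sketch (not part of the original source; the function name
 * is hypothetical) of how the two static helpers above pair up: allocate a
 * few zeroed, write-combined DMA32 pages and hand them back to the pool
 * when done.
 */
#if 0
static int ttm_page_alloc_example(void)
{
	struct page *pages[4] = { NULL, NULL, NULL, NULL };
	int flags = TTM_PAGE_FLAG_DMA32 | TTM_PAGE_FLAG_ZERO_ALLOC;
	int r;

	r = ttm_get_pages(pages, 4, flags, tt_wc);
	if (r)
		return r;

	/* ... use the pages ... */

	ttm_put_pages(pages, 4, flags, tt_wc);
	return 0;
}
#endif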
804 
805 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
806 		char *name)
807 {
808 	lockinit(&pool->lock, "ttmpool", 0, LK_CANRECURSE);
809 	pool->fill_lock = false;
810 	TAILQ_INIT(&pool->list);
811 	pool->npages = pool->nfrees = 0;
812 	pool->gfp_flags = flags;
813 	pool->name = name;
814 }
815 
816 int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
817 {
818 	int ret;
819 
820 	WARN_ON(_manager);
821 
822 	pr_info("Initializing pool allocator\n");
823 
824 	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
825 
826 	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
827 
828 	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
829 
830 	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
831 				  GFP_USER | GFP_DMA32, "wc dma");
832 
833 	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
834 				  GFP_USER | GFP_DMA32, "uc dma");
835 
836 	_manager->options.max_size = max_pages;
837 	_manager->options.small = SMALL_ALLOCATION;
838 	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
839 
840 	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
841 				   &glob->kobj, "pool");
842 	if (unlikely(ret != 0)) {
843 		kobject_put(&_manager->kobj);
844 		_manager = NULL;
845 		return ret;
846 	}
847 
848 	ttm_pool_mm_shrink_init(_manager);
849 
850 	return 0;
851 }
852 
853 void ttm_page_alloc_fini(void)
854 {
855 	int i;
856 
857 	pr_info("Finalizing pool allocator\n");
858 	ttm_pool_mm_shrink_fini(_manager);
859 
860 	for (i = 0; i < NUM_POOLS; ++i)
861 		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
862 				   GFP_KERNEL);
863 
864 	kobject_put(&_manager->kobj);
865 	_manager = NULL;
866 }
867 
868 int ttm_pool_populate(struct ttm_tt *ttm)
869 {
870 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
871 	unsigned i;
872 	int ret;
873 
874 	if (ttm->state != tt_unpopulated)
875 		return 0;
876 
877 	for (i = 0; i < ttm->num_pages; ++i) {
878 		ret = ttm_get_pages(&ttm->pages[i], 1,
879 				    ttm->page_flags,
880 				    ttm->caching_state);
881 		if (ret != 0) {
882 			ttm_pool_unpopulate(ttm);
883 			return -ENOMEM;
884 		}
885 
886 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
887 						false, false);
888 		if (unlikely(ret != 0)) {
889 			ttm_pool_unpopulate(ttm);
890 			return -ENOMEM;
891 		}
892 	}
893 
894 	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
895 		ret = ttm_tt_swapin(ttm);
896 		if (unlikely(ret != 0)) {
897 			ttm_pool_unpopulate(ttm);
898 			return ret;
899 		}
900 	}
901 
902 	ttm->state = tt_unbound;
903 	return 0;
904 }
905 EXPORT_SYMBOL(ttm_pool_populate);
906 
907 void ttm_pool_unpopulate(struct ttm_tt *ttm)
908 {
909 	unsigned i;
910 
911 	for (i = 0; i < ttm->num_pages; ++i) {
912 		if (ttm->pages[i]) {
913 			ttm_mem_global_free_page(ttm->glob->mem_glob,
914 						 ttm->pages[i]);
915 			ttm_put_pages(&ttm->pages[i], 1,
916 				      ttm->page_flags,
917 				      ttm->caching_state);
918 		}
919 	}
920 	ttm->state = tt_unpopulated;
921 }
922 EXPORT_SYMBOL(ttm_pool_unpopulate);
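
/*
 * Illustrative sketch (not part of the original source; the variable name
 * is hypothetical): drivers using this allocator typically wire the two
 * exported helpers into their ttm_bo_driver so TTM calls them when backing
 * pages must be populated or can be released.
 */
#if 0
static struct ttm_bo_driver example_bo_driver = {
	.ttm_tt_populate = ttm_pool_populate,
	.ttm_tt_unpopulate = ttm_pool_unpopulate,
	/* ... other callbacks ... */
};
#endif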
923 
924 #if 0
925 int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
926 {
927 	struct ttm_page_pool *p;
928 	unsigned i;
929 	char *h[] = {"pool", "refills", "pages freed", "size"};
930 	if (!_manager) {
931 		seq_printf(m, "No pool allocator running.\n");
932 		return 0;
933 	}
934 	seq_printf(m, "%6s %12s %13s %8s\n",
935 			h[0], h[1], h[2], h[3]);
936 	for (i = 0; i < NUM_POOLS; ++i) {
937 		p = &_manager->pools[i];
938 
939 		seq_printf(m, "%6s %12ld %13ld %8d\n",
940 				p->name, p->nrefills,
941 				p->nfrees, p->npages);
942 	}
943 	return 0;
944 }
945 #endif
946 EXPORT_SYMBOL(ttm_page_alloc_debugfs);
947