1 /*	$NetBSD: ttm_page_alloc.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $	*/
2 
3 /*
4  * Copyright (c) Red Hat Inc.
5 
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sub license,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the
14  * next paragraph) shall be included in all copies or substantial portions
15  * of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23  * DEALINGS IN THE SOFTWARE.
24  *
25  * Authors: Dave Airlie <airlied@redhat.com>
26  *          Jerome Glisse <jglisse@redhat.com>
27  *          Pauli Nieminen <suokkos@gmail.com>
28  */
29 
30 /* simple list-based uncached page pool
31  * - Pool collects recently freed pages for reuse
32  * - Use page->lru to keep a free list
33  * - doesn't track pages currently in use
34  */
35 
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: ttm_page_alloc.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $");
38 
39 #define pr_fmt(fmt) "[TTM] " fmt
40 
41 #include <linux/list.h>
42 #include <linux/spinlock.h>
43 #include <linux/highmem.h>
44 #include <linux/mm_types.h>
45 #include <linux/module.h>
46 #include <linux/mm.h>
47 #include <linux/seq_file.h> /* for seq_printf */
48 #include <linux/slab.h>
49 #include <linux/dma-mapping.h>
50 
51 #include <linux/atomic.h>
52 
53 #include <drm/ttm/ttm_bo_driver.h>
54 #include <drm/ttm/ttm_page_alloc.h>
55 
56 #ifdef TTM_HAS_AGP
57 #include <asm/agp.h>
58 #endif
59 
60 #define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
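/* i.e. the number of struct page pointers that fit in one page; used as the
 * batch size for pool refills and for frees in ttm_page_pool_free(). */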
61 #define SMALL_ALLOCATION		16
62 #define FREE_ALL_PAGES			(~0U)
63 /* times are in msecs */
64 #define PAGE_FREE_INTERVAL		1000
65 
66 /**
67  * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
68  *
69  * @lock: Protects the shared pool from concurrent access. Must be used with
70  * irqsave/irqrestore variants because the pool allocator may be called from
71  * delayed work.
72  * @fill_lock: Prevent concurrent calls to fill.
73  * @list: Pool of free uc/wc pages for fast reuse.
74  * @gfp_flags: Flags to pass for alloc_page.
75  * @npages: Number of pages in pool.
76  */
77 struct ttm_page_pool {
78 	spinlock_t		lock;
79 	bool			fill_lock;
80 	struct list_head	list;
81 	gfp_t			gfp_flags;
82 	unsigned		npages;
83 	char			*name;
84 	unsigned long		nfrees;
85 	unsigned long		nrefills;
86 };
87 
88 /**
89  * Limits for the pool. They are handled without locks because the only place
90  * where they may change is the sysfs store. They won't have an immediate
91  * effect anyway, so forcing serialized access to them is pointless.
92  */
93 
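/* All three limits are expressed in pages; the sysfs interface uses KiB. */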
94 struct ttm_pool_opts {
95 	unsigned	alloc_size;
96 	unsigned	max_size;
97 	unsigned	small;
98 };
99 
100 #define NUM_POOLS 4
101 
102 /**
103  * struct ttm_pool_manager - Holds memory pools for fast allocation
104  *
105  * The manager is a read-only object for the pool code, so it doesn't need
106  * locking.
107  *
108  * @kobj: sysfs object exposing the pool limit tunables.
109  * @mm_shrink: shrinker used to release pool pages under memory pressure.
110  * @options: pool limits; see struct ttm_pool_opts.
111  *
112  * @pools: All pool objects in use, also reachable as named members through
113  * the anonymous union.
114  **/
115 struct ttm_pool_manager {
116 	struct kobject		kobj;
117 	struct shrinker		mm_shrink;
118 	struct ttm_pool_opts	options;
119 
120 	union {
121 		struct ttm_page_pool	pools[NUM_POOLS];
122 		struct {
123 			struct ttm_page_pool	wc_pool;
124 			struct ttm_page_pool	uc_pool;
125 			struct ttm_page_pool	wc_pool_dma32;
126 			struct ttm_page_pool	uc_pool_dma32;
127 		} ;
128 	};
129 };
130 
131 static struct attribute ttm_page_pool_max = {
132 	.name = "pool_max_size",
133 	.mode = S_IRUGO | S_IWUSR
134 };
135 static struct attribute ttm_page_pool_small = {
136 	.name = "pool_small_allocation",
137 	.mode = S_IRUGO | S_IWUSR
138 };
139 static struct attribute ttm_page_pool_alloc_size = {
140 	.name = "pool_allocation_size",
141 	.mode = S_IRUGO | S_IWUSR
142 };
143 
144 static struct attribute *ttm_pool_attrs[] = {
145 	&ttm_page_pool_max,
146 	&ttm_page_pool_small,
147 	&ttm_page_pool_alloc_size,
148 	NULL
149 };
150 
151 static void ttm_pool_kobj_release(struct kobject *kobj)
152 {
153 	struct ttm_pool_manager *m =
154 		container_of(kobj, struct ttm_pool_manager, kobj);
155 	kfree(m);
156 }
157 
158 static ssize_t ttm_pool_store(struct kobject *kobj,
159 		struct attribute *attr, const char *buffer, size_t size)
160 {
161 	struct ttm_pool_manager *m =
162 		container_of(kobj, struct ttm_pool_manager, kobj);
163 	int chars;
164 	unsigned val;
165 	chars = sscanf(buffer, "%u", &val);
166 	if (chars == 0)
167 		return size;
168 
169 	/* Convert kb to number of pages */
170 	val = val / (PAGE_SIZE >> 10);
171 
172 	if (attr == &ttm_page_pool_max)
173 		m->options.max_size = val;
174 	else if (attr == &ttm_page_pool_small)
175 		m->options.small = val;
176 	else if (attr == &ttm_page_pool_alloc_size) {
177 		if (val > NUM_PAGES_TO_ALLOC*8) {
178 			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
179 			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
180 			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
181 			return size;
182 		} else if (val > NUM_PAGES_TO_ALLOC) {
183 			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
184 				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
185 		}
186 		m->options.alloc_size = val;
187 	}
188 
189 	return size;
190 }
191 
192 static ssize_t ttm_pool_show(struct kobject *kobj,
193 		struct attribute *attr, char *buffer)
194 {
195 	struct ttm_pool_manager *m =
196 		container_of(kobj, struct ttm_pool_manager, kobj);
197 	unsigned val = 0;
198 
199 	if (attr == &ttm_page_pool_max)
200 		val = m->options.max_size;
201 	else if (attr == &ttm_page_pool_small)
202 		val = m->options.small;
203 	else if (attr == &ttm_page_pool_alloc_size)
204 		val = m->options.alloc_size;
205 
206 	val = val * (PAGE_SIZE >> 10);
207 
208 	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
209 }
210 
211 static const struct sysfs_ops ttm_pool_sysfs_ops = {
212 	.show = &ttm_pool_show,
213 	.store = &ttm_pool_store,
214 };
215 
216 static struct kobj_type ttm_pool_kobj_type = {
217 	.release = &ttm_pool_kobj_release,
218 	.sysfs_ops = &ttm_pool_sysfs_ops,
219 	.default_attrs = ttm_pool_attrs,
220 };
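/*
 * All three attributes are read and written in KiB; ttm_pool_store() and
 * ttm_pool_show() convert to and from pages using PAGE_SIZE >> 10.  The
 * kobject itself is registered as "pool" in ttm_page_alloc_init().
 */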
221 
222 static struct ttm_pool_manager *_manager;
223 
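/*
 * Outside of x86 there are no native set_pages_array_{wb,wc,uc} helpers, so
 * fall back to (un)mapping the pages through the AGP layer when TTM_HAS_AGP
 * is defined, and to no-ops otherwise.
 */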
224 #ifndef CONFIG_X86
225 static int set_pages_array_wb(struct page **pages, int addrinarray)
226 {
227 #ifdef TTM_HAS_AGP
228 	int i;
229 
230 	for (i = 0; i < addrinarray; i++)
231 		unmap_page_from_agp(pages[i]);
232 #endif
233 	return 0;
234 }
235 
236 static int set_pages_array_wc(struct page **pages, int addrinarray)
237 {
238 #ifdef TTM_HAS_AGP
239 	int i;
240 
241 	for (i = 0; i < addrinarray; i++)
242 		map_page_into_agp(pages[i]);
243 #endif
244 	return 0;
245 }
246 
247 static int set_pages_array_uc(struct page **pages, int addrinarray)
248 {
249 #ifdef TTM_HAS_AGP
250 	int i;
251 
252 	for (i = 0; i < addrinarray; i++)
253 		map_page_into_agp(pages[i]);
254 #endif
255 	return 0;
256 }
257 #endif
258 
259 /**
260  * Select the right pool for the requested caching state and ttm flags. */
261 static struct ttm_page_pool *ttm_get_pool(int flags,
262 		enum ttm_caching_state cstate)
263 {
264 	int pool_index;
265 
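	/*
	 * The index encoding matches the pools[] union order: bit 0 selects
	 * uncached (uc) over write-combined (wc), bit 1 selects the DMA32
	 * variant.  Cached pages are not pooled at all.
	 */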
266 	if (cstate == tt_cached)
267 		return NULL;
268 
269 	if (cstate == tt_wc)
270 		pool_index = 0x0;
271 	else
272 		pool_index = 0x1;
273 
274 	if (flags & TTM_PAGE_FLAG_DMA32)
275 		pool_index |= 0x2;
276 
277 	return &_manager->pools[pool_index];
278 }
279 
280 /* set memory back to wb and free the pages. */
281 static void ttm_pages_put(struct page *pages[], unsigned npages)
282 {
283 	unsigned i;
284 	if (set_pages_array_wb(pages, npages))
285 		pr_err("Failed to set %d pages to wb!\n", npages);
286 	for (i = 0; i < npages; ++i)
287 		__free_page(pages[i]);
288 }
289 
290 static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
291 		unsigned freed_pages)
292 {
293 	pool->npages -= freed_pages;
294 	pool->nfrees += freed_pages;
295 }
296 
297 /**
298  * Free pages from the pool.
299  *
300  * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
301  * pages in one go.
302  *
303  * @pool: pool to free the pages from
304  * @nr_free: number of pages to free; FREE_ALL_PAGES frees the whole pool
305  * @use_static: it is safe to use the static buffer
306  **/
307 static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
308 			      bool use_static)
309 {
310 	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
311 	unsigned long irq_flags;
312 	struct page *p;
313 	struct page **pages_to_free;
314 	unsigned freed_pages = 0,
315 		 npages_to_free = nr_free;
316 
317 	if (NUM_PAGES_TO_ALLOC < nr_free)
318 		npages_to_free = NUM_PAGES_TO_ALLOC;
319 
320 	if (use_static)
321 		pages_to_free = static_buf;
322 	else
323 		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
324 					GFP_KERNEL);
325 	if (!pages_to_free) {
326 		pr_err("Failed to allocate memory for pool free operation\n");
327 		return 0;
328 	}
329 
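	/*
	 * Walk the pool from its LRU tail, batching at most NUM_PAGES_TO_ALLOC
	 * pages at a time so that the caching change and __free_page() calls
	 * in ttm_pages_put() can run with the pool lock dropped.
	 */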
330 restart:
331 	spin_lock_irqsave(&pool->lock, irq_flags);
332 
333 	list_for_each_entry_reverse(p, &pool->list, lru) {
334 		if (freed_pages >= npages_to_free)
335 			break;
336 
337 		pages_to_free[freed_pages++] = p;
338 		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
339 		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
340 			/* remove range of pages from the pool */
341 			__list_del(p->lru.prev, &pool->list);
342 
343 			ttm_pool_update_free_locked(pool, freed_pages);
344 			/**
345 			 * Because changing page caching is costly
346 			 * we unlock the pool to prevent stalling.
347 			 */
348 			spin_unlock_irqrestore(&pool->lock, irq_flags);
349 
350 			ttm_pages_put(pages_to_free, freed_pages);
351 			if (likely(nr_free != FREE_ALL_PAGES))
352 				nr_free -= freed_pages;
353 
354 			if (NUM_PAGES_TO_ALLOC >= nr_free)
355 				npages_to_free = nr_free;
356 			else
357 				npages_to_free = NUM_PAGES_TO_ALLOC;
358 
359 			freed_pages = 0;
360 
361 			/* still pages left to free, so restart the scan */
362 			if (nr_free)
363 				goto restart;
364 
365 			/* Not allowed to fall through or break because the
366 			 * code that follows runs under the spinlock, while we
367 			 * have already dropped it here.
368 			 */
369 			goto out;
370 
371 		}
372 	}
373 
374 	/* remove range of pages from the pool */
375 	if (freed_pages) {
376 		__list_del(&p->lru, &pool->list);
377 
378 		ttm_pool_update_free_locked(pool, freed_pages);
379 		nr_free -= freed_pages;
380 	}
381 
382 	spin_unlock_irqrestore(&pool->lock, irq_flags);
383 
384 	if (freed_pages)
385 		ttm_pages_put(pages_to_free, freed_pages);
386 out:
387 	if (pages_to_free != static_buf)
388 		kfree(pages_to_free);
389 	return nr_free;
390 }
391 
392 /**
393  * Callback for the mm to ask the pool to reduce the number of pages held.
394  *
395  * XXX: (dchinner) Deadlock warning!
396  *
397  * This code is crying out for a shrinker per pool....
398  */
399 static unsigned long
400 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
401 {
402 	static DEFINE_MUTEX(lock);
403 	static unsigned start_pool;
404 	unsigned i;
405 	unsigned pool_offset;
406 	struct ttm_page_pool *pool;
407 	int shrink_pages = sc->nr_to_scan;
408 	unsigned long freed = 0;
409 
410 	if (!mutex_trylock(&lock))
411 		return SHRINK_STOP;
412 	pool_offset = ++start_pool % NUM_POOLS;
413 	/* select start pool in round robin fashion */
414 	for (i = 0; i < NUM_POOLS; ++i) {
415 		unsigned nr_free = shrink_pages;
416 		if (shrink_pages == 0)
417 			break;
418 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
419 		/* OK to use static buffer since global mutex is held. */
420 		shrink_pages = ttm_page_pool_free(pool, nr_free, true);
421 		freed += nr_free - shrink_pages;
422 	}
423 	mutex_unlock(&lock);
424 	return freed;
425 }
426 
427 
428 static unsigned long
429 ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
430 {
431 	unsigned i;
432 	unsigned long count = 0;
433 
434 	for (i = 0; i < NUM_POOLS; ++i)
435 		count += _manager->pools[i].npages;
436 
437 	return count;
438 }
439 
440 static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
441 {
442 	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
443 	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
444 	manager->mm_shrink.seeks = 1;
445 	register_shrinker(&manager->mm_shrink);
446 }
447 
448 static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
449 {
450 	unregister_shrinker(&manager->mm_shrink);
451 }
452 
453 static int ttm_set_pages_caching(struct page **pages,
454 		enum ttm_caching_state cstate, unsigned cpages)
455 {
456 	int r = 0;
457 	/* Set page caching */
458 	switch (cstate) {
459 	case tt_uncached:
460 		r = set_pages_array_uc(pages, cpages);
461 		if (r)
462 			pr_err("Failed to set %d pages to uc!\n", cpages);
463 		break;
464 	case tt_wc:
465 		r = set_pages_array_wc(pages, cpages);
466 		if (r)
467 			pr_err("Failed to set %d pages to wc!\n", cpages);
468 		break;
469 	default:
470 		break;
471 	}
472 	return r;
473 }
474 
475 /**
476  * Free the pages that failed to change caching state. If there are any
477  * pages that have already changed their caching state, put them back in
478  * the pool.
479  */
480 static void ttm_handle_caching_state_failure(struct list_head *pages,
481 		int ttm_flags, enum ttm_caching_state cstate,
482 		struct page **failed_pages, unsigned cpages)
483 {
484 	unsigned i;
485 	/* Failed pages have to be freed */
486 	for (i = 0; i < cpages; ++i) {
487 		list_del(&failed_pages[i]->lru);
488 		__free_page(failed_pages[i]);
489 	}
490 }
491 
492 /**
493  * Allocate new pages with correct caching.
494  *
495  * This function is reentrant if the caller updates count depending on the
496  * number of pages returned in the pages array.
497  */
498 static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
499 		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
500 {
501 	struct page **caching_array;
502 	struct page *p;
503 	int r = 0;
504 	unsigned i, cpages;
505 	unsigned max_cpages = min(count,
506 			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
507 
508 	/* allocate array for page caching change */
509 	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
510 
511 	if (!caching_array) {
512 		pr_err("Unable to allocate table for new pages\n");
513 		return -ENOMEM;
514 	}
515 
516 	for (i = 0, cpages = 0; i < count; ++i) {
517 		p = alloc_page(gfp_flags);
518 
519 		if (!p) {
520 			pr_err("Unable to get page %u\n", i);
521 
522 			/* store already allocated pages in the pool after
523 			 * setting the caching state */
524 			if (cpages) {
525 				r = ttm_set_pages_caching(caching_array,
526 							  cstate, cpages);
527 				if (r)
528 					ttm_handle_caching_state_failure(pages,
529 						ttm_flags, cstate,
530 						caching_array, cpages);
531 			}
532 			r = -ENOMEM;
533 			goto out;
534 		}
535 
536 #ifdef CONFIG_HIGHMEM
537 		/* gfp flags of a highmem page should never include dma32,
538 		 * so we should be fine in that case
539 		 */
540 		if (!PageHighMem(p))
541 #endif
542 		{
543 			caching_array[cpages++] = p;
544 			if (cpages == max_cpages) {
545 
546 				r = ttm_set_pages_caching(caching_array,
547 						cstate, cpages);
548 				if (r) {
549 					ttm_handle_caching_state_failure(pages,
550 						ttm_flags, cstate,
551 						caching_array, cpages);
552 					goto out;
553 				}
554 				cpages = 0;
555 			}
556 		}
557 
558 		list_add(&p->lru, pages);
559 	}
560 
561 	if (cpages) {
562 		r = ttm_set_pages_caching(caching_array, cstate, cpages);
563 		if (r)
564 			ttm_handle_caching_state_failure(pages,
565 					ttm_flags, cstate,
566 					caching_array, cpages);
567 	}
568 out:
569 	kfree(caching_array);
570 
571 	return r;
572 }
573 
574 /**
575  * Fill the given pool if there aren't enough pages and the requested number of
576  * pages is small.
577  */
578 static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
579 		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
580 		unsigned long *irq_flags)
581 {
582 	struct page *p;
583 	int r;
584 	unsigned cpages = 0;
585 	/**
586 	 * Only allow one pool fill operation at a time.
587 	 * If the pool doesn't have enough pages for the allocation, new pages
588 	 * are allocated from outside of the pool.
589 	 */
590 	if (pool->fill_lock)
591 		return;
592 
593 	pool->fill_lock = true;
594 
595 	/* If the allocation request is small and there are not enough
596 	 * pages in the pool, we fill the pool up first. */
597 	if (count < _manager->options.small
598 		&& count > pool->npages) {
599 		struct list_head new_pages;
600 		unsigned alloc_size = _manager->options.alloc_size;
601 
602 		/**
603 		 * Can't change page caching if in irqsave context. We have to
604 		 * drop the pool->lock.
605 		 */
606 		spin_unlock_irqrestore(&pool->lock, *irq_flags);
607 
608 		INIT_LIST_HEAD(&new_pages);
609 		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
610 				cstate,	alloc_size);
611 		spin_lock_irqsave(&pool->lock, *irq_flags);
612 
613 		if (!r) {
614 			list_splice(&new_pages, &pool->list);
615 			++pool->nrefills;
616 			pool->npages += alloc_size;
617 		} else {
618 			pr_err("Failed to fill pool (%p)\n", pool);
619 			/* If we have any pages left, put them into the pool. */
620 			list_for_each_entry(p, &new_pages, lru) {
621 				++cpages;
622 			}
623 			list_splice(&new_pages, &pool->list);
624 			pool->npages += cpages;
625 		}
626 
627 	}
628 	pool->fill_lock = false;
629 }
630 
631 /**
632  * Cut 'count' number of pages from the pool and put them on the return list.
633  *
634  * @return count of pages still required to fulfill the request.
635  */
636 static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
637 					struct list_head *pages,
638 					int ttm_flags,
639 					enum ttm_caching_state cstate,
640 					unsigned count)
641 {
642 	unsigned long irq_flags;
643 	struct list_head *p;
644 	unsigned i;
645 
646 	spin_lock_irqsave(&pool->lock, irq_flags);
647 	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
648 
649 	if (count >= pool->npages) {
650 		/* take all pages from the pool */
651 		list_splice_init(&pool->list, pages);
652 		count -= pool->npages;
653 		pool->npages = 0;
654 		goto out;
655 	}
656 	/* Find the last page to include for the requested number of pages.
657 	 * Walk the list from whichever end is closer to halve the search space. */
658 	if (count <= pool->npages/2) {
659 		i = 0;
660 		list_for_each(p, &pool->list) {
661 			if (++i == count)
662 				break;
663 		}
664 	} else {
665 		i = pool->npages + 1;
666 		list_for_each_prev(p, &pool->list) {
667 			if (--i == count)
668 				break;
669 		}
670 	}
671 	/* Cut 'count' number of pages from the pool */
672 	list_cut_position(pages, &pool->list, p);
673 	pool->npages -= count;
674 	count = 0;
675 out:
676 	spin_unlock_irqrestore(&pool->lock, irq_flags);
677 	return count;
678 }
679 
680 /* Put all pages in the pages array into the correct pool to wait for reuse */
681 static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
682 			  enum ttm_caching_state cstate)
683 {
684 	unsigned long irq_flags;
685 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
686 	unsigned i;
687 
688 	if (pool == NULL) {
689 		/* No pool for this memory type so free the pages */
690 		for (i = 0; i < npages; i++) {
691 			if (pages[i]) {
692 				if (page_count(pages[i]) != 1)
693 					pr_err("Erroneous page count. Leaking pages.\n");
694 				__free_page(pages[i]);
695 				pages[i] = NULL;
696 			}
697 		}
698 		return;
699 	}
700 
701 	spin_lock_irqsave(&pool->lock, irq_flags);
702 	for (i = 0; i < npages; i++) {
703 		if (pages[i]) {
704 			if (page_count(pages[i]) != 1)
705 				pr_err("Erroneous page count. Leaking pages.\n");
706 			list_add_tail(&pages[i]->lru, &pool->list);
707 			pages[i] = NULL;
708 			pool->npages++;
709 		}
710 	}
711 	/* Check that we don't go over the pool limit */
712 	npages = 0;
713 	if (pool->npages > _manager->options.max_size) {
714 		npages = pool->npages - _manager->options.max_size;
715 		/* free at least NUM_PAGES_TO_ALLOC pages to reduce the
716 		 * number of calls to set_memory_wb */
717 		if (npages < NUM_PAGES_TO_ALLOC)
718 			npages = NUM_PAGES_TO_ALLOC;
719 	}
720 	spin_unlock_irqrestore(&pool->lock, irq_flags);
721 	if (npages)
722 		ttm_page_pool_free(pool, npages, false);
723 }
724 
725 /*
726  * On success the pages array will hold npages correctly
727  * cached pages.
728  */
729 static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
730 			 enum ttm_caching_state cstate)
731 {
732 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
733 	struct list_head plist;
734 	struct page *p = NULL;
735 	gfp_t gfp_flags = GFP_USER;
736 	unsigned count;
737 	int r;
738 
739 	/* set zero flag for page allocation if required */
740 	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
741 		gfp_flags |= __GFP_ZERO;
742 
743 	/* No pool for cached pages */
744 	if (pool == NULL) {
745 		if (flags & TTM_PAGE_FLAG_DMA32)
746 			gfp_flags |= GFP_DMA32;
747 		else
748 			gfp_flags |= GFP_HIGHUSER;
749 
750 		for (r = 0; r < npages; ++r) {
751 			p = alloc_page(gfp_flags);
752 			if (!p) {
753 
754 				pr_err("Unable to allocate page\n");
755 				return -ENOMEM;
756 			}
757 
758 			pages[r] = p;
759 		}
760 		return 0;
761 	}
762 
763 	/* combine zero flag to pool flags */
764 	gfp_flags |= pool->gfp_flags;
765 
766 	/* First we take pages from the pool */
767 	INIT_LIST_HEAD(&plist);
768 	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
769 	count = 0;
770 	list_for_each_entry(p, &plist, lru) {
771 		pages[count++] = p;
772 	}
773 
774 	/* clear the pages coming from the pool if requested */
775 	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
776 		list_for_each_entry(p, &plist, lru) {
777 			if (PageHighMem(p))
778 				clear_highpage(p);
779 			else
780 				clear_page(page_address(p));
781 		}
782 	}
783 
784 	/* If the pool didn't have enough pages, allocate new ones. */
785 	if (npages > 0) {
786 		/* ttm_alloc_new_pages doesn't reference pool so we can run
787 		 * multiple requests in parallel.
788 		 **/
789 		INIT_LIST_HEAD(&plist);
790 		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
791 		list_for_each_entry(p, &plist, lru) {
792 			pages[count++] = p;
793 		}
794 		if (r) {
795 			/* If there are any pages in the list, put them back
796 			 * into the pool. */
797 			pr_err("Failed to allocate extra pages for large request\n");
798 			ttm_put_pages(pages, count, flags, cstate);
799 			return r;
800 		}
801 	}
802 
803 	return 0;
804 }
805 
806 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
807 		char *name)
808 {
809 	spin_lock_init(&pool->lock);
810 	pool->fill_lock = false;
811 	INIT_LIST_HEAD(&pool->list);
812 	pool->npages = pool->nfrees = 0;
813 	pool->gfp_flags = flags;
814 	pool->name = name;
815 }
816 
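/*
 * Set up the global pool manager: one wc/uc pool pair for normal and one for
 * DMA32 allocations, the sysfs "pool" kobject, and the MM shrinker.  The
 * caller (typically the ttm_mem_global setup code) passes max_pages as the
 * per-pool size limit.
 */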
817 int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
818 {
819 	int ret;
820 
821 	WARN_ON(_manager);
822 
823 	pr_info("Initializing pool allocator\n");
824 
825 	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
826 	if (!_manager)
827 		return -ENOMEM;
828 
829 	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
830 
831 	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
832 
833 	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
834 				  GFP_USER | GFP_DMA32, "wc dma");
835 
836 	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
837 				  GFP_USER | GFP_DMA32, "uc dma");
838 
839 	_manager->options.max_size = max_pages;
840 	_manager->options.small = SMALL_ALLOCATION;
841 	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
842 
843 	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
844 				   &glob->kobj, "pool");
845 	if (unlikely(ret != 0)) {
846 		kobject_put(&_manager->kobj);
847 		_manager = NULL;
848 		return ret;
849 	}
850 
851 	ttm_pool_mm_shrink_init(_manager);
852 
853 	return 0;
854 }
855 
856 void ttm_page_alloc_fini(void)
857 {
858 	int i;
859 
860 	pr_info("Finalizing pool allocator\n");
861 	ttm_pool_mm_shrink_fini(_manager);
862 
863 	/* OK to use static buffer since global mutex is no longer used. */
864 	for (i = 0; i < NUM_POOLS; ++i)
865 		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
866 
867 	kobject_put(&_manager->kobj);
868 	_manager = NULL;
869 }
870 
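/*
 * Back every entry of ttm->pages[] with a page of the requested caching
 * state, account each page against the global memory limit, and swap the
 * ttm back in if it had been swapped out, before marking it tt_unbound.
 */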
871 int ttm_pool_populate(struct ttm_tt *ttm)
872 {
873 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
874 	unsigned i;
875 	int ret;
876 
877 	if (ttm->state != tt_unpopulated)
878 		return 0;
879 
880 	for (i = 0; i < ttm->num_pages; ++i) {
881 		ret = ttm_get_pages(&ttm->pages[i], 1,
882 				    ttm->page_flags,
883 				    ttm->caching_state);
884 		if (ret != 0) {
885 			ttm_pool_unpopulate(ttm);
886 			return -ENOMEM;
887 		}
888 
889 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
890 						false, false);
891 		if (unlikely(ret != 0)) {
892 			ttm_pool_unpopulate(ttm);
893 			return -ENOMEM;
894 		}
895 	}
896 
897 	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
898 		ret = ttm_tt_swapin(ttm);
899 		if (unlikely(ret != 0)) {
900 			ttm_pool_unpopulate(ttm);
901 			return ret;
902 		}
903 	}
904 
905 	ttm->state = tt_unbound;
906 	return 0;
907 }
908 EXPORT_SYMBOL(ttm_pool_populate);
909 
910 void ttm_pool_unpopulate(struct ttm_tt *ttm)
911 {
912 	unsigned i;
913 
914 	for (i = 0; i < ttm->num_pages; ++i) {
915 		if (ttm->pages[i]) {
916 			ttm_mem_global_free_page(ttm->glob->mem_glob,
917 						 ttm->pages[i]);
918 			ttm_put_pages(&ttm->pages[i], 1,
919 				      ttm->page_flags,
920 				      ttm->caching_state);
921 		}
922 	}
923 	ttm->state = tt_unpopulated;
924 }
925 EXPORT_SYMBOL(ttm_pool_unpopulate);
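/*
 * Illustrative only: a driver's ttm_tt backend would typically forward its
 * populate/unpopulate callbacks straight to the helpers above.  The names
 * below are hypothetical, not part of this file:
 *
 *	static int foo_ttm_tt_populate(struct ttm_tt *ttm)
 *	{
 *		return ttm_pool_populate(ttm);
 *	}
 *
 *	static void foo_ttm_tt_unpopulate(struct ttm_tt *ttm)
 *	{
 *		ttm_pool_unpopulate(ttm);
 *	}
 */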
926 
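/*
 * Dump one line per pool.  Illustrative output only (the numbers are made
 * up; the column widths come from the format strings below):
 *
 *	  pool      refills   pages freed     size
 *	    wc           12          3840      256
 *	    uc            3             0       16
 *	wc dma            0             0        0
 *	uc dma            0             0        0
 */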
927 int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
928 {
929 	struct ttm_page_pool *p;
930 	unsigned i;
931 	char *h[] = {"pool", "refills", "pages freed", "size"};
932 	if (!_manager) {
933 		seq_printf(m, "No pool allocator running.\n");
934 		return 0;
935 	}
936 	seq_printf(m, "%6s %12s %13s %8s\n",
937 			h[0], h[1], h[2], h[3]);
938 	for (i = 0; i < NUM_POOLS; ++i) {
939 		p = &_manager->pools[i];
940 
941 		seq_printf(m, "%6s %12ld %13ld %8d\n",
942 				p->name, p->nrefills,
943 				p->nfrees, p->npages);
944 	}
945 	return 0;
946 }
947 EXPORT_SYMBOL(ttm_page_alloc_debugfs);
948