xref: /dflybsd-src/sys/dev/drm/ttm/ttm_page_alloc.c (revision 932d855e0922ed9e1decd9e1557d1ad3c065b76b)
15718399fSFrançois Tigeot /*
25718399fSFrançois Tigeot  * Copyright (c) Red Hat Inc.
35718399fSFrançois Tigeot 
45718399fSFrançois Tigeot  * Permission is hereby granted, free of charge, to any person obtaining a
55718399fSFrançois Tigeot  * copy of this software and associated documentation files (the "Software"),
65718399fSFrançois Tigeot  * to deal in the Software without restriction, including without limitation
75718399fSFrançois Tigeot  * the rights to use, copy, modify, merge, publish, distribute, sub license,
85718399fSFrançois Tigeot  * and/or sell copies of the Software, and to permit persons to whom the
95718399fSFrançois Tigeot  * Software is furnished to do so, subject to the following conditions:
105718399fSFrançois Tigeot  *
115718399fSFrançois Tigeot  * The above copyright notice and this permission notice (including the
125718399fSFrançois Tigeot  * next paragraph) shall be included in all copies or substantial portions
135718399fSFrançois Tigeot  * of the Software.
145718399fSFrançois Tigeot  *
155718399fSFrançois Tigeot  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
165718399fSFrançois Tigeot  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
175718399fSFrançois Tigeot  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
185718399fSFrançois Tigeot  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
195718399fSFrançois Tigeot  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
205718399fSFrançois Tigeot  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
215718399fSFrançois Tigeot  * DEALINGS IN THE SOFTWARE.
225718399fSFrançois Tigeot  *
235718399fSFrançois Tigeot  * Authors: Dave Airlie <airlied@redhat.com>
245718399fSFrançois Tigeot  *          Jerome Glisse <jglisse@redhat.com>
255718399fSFrançois Tigeot  *          Pauli Nieminen <suokkos@gmail.com>
265718399fSFrançois Tigeot  */
275718399fSFrançois Tigeot /*
285718399fSFrançois Tigeot  * Copyright (c) 2013 The FreeBSD Foundation
295718399fSFrançois Tigeot  * All rights reserved.
305718399fSFrançois Tigeot  *
315718399fSFrançois Tigeot  * Portions of this software were developed by Konstantin Belousov
325718399fSFrançois Tigeot  * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
335718399fSFrançois Tigeot  */
345718399fSFrançois Tigeot 
355718399fSFrançois Tigeot /* simple list based uncached page pool
365718399fSFrançois Tigeot  * - Pool collects resently freed pages for reuse
375718399fSFrançois Tigeot  * - Use page->lru to keep a free list
385718399fSFrançois Tigeot  * - doesn't track currently in use pages
395718399fSFrançois Tigeot  */
405718399fSFrançois Tigeot 
410bece63dSImre Vadasz #define pr_fmt(fmt) "[TTM] " fmt
420bece63dSImre Vadasz 
436af927c2SFrançois Tigeot #include <linux/list.h>
446af927c2SFrançois Tigeot #include <linux/spinlock.h>
456af927c2SFrançois Tigeot #include <linux/highmem.h>
466af927c2SFrançois Tigeot #include <linux/mm_types.h>
476af927c2SFrançois Tigeot #include <linux/module.h>
486af927c2SFrançois Tigeot #include <linux/mm.h>
496af927c2SFrançois Tigeot #include <linux/seq_file.h> /* for seq_printf */
506af927c2SFrançois Tigeot #include <linux/dma-mapping.h>
515718399fSFrançois Tigeot 
526af927c2SFrançois Tigeot #include <linux/atomic.h>
536af927c2SFrançois Tigeot 
54216f7a2cSFrançois Tigeot #include <drm/ttm/ttm_bo_driver.h>
55216f7a2cSFrançois Tigeot #include <drm/ttm/ttm_page_alloc.h>
56*932d855eSSergey Zigachev #include <drm/ttm/ttm_set_memory.h>
575718399fSFrançois Tigeot 
58f0bba3d1SFrançois Tigeot #define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
595718399fSFrançois Tigeot #define SMALL_ALLOCATION		16
605718399fSFrançois Tigeot #define FREE_ALL_PAGES			(~0U)
615718399fSFrançois Tigeot /* times are in msecs */
625718399fSFrançois Tigeot #define PAGE_FREE_INTERVAL		1000
635718399fSFrançois Tigeot 
645718399fSFrançois Tigeot /**
655718399fSFrançois Tigeot  * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
665718399fSFrançois Tigeot  *
675718399fSFrançois Tigeot  * @lock: Protects the shared pool from concurrent access. Must be used with
685718399fSFrançois Tigeot  * irqsave/irqrestore variants because pool allocator maybe called from
695718399fSFrançois Tigeot  * delayed work.
705718399fSFrançois Tigeot  * @fill_lock: Prevent concurrent calls to fill.
715718399fSFrançois Tigeot  * @list: Pool of free uc/wc pages for fast reuse.
725718399fSFrançois Tigeot  * @gfp_flags: Flags to pass for alloc_page.
735718399fSFrançois Tigeot  * @npages: Number of pages in pool.
 * @name: Human-readable pool name used for statistics/debug output.
 * @nfrees: Running total of pages freed from this pool (statistics,
 * updated under @lock by ttm_pool_update_free_locked()).
 * @nrefills: Running total of refill operations on this pool (statistics).
 * @order: Allocation order of every entry on @list; each entry covers
 * (1 << order) base pages (see ttm_pages_put()/ttm_page_pool_free()).
745718399fSFrançois Tigeot  */
755718399fSFrançois Tigeot struct ttm_page_pool {
765718399fSFrançois Tigeot 	struct lock		lock;
775718399fSFrançois Tigeot 	bool			fill_lock;
785718399fSFrançois Tigeot 	struct pglist		list;
796af927c2SFrançois Tigeot 	gfp_t			gfp_flags;
805718399fSFrançois Tigeot 	unsigned		npages;
815718399fSFrançois Tigeot 	char			*name;
825718399fSFrançois Tigeot 	unsigned long		nfrees;
835718399fSFrançois Tigeot 	unsigned long		nrefills;
84*932d855eSSergey Zigachev 	unsigned int		order;
855718399fSFrançois Tigeot };
865718399fSFrançois Tigeot 
875718399fSFrançois Tigeot /**
885718399fSFrançois Tigeot  * Limits for the pool. They are handled without locks because only place where
895718399fSFrançois Tigeot  * they may change is in sysfs store. They won't have immediate effect anyway
905718399fSFrançois Tigeot  * so forcing serialization to access them is pointless.
915718399fSFrançois Tigeot  */
925718399fSFrançois Tigeot 
935718399fSFrançois Tigeot struct ttm_pool_opts {
945718399fSFrançois Tigeot 	unsigned	alloc_size;	/* pages per refill (sysfs pool_allocation_size, stored as pages) */
955718399fSFrançois Tigeot 	unsigned	max_size;	/* max pages kept per pool (sysfs pool_max_size) */
965718399fSFrançois Tigeot 	unsigned	small;	/* request sizes up to this many pages count as "small" (sysfs pool_small_allocation) */
975718399fSFrançois Tigeot };
985718399fSFrançois Tigeot 
995718399fSFrançois Tigeot #define NUM_POOLS 4
1005718399fSFrançois Tigeot 
1015718399fSFrançois Tigeot /**
1025718399fSFrançois Tigeot  * struct ttm_pool_manager - Holds memory pools for fast allocation
1035718399fSFrançois Tigeot  *
1045718399fSFrançois Tigeot  * Manager is read only object for pool code so it doesn't need locking.
1055718399fSFrançois Tigeot  *
 * @kobj: sysfs anchor; its release callback frees the whole manager
 * (ttm_pool_kobj_release()).
 * @mm_shrink: shrinker whose count/scan callbacks trim the pools under
 * memory pressure (wired up in ttm_pool_mm_shrink_init()).
 * @lowmem_handler: DragonFly vm_lowmem event registration tag.
 * @options: tunable limits, updated via the sysfs store hook.
1125718399fSFrançois Tigeot  *
1135718399fSFrançois Tigeot  * @pools: All pool objects in use.
1145718399fSFrançois Tigeot  **/
1143a2096e8SFrançois Tigeot struct ttm_pool_manager {
1153a2096e8SFrançois Tigeot 	struct kobject		kobj;
11643e748b9SFrançois Tigeot 	struct shrinker		mm_shrink;
1175718399fSFrançois Tigeot 	eventhandler_tag lowmem_handler;
1185718399fSFrançois Tigeot 	struct ttm_pool_opts	options;
1195718399fSFrançois Tigeot 
	/* The named members below must stay in sync with the pools[] array:
	 * ttm_get_pool() indexes pools[] assuming exactly this layout. */
1205718399fSFrançois Tigeot 	union {
1216af927c2SFrançois Tigeot 		struct ttm_page_pool	pools[NUM_POOLS];
1226af927c2SFrançois Tigeot 		struct {
1236af927c2SFrançois Tigeot 			struct ttm_page_pool	wc_pool;
1246af927c2SFrançois Tigeot 			struct ttm_page_pool	uc_pool;
1256af927c2SFrançois Tigeot 			struct ttm_page_pool	wc_pool_dma32;
1266af927c2SFrançois Tigeot 			struct ttm_page_pool	uc_pool_dma32;
127*932d855eSSergey Zigachev 			struct ttm_page_pool	wc_pool_huge;
128*932d855eSSergey Zigachev 			struct ttm_page_pool	uc_pool_huge;
1295718399fSFrançois Tigeot 		} ;
1306af927c2SFrançois Tigeot 	};
1316af927c2SFrançois Tigeot };
1325718399fSFrançois Tigeot 
/*
 * sysfs attributes exposing the pool tunables.  Values are presented in
 * kilobytes; ttm_pool_store()/ttm_pool_show() convert to/from page counts.
 */
1333a2096e8SFrançois Tigeot static struct attribute ttm_page_pool_max = {
1343a2096e8SFrançois Tigeot 	.name = "pool_max_size",
1353a2096e8SFrançois Tigeot 	.mode = S_IRUGO | S_IWUSR
1363a2096e8SFrançois Tigeot };
1373a2096e8SFrançois Tigeot static struct attribute ttm_page_pool_small = {
1383a2096e8SFrançois Tigeot 	.name = "pool_small_allocation",
1393a2096e8SFrançois Tigeot 	.mode = S_IRUGO | S_IWUSR
1403a2096e8SFrançois Tigeot };
1413a2096e8SFrançois Tigeot static struct attribute ttm_page_pool_alloc_size = {
1423a2096e8SFrançois Tigeot 	.name = "pool_allocation_size",
1433a2096e8SFrançois Tigeot 	.mode = S_IRUGO | S_IWUSR
1443a2096e8SFrançois Tigeot };
1453a2096e8SFrançois Tigeot 
/* NULL-terminated list handed to the kobj_type's default_attrs. */
1463a2096e8SFrançois Tigeot static struct attribute *ttm_pool_attrs[] = {
1473a2096e8SFrançois Tigeot 	&ttm_page_pool_max,
1483a2096e8SFrançois Tigeot 	&ttm_page_pool_small,
1493a2096e8SFrançois Tigeot 	&ttm_page_pool_alloc_size,
1503a2096e8SFrançois Tigeot 	NULL
1513a2096e8SFrançois Tigeot };
1523a2096e8SFrançois Tigeot 
ttm_pool_kobj_release(struct kobject * kobj)1533a2096e8SFrançois Tigeot static void ttm_pool_kobj_release(struct kobject *kobj)
1545718399fSFrançois Tigeot {
1553a2096e8SFrançois Tigeot 	struct ttm_pool_manager *m =
1563a2096e8SFrançois Tigeot 		container_of(kobj, struct ttm_pool_manager, kobj);
157175896dfSzrj 	kfree(m);
1585718399fSFrançois Tigeot }
1595718399fSFrançois Tigeot 
ttm_pool_store(struct kobject * kobj,struct attribute * attr,const char * buffer,size_t size)1603a2096e8SFrançois Tigeot static ssize_t ttm_pool_store(struct kobject *kobj,
1615718399fSFrançois Tigeot 		struct attribute *attr, const char *buffer, size_t size)
1625718399fSFrançois Tigeot {
1633a2096e8SFrançois Tigeot 	struct ttm_pool_manager *m =
1643a2096e8SFrançois Tigeot 		container_of(kobj, struct ttm_pool_manager, kobj);
1655718399fSFrançois Tigeot 	int chars;
1665718399fSFrançois Tigeot 	unsigned val;
1673a2096e8SFrançois Tigeot 	chars = ksscanf(buffer, "%u", &val);
1685718399fSFrançois Tigeot 	if (chars == 0)
1695718399fSFrançois Tigeot 		return size;
1705718399fSFrançois Tigeot 
1715718399fSFrançois Tigeot 	/* Convert kb to number of pages */
1725718399fSFrançois Tigeot 	val = val / (PAGE_SIZE >> 10);
1735718399fSFrançois Tigeot 
1745718399fSFrançois Tigeot 	if (attr == &ttm_page_pool_max)
1755718399fSFrançois Tigeot 		m->options.max_size = val;
1765718399fSFrançois Tigeot 	else if (attr == &ttm_page_pool_small)
1775718399fSFrançois Tigeot 		m->options.small = val;
1785718399fSFrançois Tigeot 	else if (attr == &ttm_page_pool_alloc_size) {
1795718399fSFrançois Tigeot 		if (val > NUM_PAGES_TO_ALLOC*8) {
1805718399fSFrançois Tigeot 			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
1815718399fSFrançois Tigeot 			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
1825718399fSFrançois Tigeot 			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
1835718399fSFrançois Tigeot 			return size;
1845718399fSFrançois Tigeot 		} else if (val > NUM_PAGES_TO_ALLOC) {
1855718399fSFrançois Tigeot 			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
1865718399fSFrançois Tigeot 				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
1875718399fSFrançois Tigeot 		}
1885718399fSFrançois Tigeot 		m->options.alloc_size = val;
1895718399fSFrançois Tigeot 	}
1905718399fSFrançois Tigeot 
1915718399fSFrançois Tigeot 	return size;
1925718399fSFrançois Tigeot }
1935718399fSFrançois Tigeot 
ttm_pool_show(struct kobject * kobj,struct attribute * attr,char * buffer)1943a2096e8SFrançois Tigeot static ssize_t ttm_pool_show(struct kobject *kobj,
1955718399fSFrançois Tigeot 		struct attribute *attr, char *buffer)
1965718399fSFrançois Tigeot {
1973a2096e8SFrançois Tigeot 	struct ttm_pool_manager *m =
1983a2096e8SFrançois Tigeot 		container_of(kobj, struct ttm_pool_manager, kobj);
1995718399fSFrançois Tigeot 	unsigned val = 0;
2005718399fSFrançois Tigeot 
2015718399fSFrançois Tigeot 	if (attr == &ttm_page_pool_max)
2025718399fSFrançois Tigeot 		val = m->options.max_size;
2035718399fSFrançois Tigeot 	else if (attr == &ttm_page_pool_small)
2045718399fSFrançois Tigeot 		val = m->options.small;
2055718399fSFrançois Tigeot 	else if (attr == &ttm_page_pool_alloc_size)
2065718399fSFrançois Tigeot 		val = m->options.alloc_size;
2075718399fSFrançois Tigeot 
2085718399fSFrançois Tigeot 	val = val * (PAGE_SIZE >> 10);
2095718399fSFrançois Tigeot 
2103a2096e8SFrançois Tigeot 	return ksnprintf(buffer, PAGE_SIZE, "%u\n", val);
2115718399fSFrançois Tigeot }
2123a2096e8SFrançois Tigeot 
/* Wire the show/store callbacks into the generic sysfs machinery. */
2133a2096e8SFrançois Tigeot static const struct sysfs_ops ttm_pool_sysfs_ops = {
2143a2096e8SFrançois Tigeot 	.show = &ttm_pool_show,
2153a2096e8SFrançois Tigeot 	.store = &ttm_pool_store,
2163a2096e8SFrançois Tigeot };
2173a2096e8SFrançois Tigeot 
/*
 * kobject type for the pool manager.  Dropping the last reference to the
 * embedded kobject frees the manager itself via ttm_pool_kobj_release().
 */
2183a2096e8SFrançois Tigeot static struct kobj_type ttm_pool_kobj_type = {
2193a2096e8SFrançois Tigeot 	.release = &ttm_pool_kobj_release,
2203a2096e8SFrançois Tigeot 	.sysfs_ops = &ttm_pool_sysfs_ops,
2213a2096e8SFrançois Tigeot 	.default_attrs = ttm_pool_attrs,
2223a2096e8SFrançois Tigeot };
2235718399fSFrançois Tigeot 
2245718399fSFrançois Tigeot static struct ttm_pool_manager *_manager;
2255718399fSFrançois Tigeot 
2265718399fSFrançois Tigeot /**
2275718399fSFrançois Tigeot  * Select the right pool or requested caching state and ttm flags. */
ttm_get_pool(int flags,bool huge,enum ttm_caching_state cstate)228*932d855eSSergey Zigachev static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
2295718399fSFrançois Tigeot 					  enum ttm_caching_state cstate)
2305718399fSFrançois Tigeot {
2315718399fSFrançois Tigeot 	int pool_index;
2325718399fSFrançois Tigeot 
2335718399fSFrançois Tigeot 	if (cstate == tt_cached)
2345718399fSFrançois Tigeot 		return NULL;
2355718399fSFrançois Tigeot 
2365718399fSFrançois Tigeot 	if (cstate == tt_wc)
2375718399fSFrançois Tigeot 		pool_index = 0x0;
2385718399fSFrançois Tigeot 	else
2395718399fSFrançois Tigeot 		pool_index = 0x1;
2405718399fSFrançois Tigeot 
241*932d855eSSergey Zigachev 	if (flags & TTM_PAGE_FLAG_DMA32) {
242*932d855eSSergey Zigachev 		if (huge)
243*932d855eSSergey Zigachev 			return NULL;
2445718399fSFrançois Tigeot 		pool_index |= 0x2;
2455718399fSFrançois Tigeot 
246*932d855eSSergey Zigachev 	} else if (huge) {
247*932d855eSSergey Zigachev 		pool_index |= 0x4;
248*932d855eSSergey Zigachev 	}
249*932d855eSSergey Zigachev 
2505718399fSFrançois Tigeot 	return &_manager->pools[pool_index];
2515718399fSFrançois Tigeot }
2525718399fSFrançois Tigeot 
/*
 * Restore write-back caching on @npages pool entries and hand them back to
 * the page allocator.  Each entry covers (1 << order) base pages.
 */
static void ttm_pages_put(struct page *pages[], unsigned npages,
		unsigned int order)
{
	unsigned int idx;
	unsigned int pages_nr = 1 << order;

	if (order == 0) {
		/* Order-0 entries can be flipped back to wb in one batch. */
		if (ttm_set_pages_array_wb(pages, npages))
			pr_err("Failed to set %d pages to wb!\n", npages);
		for (idx = 0; idx < npages; ++idx)
			__free_pages(pages[idx], order);
		return;
	}

	/* Higher-order entries are reset one allocation at a time. */
	for (idx = 0; idx < npages; ++idx) {
		if (ttm_set_pages_wb(pages[idx], pages_nr))
			pr_err("Failed to set %d pages to wb!\n", pages_nr);
		__free_pages(pages[idx], order);
	}
}
2725718399fSFrançois Tigeot 
ttm_pool_update_free_locked(struct ttm_page_pool * pool,unsigned freed_pages)2735718399fSFrançois Tigeot static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
2745718399fSFrançois Tigeot 		unsigned freed_pages)
2755718399fSFrançois Tigeot {
2765718399fSFrançois Tigeot 	pool->npages -= freed_pages;
2775718399fSFrançois Tigeot 	pool->nfrees += freed_pages;
2785718399fSFrançois Tigeot }
2795718399fSFrançois Tigeot 
2805718399fSFrançois Tigeot /**
2815718399fSFrançois Tigeot  * Free pages from pool.
2825718399fSFrançois Tigeot  *
2835718399fSFrançois Tigeot  * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
2845718399fSFrançois Tigeot  * number of pages in one go.
2855718399fSFrançois Tigeot  *
2865718399fSFrançois Tigeot  * @pool: to free the pages from
2875718399fSFrançois Tigeot  * @free_all: If set to true will free all pages in pool
 * @nr_free: number of pages to free; FREE_ALL_PAGES drains the pool.
2887dcf36dcSFrançois Tigeot  * @use_static: Safe to use static buffer
 *
 * Returns the number of requested pages that could NOT be freed (i.e. the
 * leftover of @nr_free after the pool ran out of pages), or 0 if the
 * scratch-array allocation failed and nothing was attempted.
2895718399fSFrançois Tigeot  **/
ttm_page_pool_free(struct ttm_page_pool * pool,unsigned nr_free,bool use_static)2901cfef1a5SFrançois Tigeot static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
2917dcf36dcSFrançois Tigeot 			      bool use_static)
2925718399fSFrançois Tigeot {
2937dcf36dcSFrançois Tigeot 	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
2946af927c2SFrançois Tigeot 	unsigned long irq_flags;
2956af927c2SFrançois Tigeot 	struct vm_page *p, *p1;
296f0bba3d1SFrançois Tigeot 	struct page **pages_to_free;
2975718399fSFrançois Tigeot 	unsigned freed_pages = 0,
2985718399fSFrançois Tigeot 		 npages_to_free = nr_free;
2996f486c69SFrançois Tigeot 	unsigned i;
3005718399fSFrançois Tigeot 
3015718399fSFrançois Tigeot 	if (NUM_PAGES_TO_ALLOC < nr_free)
3025718399fSFrançois Tigeot 		npages_to_free = NUM_PAGES_TO_ALLOC;
3035718399fSFrançois Tigeot 
	/* The static buffer is only safe when the caller holds the global
	 * shrinker mutex (use_static == true); otherwise allocate scratch. */
3047dcf36dcSFrançois Tigeot 	if (use_static)
3057dcf36dcSFrançois Tigeot 		pages_to_free = static_buf;
3067dcf36dcSFrançois Tigeot 	else
3077dcf36dcSFrançois Tigeot 		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
3087dcf36dcSFrançois Tigeot 					M_DRM, GFP_KERNEL);
3096af927c2SFrançois Tigeot 	if (!pages_to_free) {
3106af927c2SFrançois Tigeot 		pr_err("Failed to allocate memory for pool free operation\n");
3116af927c2SFrançois Tigeot 		return 0;
3126af927c2SFrançois Tigeot 	}
3135718399fSFrançois Tigeot 
	/* Walk the free list from the tail (coldest pages first), collecting
	 * at most NUM_PAGES_TO_ALLOC entries per pass under the lock. */
3145718399fSFrançois Tigeot restart:
3156af927c2SFrançois Tigeot 	spin_lock_irqsave(&pool->lock, irq_flags);
3165718399fSFrançois Tigeot 
3175718399fSFrançois Tigeot 	TAILQ_FOREACH_REVERSE_MUTABLE(p, &pool->list, pglist, pageq, p1) {
3185718399fSFrançois Tigeot 		if (freed_pages >= npages_to_free)
3195718399fSFrançois Tigeot 			break;
3205718399fSFrançois Tigeot 
321f0bba3d1SFrançois Tigeot 		pages_to_free[freed_pages++] = (struct page *)p;
3225718399fSFrançois Tigeot 		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
3235718399fSFrançois Tigeot 		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
3245718399fSFrançois Tigeot 			/* remove range of pages from the pool */
3256f486c69SFrançois Tigeot 			for (i = 0; i < freed_pages; i++)
326f0bba3d1SFrançois Tigeot 				TAILQ_REMOVE(&pool->list, (struct vm_page *)pages_to_free[i], pageq);
3275718399fSFrançois Tigeot 
3285718399fSFrançois Tigeot 			ttm_pool_update_free_locked(pool, freed_pages);
3295718399fSFrançois Tigeot 			/**
3305718399fSFrançois Tigeot 			 * Because changing page caching is costly
3315718399fSFrançois Tigeot 			 * we unlock the pool to prevent stalling.
3325718399fSFrançois Tigeot 			 */
3336af927c2SFrançois Tigeot 			spin_unlock_irqrestore(&pool->lock, irq_flags);
3345718399fSFrançois Tigeot 
335*932d855eSSergey Zigachev 			ttm_pages_put(pages_to_free, freed_pages, pool->order);
3365718399fSFrançois Tigeot 			if (likely(nr_free != FREE_ALL_PAGES))
3375718399fSFrançois Tigeot 				nr_free -= freed_pages;
3385718399fSFrançois Tigeot 
3395718399fSFrançois Tigeot 			if (NUM_PAGES_TO_ALLOC >= nr_free)
3405718399fSFrançois Tigeot 				npages_to_free = nr_free;
3415718399fSFrançois Tigeot 			else
3425718399fSFrançois Tigeot 				npages_to_free = NUM_PAGES_TO_ALLOC;
3435718399fSFrançois Tigeot 
3445718399fSFrançois Tigeot 			freed_pages = 0;
3455718399fSFrançois Tigeot 
3465718399fSFrançois Tigeot 			/* free all so restart the processing */
3475718399fSFrançois Tigeot 			if (nr_free)
3485718399fSFrançois Tigeot 				goto restart;
3495718399fSFrançois Tigeot 
3505718399fSFrançois Tigeot 			/* Not allowed to fall through or break because
3515718399fSFrançois Tigeot 			 * following context is inside spinlock while we are
3525718399fSFrançois Tigeot 			 * outside here.
3535718399fSFrançois Tigeot 			 */
3545718399fSFrançois Tigeot 			goto out;
3555718399fSFrançois Tigeot 
3565718399fSFrançois Tigeot 		}
3575718399fSFrançois Tigeot 	}
3585718399fSFrançois Tigeot 
3595718399fSFrançois Tigeot 	/* remove range of pages from the pool */
3605718399fSFrançois Tigeot 	if (freed_pages) {
3616f486c69SFrançois Tigeot 		for (i = 0; i < freed_pages; i++)
362f0bba3d1SFrançois Tigeot 			TAILQ_REMOVE(&pool->list, (struct vm_page *)pages_to_free[i], pageq);
3635718399fSFrançois Tigeot 
3645718399fSFrançois Tigeot 		ttm_pool_update_free_locked(pool, freed_pages);
3655718399fSFrançois Tigeot 		nr_free -= freed_pages;
3665718399fSFrançois Tigeot 	}
3675718399fSFrançois Tigeot 
3685718399fSFrançois Tigeot 	spin_unlock_irqrestore(&pool->lock, irq_flags)// NOTE: original line kept verbatim below
3695718399fSFrançois Tigeot 
	/* Final partial batch is released outside the lock, same as above. */
3705718399fSFrançois Tigeot 	if (freed_pages)
371*932d855eSSergey Zigachev 		ttm_pages_put(pages_to_free, freed_pages, pool->order);
3725718399fSFrançois Tigeot out:
3737dcf36dcSFrançois Tigeot 	if (pages_to_free != static_buf)
3746af927c2SFrançois Tigeot 		kfree(pages_to_free);
3755718399fSFrançois Tigeot 	return nr_free;
3765718399fSFrançois Tigeot }
3775718399fSFrançois Tigeot 
3785718399fSFrançois Tigeot /**
3795718399fSFrançois Tigeot  * Callback for mm to request pool to reduce number of page held.
38043e748b9SFrançois Tigeot  *
38143e748b9SFrançois Tigeot  * XXX: (dchinner) Deadlock warning!
38243e748b9SFrançois Tigeot  *
38343e748b9SFrançois Tigeot  * This code is crying out for a shrinker per pool....
 *
 * Walks the pools round-robin (rotating the start index on every call so
 * no single pool is always hit first) and frees up to a fixed budget of
 * pages from each.  Returns the number of base pages actually freed, or
 * SHRINK_STOP when another shrink pass already holds the serialization
 * mutex.
3845718399fSFrançois Tigeot  */
38543e748b9SFrançois Tigeot static unsigned long
ttm_pool_shrink_scan(struct shrinker * shrink,struct shrink_control * sc)386*932d855eSSergey Zigachev ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3875718399fSFrançois Tigeot {
	/* Static mutex + start_pool implement the round-robin across calls;
	 * the mutex also makes the static buffer in ttm_page_pool_free safe. */
3881cfef1a5SFrançois Tigeot 	static DEFINE_MUTEX(lock);
3891cfef1a5SFrançois Tigeot 	static unsigned start_pool;
3905718399fSFrançois Tigeot 	unsigned i;
3911cfef1a5SFrançois Tigeot 	unsigned pool_offset;
3925718399fSFrançois Tigeot 	struct ttm_page_pool *pool;
3935718399fSFrançois Tigeot 	int shrink_pages = 100; /* XXXKIB */
39443e748b9SFrançois Tigeot 	unsigned long freed = 0;
395*932d855eSSergey Zigachev 	unsigned int nr_free_pool;
3965718399fSFrançois Tigeot 
3971cfef1a5SFrançois Tigeot #ifdef __DragonFly__
	/* NOTE(review): unconditionally forces a blocking allocation mask on
	 * DragonFly — confirm this is safe for every vm_lowmem context. */
3981cfef1a5SFrançois Tigeot 	sc->gfp_mask = M_WAITOK;
3991cfef1a5SFrançois Tigeot #endif
4001cfef1a5SFrançois Tigeot 
4011cfef1a5SFrançois Tigeot 	if (!mutex_trylock(&lock))
4021cfef1a5SFrançois Tigeot 		return SHRINK_STOP;
4031cfef1a5SFrançois Tigeot 	pool_offset = ++start_pool % NUM_POOLS;
4045718399fSFrançois Tigeot 	/* select start pool in round robin fashion */
4055718399fSFrançois Tigeot 	for (i = 0; i < NUM_POOLS; ++i) {
4065718399fSFrançois Tigeot 		unsigned nr_free = shrink_pages;
407*932d855eSSergey Zigachev 		unsigned page_nr;
408*932d855eSSergey Zigachev 
4095718399fSFrançois Tigeot 		if (shrink_pages == 0)
4105718399fSFrançois Tigeot 			break;
411*932d855eSSergey Zigachev 
4125718399fSFrançois Tigeot 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
413*932d855eSSergey Zigachev 		page_nr = (1 << pool->order);
4147dcf36dcSFrançois Tigeot 		/* OK to use static buffer since global mutex is held. */
		/* Convert the base-page budget into pool entries (each entry
		 * is 1 << order pages), free, then convert the leftover back. */
415*932d855eSSergey Zigachev 		nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
416*932d855eSSergey Zigachev 		shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
417*932d855eSSergey Zigachev 		freed += (nr_free_pool - shrink_pages) << pool->order;
418*932d855eSSergey Zigachev 		if (freed >= sc->nr_to_scan)
419*932d855eSSergey Zigachev 			break;
420*932d855eSSergey Zigachev 		shrink_pages <<= pool->order;
4215718399fSFrançois Tigeot 	}
4221cfef1a5SFrançois Tigeot 	mutex_unlock(&lock);
42343e748b9SFrançois Tigeot 	return freed;
42443e748b9SFrançois Tigeot }
42543e748b9SFrançois Tigeot 
42643e748b9SFrançois Tigeot 
42743e748b9SFrançois Tigeot static unsigned long
ttm_pool_shrink_count(struct shrinker * shrink,struct shrink_control * sc)42843e748b9SFrançois Tigeot ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
42943e748b9SFrançois Tigeot {
43043e748b9SFrançois Tigeot 	unsigned i;
43143e748b9SFrançois Tigeot 	unsigned long count = 0;
432*932d855eSSergey Zigachev 	struct ttm_page_pool *pool;
43343e748b9SFrançois Tigeot 
434*932d855eSSergey Zigachev 	for (i = 0; i < NUM_POOLS; ++i) {
435*932d855eSSergey Zigachev 		pool = &_manager->pools[i];
436*932d855eSSergey Zigachev 		count += (pool->npages << pool->order);
437*932d855eSSergey Zigachev 	}
43843e748b9SFrançois Tigeot 
43943e748b9SFrançois Tigeot 	return count;
4405718399fSFrançois Tigeot }
4415718399fSFrançois Tigeot 
/*
 * Hook the pool manager into memory-pressure reclaim: set up the shrinker
 * callbacks and register for DragonFly's vm_lowmem event.
 *
 * NOTE(review): EVENTHANDLER_REGISTER passes (manager, ...) into
 * ttm_pool_shrink_scan, whose parameters are typed (struct shrinker *,
 * struct shrink_control *) — looks like the event path relies on the
 * handler ignoring/re-deriving its arguments; confirm against the
 * DragonFly eventhandler calling convention.
 */
ttm_pool_mm_shrink_init(struct ttm_pool_manager * manager)4425718399fSFrançois Tigeot static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
4435718399fSFrançois Tigeot {
44443e748b9SFrançois Tigeot 	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
445*932d855eSSergey Zigachev 	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
446*932d855eSSergey Zigachev 	manager->mm_shrink.seeks = 1;
4475718399fSFrançois Tigeot 	manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
44843e748b9SFrançois Tigeot 	    ttm_pool_shrink_scan, manager, EVENTHANDLER_PRI_ANY);
4495718399fSFrançois Tigeot }
4505718399fSFrançois Tigeot 
/* Unhook the manager from the vm_lowmem event (counterpart of _init). */
ttm_pool_mm_shrink_fini(struct ttm_pool_manager * manager)4515718399fSFrançois Tigeot static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
4525718399fSFrançois Tigeot {
4535718399fSFrançois Tigeot 	EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
4545718399fSFrançois Tigeot }
4555718399fSFrançois Tigeot 
ttm_set_pages_caching(struct page ** pages,enum ttm_caching_state cstate,unsigned cpages)456f0bba3d1SFrançois Tigeot static int ttm_set_pages_caching(struct page **pages,
4575718399fSFrançois Tigeot 		enum ttm_caching_state cstate, unsigned cpages)
4585718399fSFrançois Tigeot {
4595718399fSFrançois Tigeot 	int r = 0;
4605718399fSFrançois Tigeot 	/* Set page caching */
4615718399fSFrançois Tigeot 	switch (cstate) {
4625718399fSFrançois Tigeot 	case tt_uncached:
463*932d855eSSergey Zigachev 		r = ttm_set_pages_array_uc(pages, cpages);
4645718399fSFrançois Tigeot 		if (r)
4650bece63dSImre Vadasz 			pr_err("Failed to set %d pages to uc!\n", cpages);
4665718399fSFrançois Tigeot 		break;
4675718399fSFrançois Tigeot 	case tt_wc:
468*932d855eSSergey Zigachev 		r = ttm_set_pages_array_wc(pages, cpages);
4695718399fSFrançois Tigeot 		if (r)
4700bece63dSImre Vadasz 			pr_err("Failed to set %d pages to wc!\n", cpages);
4715718399fSFrançois Tigeot 		break;
4725718399fSFrançois Tigeot 	default:
4735718399fSFrançois Tigeot 		break;
4745718399fSFrançois Tigeot 	}
4755718399fSFrançois Tigeot 	return r;
4765718399fSFrançois Tigeot }
4775718399fSFrançois Tigeot 
4785718399fSFrançois Tigeot /**
4795718399fSFrançois Tigeot  * Free pages the pages that failed to change the caching state. If there is
4805718399fSFrançois Tigeot  * any pages that have changed their caching state already put them to the
4815718399fSFrançois Tigeot  * pool.
 *
 * Removes each of @failed_pages from @pages and returns it to the page
 * allocator.  @ttm_flags and @cstate are currently unused in the body;
 * they are kept so call sites can pass the full allocation context.
4825718399fSFrançois Tigeot  */
ttm_handle_caching_state_failure(struct pglist * pages,int ttm_flags,enum ttm_caching_state cstate,struct page ** failed_pages,unsigned cpages)4835718399fSFrançois Tigeot static void ttm_handle_caching_state_failure(struct pglist *pages,
4845718399fSFrançois Tigeot 		int ttm_flags, enum ttm_caching_state cstate,
485f0bba3d1SFrançois Tigeot 		struct page **failed_pages, unsigned cpages)
4865718399fSFrançois Tigeot {
4875718399fSFrançois Tigeot 	unsigned i;
4885718399fSFrançois Tigeot 	/* Failed pages have to be freed */
4895718399fSFrançois Tigeot 	for (i = 0; i < cpages; ++i) {
490f0bba3d1SFrançois Tigeot 		TAILQ_REMOVE(pages, (struct vm_page *)failed_pages[i], pageq);
491e5c1d8f1SFrançois Tigeot 		__free_page(failed_pages[i]);
4925718399fSFrançois Tigeot 	}
4935718399fSFrançois Tigeot }
4945718399fSFrançois Tigeot 
4955718399fSFrançois Tigeot /**
4965718399fSFrançois Tigeot  * Allocate new pages with correct caching.
4975718399fSFrançois Tigeot  *
4985718399fSFrançois Tigeot  * This function is reentrant if caller updates count depending on number of
4995718399fSFrançois Tigeot  * pages returned in pages array.
 *
 * @pages: list that each successful allocation is prepended to.
 * @gfp_flags: flags handed to alloc_pages().
 * @ttm_flags: forwarded to the caching-failure cleanup path.
 * @cstate: desired caching state (see ttm_set_pages_caching()).
 * @count: number of allocations wanted, each of (1 << @order) base pages.
 * @order: allocation order for every alloc_pages() call.
 *
 * Returns 0 on success or a negative errno if an allocation or a caching
 * transition failed; pages already linked into @pages whose caching change
 * failed are freed by ttm_handle_caching_state_failure().
5005718399fSFrançois Tigeot  */
ttm_alloc_new_pages(struct pglist * pages,gfp_t gfp_flags,int ttm_flags,enum ttm_caching_state cstate,unsigned count,unsigned order)5016af927c2SFrançois Tigeot static int ttm_alloc_new_pages(struct pglist *pages, gfp_t gfp_flags,
502*932d855eSSergey Zigachev 			       int ttm_flags, enum ttm_caching_state cstate,
503*932d855eSSergey Zigachev 			       unsigned count, unsigned order)
5045718399fSFrançois Tigeot {
505f0bba3d1SFrançois Tigeot 	struct page **caching_array;
5066af927c2SFrançois Tigeot 	struct page *p;
5075718399fSFrançois Tigeot 	int r = 0;
508*932d855eSSergey Zigachev 	unsigned i, j, cpages;
509*932d855eSSergey Zigachev 	unsigned npages = 1 << order;
	/* Caching transitions are batched; the batch never exceeds
	 * NUM_PAGES_TO_ALLOC base pages. */
510*932d855eSSergey Zigachev 	unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
5115718399fSFrançois Tigeot 
5125718399fSFrançois Tigeot 	/* allocate array for page caching change */
5136af927c2SFrançois Tigeot 	caching_array = kmalloc(max_cpages*sizeof(struct page *), M_DRM, M_WAITOK);
5146af927c2SFrançois Tigeot 
5156af927c2SFrançois Tigeot 	if (!caching_array) {
516*932d855eSSergey Zigachev 		pr_debug("Unable to allocate table for new pages\n");
5176af927c2SFrançois Tigeot 		return -ENOMEM;
5186af927c2SFrançois Tigeot 	}
5195718399fSFrançois Tigeot 
5205718399fSFrançois Tigeot 	for (i = 0, cpages = 0; i < count; ++i) {
521*932d855eSSergey Zigachev 		p = alloc_pages(gfp_flags, order);
5226af927c2SFrançois Tigeot 
5235718399fSFrançois Tigeot 		if (!p) {
524*932d855eSSergey Zigachev 			pr_debug("Unable to get page %u\n", i);
5255718399fSFrançois Tigeot 
5265718399fSFrançois Tigeot 			/* store already allocated pages in the pool after
5275718399fSFrançois Tigeot 			 * setting the caching state */
5285718399fSFrançois Tigeot 			if (cpages) {
5295718399fSFrançois Tigeot 				r = ttm_set_pages_caching(caching_array,
5305718399fSFrançois Tigeot 							  cstate, cpages);
5315718399fSFrançois Tigeot 				if (r)
5325718399fSFrançois Tigeot 					ttm_handle_caching_state_failure(pages,
5335718399fSFrançois Tigeot 						ttm_flags, cstate,
5345718399fSFrançois Tigeot 						caching_array, cpages);
5355718399fSFrançois Tigeot 			}
5365718399fSFrançois Tigeot 			r = -ENOMEM;
5375718399fSFrançois Tigeot 			goto out;
5385718399fSFrançois Tigeot 		}
5395718399fSFrançois Tigeot 
540*932d855eSSergey Zigachev 		TAILQ_INSERT_HEAD(pages, (struct vm_page *)p, pageq);
541*932d855eSSergey Zigachev 
5426af927c2SFrançois Tigeot #ifdef CONFIG_HIGHMEM
5435718399fSFrançois Tigeot 		/* gfp flags of highmem page should never be dma32 so we
5445718399fSFrançois Tigeot 		 * we should be fine in such case
5455718399fSFrançois Tigeot 		 */
546*932d855eSSergey Zigachev 		if (PageHighMem(p))
547*932d855eSSergey Zigachev 			continue;
548*932d855eSSergey Zigachev 
5495718399fSFrançois Tigeot #endif
		/* Queue every base page of this allocation for the caching
		 * change, flushing whenever the batch array fills up. */
550*932d855eSSergey Zigachev 		for (j = 0; j < npages; ++j) {
551*932d855eSSergey Zigachev 			caching_array[cpages++] = p++;
5525718399fSFrançois Tigeot 			if (cpages == max_cpages) {
5535718399fSFrançois Tigeot 
5545718399fSFrançois Tigeot 				r = ttm_set_pages_caching(caching_array,
5555718399fSFrançois Tigeot 						cstate, cpages);
5565718399fSFrançois Tigeot 				if (r) {
5575718399fSFrançois Tigeot 					ttm_handle_caching_state_failure(pages,
5585718399fSFrançois Tigeot 						ttm_flags, cstate,
5595718399fSFrançois Tigeot 						caching_array, cpages);
5605718399fSFrançois Tigeot 					goto out;
5615718399fSFrançois Tigeot 				}
5625718399fSFrançois Tigeot 				cpages = 0;
5635718399fSFrançois Tigeot 			}
5645718399fSFrançois Tigeot 		}
5655718399fSFrançois Tigeot 	}
5665718399fSFrançois Tigeot 
	/* Flush the final, partially filled batch. */
5675718399fSFrançois Tigeot 	if (cpages) {
5685718399fSFrançois Tigeot 		r = ttm_set_pages_caching(caching_array, cstate, cpages);
5695718399fSFrançois Tigeot 		if (r)
5705718399fSFrançois Tigeot 			ttm_handle_caching_state_failure(pages,
5715718399fSFrançois Tigeot 					ttm_flags, cstate,
5725718399fSFrançois Tigeot 					caching_array, cpages);
5735718399fSFrançois Tigeot 	}
5745718399fSFrançois Tigeot out:
5756af927c2SFrançois Tigeot 	kfree(caching_array);
5765718399fSFrançois Tigeot 
5775718399fSFrançois Tigeot 	return r;
5785718399fSFrançois Tigeot }
5795718399fSFrançois Tigeot 
/**
 * Fill the given pool if there aren't enough pages and the requested number of
 * pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
				      enum ttm_caching_state cstate,
				      unsigned count, unsigned long *irq_flags)
{
	vm_page_t p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If pool doesn't have enough pages for the allocation new pages are
	 * allocated from outside of pool.
	 */
	if (pool->fill_lock)
		return;

	/* fill_lock stays set across the unlocked allocation window below so
	 * that a second caller entering with pool->lock held will bail out
	 * instead of starting a concurrent fill. */
	pool->fill_lock = true;

	/* If allocation request is small and there are not enough
	 * pages in a pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct pglist new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		TAILQ_INIT(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
					cstate, alloc_size, 0);
		/* Re-acquire before touching pool->list / pool->npages. */
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			/* Success: the full batch of alloc_size pages is on
			 * new_pages, so the count can be added directly. */
			TAILQ_CONCAT(&pool->list, &new_pages, pageq);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			pr_debug("Failed to fill pool (%p)\n", pool);
			/* If we have any pages left put them to the pool. */
			/* ttm_alloc_new_pages() may leave a partial batch on
			 * new_pages even on failure; the exact count is not
			 * returned, so walk the list to count the survivors. */
			TAILQ_FOREACH(p, &new_pages, pageq) {
				++cpages;
			}
			TAILQ_CONCAT(&pool->list, &new_pages, pageq);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}
6365718399fSFrançois Tigeot 
6375718399fSFrançois Tigeot /**
6385718399fSFrançois Tigeot  * Cut 'count' number of pages from the pool and put them on the return list.
6395718399fSFrançois Tigeot  *
6405718399fSFrançois Tigeot  * @return count of pages still required to fulfill the request.
6415718399fSFrançois Tigeot  */
ttm_page_pool_get_pages(struct ttm_page_pool * pool,struct pglist * pages,int ttm_flags,enum ttm_caching_state cstate,unsigned count,unsigned order)6425718399fSFrançois Tigeot static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
6435718399fSFrançois Tigeot 					struct pglist *pages,
6445718399fSFrançois Tigeot 					int ttm_flags,
6455718399fSFrançois Tigeot 					enum ttm_caching_state cstate,
646*932d855eSSergey Zigachev 					unsigned count, unsigned order)
6475718399fSFrançois Tigeot {
6486af927c2SFrançois Tigeot 	unsigned long irq_flags;
6495718399fSFrançois Tigeot 	vm_page_t p;
6505718399fSFrançois Tigeot 	unsigned i;
6515718399fSFrançois Tigeot 
6526af927c2SFrançois Tigeot 	spin_lock_irqsave(&pool->lock, irq_flags);
653*932d855eSSergey Zigachev 	if (!order)
654*932d855eSSergey Zigachev 		ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
655*932d855eSSergey Zigachev 					  &irq_flags);
6565718399fSFrançois Tigeot 
6575718399fSFrançois Tigeot 	if (count >= pool->npages) {
6585718399fSFrançois Tigeot 		/* take all pages from the pool */
6595718399fSFrançois Tigeot 		TAILQ_CONCAT(pages, &pool->list, pageq);
6605718399fSFrançois Tigeot 		count -= pool->npages;
6615718399fSFrançois Tigeot 		pool->npages = 0;
6625718399fSFrançois Tigeot 		goto out;
6635718399fSFrançois Tigeot 	}
6645718399fSFrançois Tigeot 	for (i = 0; i < count; i++) {
6655718399fSFrançois Tigeot 		p = TAILQ_FIRST(&pool->list);
6665718399fSFrançois Tigeot 		TAILQ_REMOVE(&pool->list, p, pageq);
6675718399fSFrançois Tigeot 		TAILQ_INSERT_TAIL(pages, p, pageq);
6685718399fSFrançois Tigeot 	}
6695718399fSFrançois Tigeot 	pool->npages -= count;
6705718399fSFrançois Tigeot 	count = 0;
6715718399fSFrançois Tigeot out:
6726af927c2SFrançois Tigeot 	spin_unlock_irqrestore(&pool->lock, irq_flags);
6735718399fSFrançois Tigeot 	return count;
6745718399fSFrançois Tigeot }
6755718399fSFrançois Tigeot 
/* Put all pages in pages list to correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
	unsigned long irq_flags;
	unsigned i;
	struct vm_page *page;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
#if 0
				if (page_count(pages[i]) != 1)
					pr_err("Erroneous page count. Leaking pages.\n");
#endif
				__free_page(pages[i]);
				/* Clear the slot so the caller's array never
				 * holds a dangling pointer. */
				pages[i] = NULL;
			}
		}
		return;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			page = (struct vm_page *)pages[i];
			TAILQ_INSERT_TAIL(&pool->list, page, pageq);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	/* From here on 'npages' is reused as the number of pages to trim
	 * from an over-full pool (0 = no trimming needed). */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	/* Trimming is done outside the lock; ttm_page_pool_free() takes
	 * the pool lock itself. */
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (npages)
		ttm_page_pool_free(pool, npages, false);
}
7225718399fSFrançois Tigeot 
/*
 * On success pages list will hold count number of correctly
 * cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
	struct pglist plist;
	struct vm_page *p = NULL;
	gfp_t gfp_flags = GFP_USER;
	unsigned count;
	int r;

	/* set zero flag for page allocation if required */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	/* No pool for cached pages */
	if (pool == NULL) {
		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		/* NOTE(review): 'r' is a signed int compared against the
		 * unsigned 'npages'; harmless for sane page counts but worth
		 * confirming npages never exceeds INT_MAX. */
		for (r = 0; r < npages; ++r) {
			p = (struct vm_page *)alloc_page(gfp_flags);
			if (!p) {

				pr_err("Unable to allocate page\n");
				/* NOTE(review): pages[0..r-1] remain set on
				 * failure; presumably the caller releases them
				 * (ttm_pool_populate() does via
				 * ttm_pool_unpopulate_helper) — verify for any
				 * other callers. */
				return -ENOMEM;
			}
			pages[r] = (struct page *)p;
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags |= pool->gfp_flags;

	/* First we take pages from the pool */
	TAILQ_INIT(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages, 0);
	/* 'npages' now holds only the shortfall the pool could not supply. */
	count = 0;
	TAILQ_FOREACH(p, &plist, pageq) {
		pages[count++] = (struct page *)p;
	}

	/* clear the pages coming from the pool if requested */
	/* (freshly allocated pages are zeroed via __GFP_ZERO above, so only
	 * recycled pool pages need explicit clearing) */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		TAILQ_FOREACH(p, &plist, pageq) {
			pmap_zero_page(VM_PAGE_TO_PHYS(p));
		}
	}

	/* If pool didn't have enough pages allocate new one. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		TAILQ_INIT(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages, 0);
		/* Append whatever was allocated, even on partial failure, so
		 * the put-back below can return everything to the pool. */
		TAILQ_FOREACH(p, &plist, pageq) {
			pages[count++] = (struct page *)p;
		}
		if (r) {
			/* If there is any pages in the list put them back to
			 * the pool. */
			pr_err("Failed to allocate extra pages for large request\n");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}

	return 0;
}
7995718399fSFrançois Tigeot 
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
		char *name, unsigned int order)
{
	/* One-time setup of a page pool: lock, empty list, zeroed counters. */
	lockinit(&pool->lock, "ttmpool", 0, LK_CANRECURSE);
	TAILQ_INIT(&pool->list);
	pool->fill_lock = false;
	pool->npages = 0;
	pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
	pool->order = order;
}
8115718399fSFrançois Tigeot 
/* Initialize the global page-pool manager: one pool per caching state
 * (write-combined / uncached, with and without DMA32), plus the sysfs
 * kobject and the memory-shrinker hook. */
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	/* Double initialization is a caller bug. */
	WARN_ON(_manager);

	pr_info("Initializing pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma", 0);

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma", 0);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0))
		goto error;

	ttm_pool_mm_shrink_init(_manager);

	return 0;

error:
	/* NOTE(review): presumably the kobject release callback frees
	 * _manager, so no kfree() here — verify against ttm_pool_kobj_type. */
	kobject_put(&_manager->kobj);
	_manager = NULL;
	return ret;
}
8525718399fSFrançois Tigeot 
ttm_page_alloc_fini(void)8535718399fSFrançois Tigeot void ttm_page_alloc_fini(void)
8545718399fSFrançois Tigeot {
8555718399fSFrançois Tigeot 	int i;
8565718399fSFrançois Tigeot 
8570bece63dSImre Vadasz 	pr_info("Finalizing pool allocator\n");
8585718399fSFrançois Tigeot 	ttm_pool_mm_shrink_fini(_manager);
8595718399fSFrançois Tigeot 
8607dcf36dcSFrançois Tigeot 	/* OK to use static buffer since global mutex is no longer used. */
8615718399fSFrançois Tigeot 	for (i = 0; i < NUM_POOLS; ++i)
8627dcf36dcSFrançois Tigeot 		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
8635718399fSFrançois Tigeot 
8643a2096e8SFrançois Tigeot 	kobject_put(&_manager->kobj);
8655718399fSFrançois Tigeot 	_manager = NULL;
8665718399fSFrançois Tigeot }
8675718399fSFrançois Tigeot 
868*932d855eSSergey Zigachev static void
ttm_pool_unpopulate_helper(struct ttm_tt * ttm,unsigned mem_count_update)869*932d855eSSergey Zigachev ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
8705718399fSFrançois Tigeot {
871*932d855eSSergey Zigachev 	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
872*932d855eSSergey Zigachev 	unsigned i;
873*932d855eSSergey Zigachev 
874*932d855eSSergey Zigachev 	if (mem_count_update == 0)
875*932d855eSSergey Zigachev 		goto put_pages;
876*932d855eSSergey Zigachev 
877*932d855eSSergey Zigachev 	for (i = 0; i < mem_count_update; ++i) {
878*932d855eSSergey Zigachev 		if (!ttm->pages[i])
879*932d855eSSergey Zigachev 			continue;
880*932d855eSSergey Zigachev 
881*932d855eSSergey Zigachev 		ttm_mem_global_free_page(mem_glob, ttm->pages[i], PAGE_SIZE);
882*932d855eSSergey Zigachev 	}
883*932d855eSSergey Zigachev 
884*932d855eSSergey Zigachev put_pages:
885*932d855eSSergey Zigachev 	ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
886*932d855eSSergey Zigachev 		      ttm->caching_state);
887*932d855eSSergey Zigachev 	ttm->state = tt_unpopulated;
888*932d855eSSergey Zigachev }
889*932d855eSSergey Zigachev 
/* Allocate and account backing pages for a ttm_tt, swapping contents back
 * in when the tt was previously swapped out.  Idempotent: returns 0
 * immediately if the tt is already populated. */
int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx))
		return -ENOMEM;

	ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
			    ttm->caching_state);
	if (unlikely(ret != 0)) {
		/* 0: no pages were accounted yet, only put them back. */
		ttm_pool_unpopulate_helper(ttm, 0);
		return ret;
	}

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						PAGE_SIZE, ctx);
		if (unlikely(ret != 0)) {
			/* Unwind accounting for the i pages done so far. */
			ttm_pool_unpopulate_helper(ttm, i);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);
9305718399fSFrançois Tigeot 
/* Release all pages of a populated ttm_tt, undoing the accounting for
 * every page (num_pages were accounted by ttm_pool_populate). */
void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate_helper(ttm, ttm->num_pages);
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
9365718399fSFrançois Tigeot 
/* Populate a ttm_dma_tt and fill tt->dma_address[] by DMA-mapping the
 * pages, coalescing physically-contiguous runs into a single dma_map_page
 * call and deriving the addresses of the tail pages arithmetically. */
int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
					struct ttm_operation_ctx *ctx)
{
	unsigned i, j;
	int r;

	r = ttm_pool_populate(&tt->ttm, ctx);
	if (r)
		return r;

	for (i = 0; i < tt->ttm.num_pages; ++i) {
		struct page *p = tt->ttm.pages[i];
		size_t num_pages = 1;

		/* Extend the run while the next page struct is adjacent
		 * (pages[] entries with consecutive struct addresses). */
		for (j = i + 1; j < tt->ttm.num_pages; ++j) {
			if (++p != tt->ttm.pages[j])
				break;

			++num_pages;
		}

		tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
						  0, num_pages * PAGE_SIZE,
						  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, tt->dma_address[i])) {
			/* NOTE(review): the unwind unmaps each prior entry
			 * with PAGE_SIZE, though earlier runs may have been
			 * mapped as multi-page chunks; this mirrors upstream
			 * Linux but is worth confirming against the DMA API
			 * contract for this platform. */
			while (i--) {
				dma_unmap_page(dev, tt->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				tt->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(&tt->ttm);
			return -EFAULT;
		}

		/* Tail pages of the run: consecutive bus addresses. */
		for (j = 1; j < num_pages; ++j) {
			tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE;
			++i;
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_populate_and_map_pages);
9793f2dd94aSFrançois Tigeot 
/* Mirror of ttm_populate_and_map_pages(): unmap DMA addresses (re-deriving
 * the contiguous runs the mapping pass created) and release the pages. */
void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
{
	unsigned i, j;

	for (i = 0; i < tt->ttm.num_pages;) {
		struct page *p = tt->ttm.pages[i];
		size_t num_pages = 1;

		/* Skip slots that were never mapped or never populated. */
		if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
			++i;
			continue;
		}

		/* Same adjacency walk as the mapping pass, so the unmap
		 * length matches the original dma_map_page length. */
		for (j = i + 1; j < tt->ttm.num_pages; ++j) {
			if (++p != tt->ttm.pages[j])
				break;

			++num_pages;
		}

		dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

		i += num_pages;
	}
	ttm_pool_unpopulate(&tt->ttm);
}
EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
10083f2dd94aSFrançois Tigeot 
/* Debugfs dump of per-pool statistics; disabled in this port (no seq_file
 * support wired up). */
#if 0
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};
	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%7s %12s %13s %8s\n",
			h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%7s %12ld %13ld %8d\n",
				p->name, p->nrefills,
				p->nfrees, p->npages);
	}
	return 0;
}
#endif
/* NOTE(review): EXPORT_SYMBOL references the function compiled out above;
 * this presumably only links because EXPORT_SYMBOL is a no-op in this
 * linux-compat layer — verify, or move it inside the #if 0. */
EXPORT_SYMBOL(ttm_page_alloc_debugfs);
1032