xref: /openbsd-src/sys/dev/pci/drm/ttm/ttm_pool.c (revision bb29f9c04060c9e0d043635a7b8a6aa95ad0da72)
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pooling of allocated pages is necessary because changing the caching
 * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
 * invalidate for those addresses.
 *
 * In addition to that, allocations from the DMA coherent API are pooled as
 * well because they are rather slow compared to alloc_pages+map.
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/drm_legacy.h>

#include "ttm_module.h"

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping, with the order in the
 * lower bits
 * @dmat: bus_dma tag used for the allocation (OpenBSD only)
 * @map: bus_dma map of the allocation (OpenBSD only)
 * @seg: bus_dma segment backing the allocation (OpenBSD only)
 */
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
	bus_dma_tag_t dmat;
	bus_dmamap_t map;
	bus_dma_segment_t seg;
};
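
/*
 * Illustrative sketch (editorial note, not compiled): because the CPU
 * mapping returned by the DMA API is at least PAGE_SIZE aligned, the low
 * bits of @vaddr are free to carry the allocation order.
 * ttm_pool_alloc_page() packs and ttm_pool_page_order() unpacks:
 *
 *	dma->vaddr = (unsigned long)vaddr | order;	// pack
 *	order = dma->vaddr & ~LINUX_PAGE_MASK;		// unpack the order
 *	vaddr = (void *)(dma->vaddr & LINUX_PAGE_MASK);	// unpack the vaddr
 */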

static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];

static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;

#ifdef __linux__

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;

	/* Don't set the __GFP_COMP flag for higher order allocations.
	 * Mapping pages directly into a userspace process and calling
	 * put_page() on a TTM allocated page is illegal.
	 */
	if (order)
		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
			__GFP_KSWAPD_RECLAIM;

	if (!pool->use_dma_alloc) {
		p = alloc_pages_node(pool->nid, gfp_flags, order);
		if (p)
			p->private = order;

		return p;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;

	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);

	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}

/* Reset the caching and pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	if (!pool || !pool->use_dma_alloc) {
		__free_pages(p, order);
		return;
	}

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}

#else

static struct vm_page *ttm_pool_alloc_page(struct ttm_pool *pool,
					   gfp_t gfp_flags, unsigned int order,
					   bus_dma_tag_t dmat)
{
	struct ttm_pool_dma *dma;
	struct vm_page *p;
	struct uvm_constraint_range *constraint = &no_constraint;
	int flags = (gfp_flags & M_NOWAIT) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK;
	int dmaflags = BUS_DMA_64BIT;
	int nsegs;

	if (pool->use_dma32) {
		constraint = &dma_constraint;
		dmaflags &= ~BUS_DMA_64BIT;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (bus_dmamap_create(dmat, (1ULL << order) * PAGE_SIZE, 1,
	    (1ULL << order) * PAGE_SIZE, 0, flags | dmaflags, &dma->map))
		goto error_free;
#ifdef bus_dmamem_alloc_range
	if (bus_dmamem_alloc_range(dmat, (1ULL << order) * PAGE_SIZE,
	    PAGE_SIZE, 0, &dma->seg, 1, &nsegs, flags | BUS_DMA_ZERO,
	    constraint->ucr_low, constraint->ucr_high)) {
		bus_dmamap_destroy(dmat, dma->map);
		goto error_free;
	}
#else
	if (bus_dmamem_alloc(dmat, (1ULL << order) * PAGE_SIZE,
	    PAGE_SIZE, 0, &dma->seg, 1, &nsegs, flags | BUS_DMA_ZERO)) {
		bus_dmamap_destroy(dmat, dma->map);
		goto error_free;
	}
#endif
	if (bus_dmamap_load_raw(dmat, dma->map, &dma->seg, 1,
	    (1ULL << order) * PAGE_SIZE, flags)) {
		bus_dmamem_free(dmat, &dma->seg, 1);
		bus_dmamap_destroy(dmat, dma->map);
		goto error_free;
	}
	dma->dmat = dmat;
	dma->addr = dma->map->dm_segs[0].ds_addr;

#ifndef __sparc64__
	p = PHYS_TO_VM_PAGE(dma->seg.ds_addr);
#else
	p = TAILQ_FIRST((struct pglist *)dma->seg._ds_mlist);
#endif

	p->objt.rbt_parent = (struct rb_entry *)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}
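
/*
 * Illustrative sketch (editorial note, not compiled): every successful
 * allocation above leaves behind a bus_dma triple that the free path
 * below tears down again in reverse order:
 *
 *	bus_dmamap_create(dmat, ...)        ->  bus_dmamap_destroy()
 *	bus_dmamem_alloc[_range](dmat, ...) ->  bus_dmamem_free()
 *	bus_dmamap_load_raw(dmat, ...)      ->  bus_dmamap_unload()
 *
 * The struct ttm_pool_dma pointer is stashed in the page's otherwise
 * unused objt.rbt_parent field so ttm_pool_free_page() and
 * ttm_pool_map() can find it again from the vm_page alone.
 */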

static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct vm_page *p)
{
	struct ttm_pool_dma *dma;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	dma = (struct ttm_pool_dma *)p->objt.rbt_parent;
	bus_dmamap_unload(dma->dmat, dma->map);
	bus_dmamem_free(dma->dmat, &dma->seg, 1);
	bus_dmamap_destroy(dma->dmat, dma->map);
	kfree(dma);
}

#endif

/* Apply a new caching to an array of pages */
static int ttm_pool_apply_caching(struct vm_page **first, struct vm_page **last,
				  enum ttm_caching caching)
{
#ifdef CONFIG_X86
	unsigned int num_pages = last - first;

	if (!num_pages)
		return 0;

	switch (caching) {
	case ttm_cached:
		break;
	case ttm_write_combined:
		return set_pages_array_wc(first, num_pages);
	case ttm_uncached:
		return set_pages_array_uc(first, num_pages);
	}
#endif
	return 0;
}
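
/*
 * Illustrative sketch (editorial note, not compiled): callers hand in a
 * half-open range of page pointers, so converting everything gathered
 * since the last caching transition looks like this fragment from
 * ttm_pool_alloc() below:
 *
 *	r = ttm_pool_apply_caching(caching, pages, tt->caching);
 *	if (r)
 *		goto error_free_page;
 *	caching = pages;	// start a new, not-yet-converted range
 */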

#ifdef __linux__

/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct vm_page *p, dma_addr_t **dma_addr)
{
	dma_addr_t addr;
	unsigned int i;

	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;

		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, addr))
			return -EFAULT;
	}

	for (i = 1 << order; i ; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}
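
/*
 * Illustrative example (editorial note, not compiled): a single order-2
 * allocation is mapped once but fans out into four consecutive slots of
 * the ttm_tt DMA address array, assuming 4 KiB pages:
 *
 *	dma_addr[0] = addr;
 *	dma_addr[1] = addr + PAGE_SIZE;
 *	dma_addr[2] = addr + 2 * PAGE_SIZE;
 *	dma_addr[3] = addr + 3 * PAGE_SIZE;
 */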

/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (pool->use_dma_alloc)
		return;

	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}

#else

static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct vm_page *p, dma_addr_t **dma_addr)
{
	struct ttm_pool_dma *dma;
	dma_addr_t addr;
	unsigned int i;

	dma = (struct ttm_pool_dma *)p->objt.rbt_parent;
	addr = dma->addr;

	for (i = 1 << order; i ; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}

static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
}

#endif

/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct vm_page *p)
{
	unsigned int i, num_pages = 1 << pt->order;
	struct ttm_pool_type_lru *entry;

	for (i = 0; i < num_pages; ++i) {
#ifdef notyet
		if (PageHighMem(p))
			clear_highpage(p + i);
		else
#endif
			pmap_zero_page(p + i);
	}

	entry = malloc(sizeof(struct ttm_pool_type_lru), M_DRM, M_WAITOK);
	entry->pg = p;
	spin_lock(&pt->lock);
	LIST_INSERT_HEAD(&pt->lru, entry, entries);
	spin_unlock(&pt->lock);
	atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct vm_page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct vm_page *p = NULL;
	struct ttm_pool_type_lru *entry;

	spin_lock(&pt->lock);
	if (!LIST_EMPTY(&pt->lru)) {
		entry = LIST_FIRST(&pt->lru);
		p = entry->pg;
		atomic_long_sub(1 << pt->order, &allocated_pages);
		LIST_REMOVE(entry, entries);
		free(entry, M_DRM, sizeof(struct ttm_pool_type_lru));
	}
	spin_unlock(&pt->lock);

	return p;
}
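
/*
 * Illustrative sketch (editorial note, not compiled): recycling a
 * compound page through a pool type pairs the two helpers above, as
 * done in ttm_pool_alloc() and ttm_pool_free_range():
 *
 *	p = ttm_pool_type_take(pt);	// NULL when the pool is empty
 *	...
 *	ttm_pool_type_give(pt, p);	// zeroes and re-lists the page
 */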

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool = pool;
	pt->caching = caching;
	pt->order = order;
	mtx_init(&pt->lock, IPL_NONE);
	INIT_LIST_HEAD(&pt->pages);
	LIST_INIT(&pt->lru);

	spin_lock(&shrinker_lock);
	list_add_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	struct vm_page *p;
	struct ttm_pool_type_lru *entry;

	spin_lock(&shrinker_lock);
	list_del(&pt->shrinker_list);
	spin_unlock(&shrinker_lock);

	while ((p = ttm_pool_type_take(pt)))
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);

	while (!LIST_EMPTY(&pt->lru)) {
		entry = LIST_FIRST(&pt->lru);
		LIST_REMOVE(entry, entries);
		free(entry, M_DRM, sizeof(struct ttm_pool_type_lru));
	}
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	if (pool->use_dma_alloc)
		return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
	switch (caching) {
	case ttm_write_combined:
		if (pool->nid != NUMA_NO_NODE)
			return &pool->caching[caching].orders[order];

		if (pool->use_dma32)
			return &global_dma32_write_combined[order];

		return &global_write_combined[order];
	case ttm_uncached:
		if (pool->nid != NUMA_NO_NODE)
			return &pool->caching[caching].orders[order];

		if (pool->use_dma32)
			return &global_dma32_uncached[order];

		return &global_uncached[order];
	default:
		break;
	}
#endif

	return NULL;
}
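
/*
 * Summary of the selection above (editorial note): all branches other
 * than the first apply only under CONFIG_X86.
 *
 *	use_dma_alloc                   -> pool->caching[caching].orders[order]
 *	wc/uc, nid != NUMA_NO_NODE      -> pool->caching[caching].orders[order]
 *	wc, use_dma32                   -> global_dma32_write_combined[order]
 *	wc                              -> global_write_combined[order]
 *	uc, use_dma32                   -> global_dma32_uncached[order]
 *	uc                              -> global_uncached[order]
 *	otherwise                       -> NULL (page is freed, never pooled)
 */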

/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	unsigned int num_pages;
	struct vm_page *p;

	spin_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
	list_move_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);

	p = ttm_pool_type_take(pt);
	if (p) {
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_pages = 1 << pt->order;
	} else {
		num_pages = 0;
	}

	return num_pages;
}
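
/*
 * Illustrative sketch (editorial note, not compiled): each call frees at
 * most one pool entry (1 << pt->order pages) and rotates the pool type
 * to the list tail, so repeated calls walk the pool types round-robin.
 * Callers therefore loop, e.g. ttm_pool_free() below keeps the global
 * pools under the page_pool_size limit with:
 *
 *	while (atomic_long_read(&allocated_pages) > page_pool_size)
 *		ttm_pool_shrink();
 */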

#ifdef notyet

/* Return the allocation order for a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct vm_page *p)
{
	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		return dma->vaddr & ~LINUX_PAGE_MASK;
	}

	return p->private;
}

#endif /* notyet */

/* Called when we got a page, either from a pool or newly allocated */
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
				   struct vm_page *p, dma_addr_t **dma_addr,
				   unsigned long *num_pages,
				   struct vm_page ***pages,
				   unsigned long **orders)
{
	unsigned int i;
	int r;

	if (*dma_addr) {
		r = ttm_pool_map(pool, order, p, dma_addr);
		if (r)
			return r;
	}

	*num_pages -= 1 << order;
	for (i = 1 << order; i; --i, ++(*pages), ++p, ++(*orders)) {
		**pages = p;
		**orders = order;
	}

	return 0;
}

/**
 * ttm_pool_free_range() - Free a range of TTM pages
 * @pool: The pool used for allocating.
 * @tt: The struct ttm_tt holding the page pointers.
 * @caching: The page caching mode used by the range.
 * @start_page: index for first page to free.
 * @end_page: index for last page to free + 1.
 *
 * During allocation the ttm_tt page-vector may be populated with ranges of
 * pages with different attributes if allocation hit an error without being
 * able to completely fulfill the allocation. This function can be used
 * to free these individual ranges.
 */
static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
				enum ttm_caching caching,
				pgoff_t start_page, pgoff_t end_page)
{
	struct vm_page **pages = &tt->pages[start_page];
	unsigned int order;
	pgoff_t i, nr;

	for (i = start_page; i < end_page; i += nr, pages += nr) {
		struct ttm_pool_type *pt = NULL;

		order = tt->orders[i];
		nr = (1UL << order);
		if (tt->dma_address)
			ttm_pool_unmap(pool, tt->dma_address[i], nr);

		pt = ttm_pool_select_type(pool, caching, order);
		if (pt)
			ttm_pool_type_give(pt, *pages);
		else
			ttm_pool_free_page(pool, caching, order, *pages);
	}
}

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	pgoff_t num_pages = tt->num_pages;
	dma_addr_t *dma_addr = tt->dma_address;
	struct vm_page **caching = tt->pages;
	struct vm_page **pages = tt->pages;
	enum ttm_caching page_caching;
	gfp_t gfp_flags = GFP_USER;
	pgoff_t caching_divide;
	unsigned int order;
	struct vm_page *p;
	int r;
	unsigned long *orders = tt->orders;

	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
#ifdef __linux__
	WARN_ON(dma_addr && !pool->dev);
#endif

	if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (ctx->gfp_retry_mayfail)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	if (pool->use_dma32)
		gfp_flags |= GFP_DMA32;
	else
		gfp_flags |= GFP_HIGHUSER;

	for (order = min_t(unsigned int, MAX_ORDER, __fls(num_pages));
	     num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		struct ttm_pool_type *pt;

		page_caching = tt->caching;
		pt = ttm_pool_select_type(pool, tt->caching, order);
		p = pt ? ttm_pool_type_take(pt) : NULL;
		if (p) {
			r = ttm_pool_apply_caching(caching, pages,
						   tt->caching);
			if (r)
				goto error_free_page;

			caching = pages;
			do {
				r = ttm_pool_page_allocated(pool, order, p,
							    &dma_addr,
							    &num_pages,
							    &pages, &orders);
				if (r)
					goto error_free_page;

				caching = pages;
				if (num_pages < (1 << order))
					break;

				p = ttm_pool_type_take(pt);
			} while (p);
		}

		page_caching = ttm_cached;
		while (num_pages >= (1 << order) &&
		       (p = ttm_pool_alloc_page(pool, gfp_flags, order, tt->dmat))) {

			if (PageHighMem(p)) {
				r = ttm_pool_apply_caching(caching, pages,
							   tt->caching);
				if (r)
					goto error_free_page;
				caching = pages;
			}
			r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
						    &num_pages, &pages, &orders);
			if (r)
				goto error_free_page;
			if (PageHighMem(p))
				caching = pages;
		}

		if (!p) {
			if (order) {
				--order;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}
	}

	r = ttm_pool_apply_caching(caching, pages, tt->caching);
	if (r)
		goto error_free_all;

	return 0;

error_free_page:
	ttm_pool_free_page(pool, page_caching, order, p);

error_free_all:
	num_pages = tt->num_pages - num_pages;
	caching_divide = caching - tt->pages;
	ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
	ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);

	return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);
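
/*
 * Illustrative usage sketch (editorial note, not compiled; error
 * handling elided and "bdev" is a hypothetical ttm_device-style owner of
 * the pool): a driver backend typically populates and unpopulates a
 * ttm_tt like
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = false,
 *		.no_wait_gpu = false,
 *	};
 *
 *	r = ttm_pool_alloc(&bdev->pool, tt, &ctx);	// fill the pages
 *	...
 *	ttm_pool_free(&bdev->pool, tt);			// give them back
 *
 * where bdev->pool was set up once with ttm_pool_init() and torn down
 * with ttm_pool_fini().
 */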

/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);

	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);

/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @nid: NUMA node to use for allocations
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   int nid, bool use_dma_alloc, bool use_dma32)
{
	unsigned int i, j;

	WARN_ON(!dev && use_dma_alloc);

	pool->dev = dev;
	pool->nid = nid;
	pool->use_dma_alloc = use_dma_alloc;
	pool->use_dma32 = use_dma32;

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		for (j = 0; j < NR_PAGE_ORDERS; ++j) {
			struct ttm_pool_type *pt;

			/* Initialize only pool types which are actually used */
			pt = ttm_pool_select_type(pool, i, j);
			if (pt != &pool->caching[i].orders[j])
				continue;

			ttm_pool_type_init(pt, pool, i, j);
		}
	}
}
EXPORT_SYMBOL(ttm_pool_init);

/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		for (j = 0; j < NR_PAGE_ORDERS; ++j) {
			struct ttm_pool_type *pt;

			pt = ttm_pool_select_type(pool, i, j);
			if (pt != &pool->caching[i].orders[j])
				continue;

			ttm_pool_type_fini(pt);
		}
	}

	/* We removed the pool types from the LRU, but we need to also make sure
	 * that no shrinker is concurrently freeing pages from the pool.
	 */
	synchronize_shrinkers();
}
EXPORT_SYMBOL(ttm_pool_fini);

/* As long as pages are available make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	do
		num_freed += ttm_pool_shrink();
	while (!num_freed && atomic_long_read(&allocated_pages));

	return num_freed;
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
#ifdef notyet
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : SHRINK_EMPTY;
#else
	STUB();
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : 0;
#endif
}

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int count = 0;
	struct ttm_pool_type_lru *entry;

	spin_lock(&pt->lock);
	/* Only used for debugfs, the overhead doesn't matter */
	LIST_FOREACH(entry, &pt->lru, entries)
		++count;
	spin_unlock(&pt->lock);

	return count;
}

/* Print a nice header for the order */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
	unsigned int i;

	seq_puts(m, "\t ");
	for (i = 0; i < NR_PAGE_ORDERS; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;

	for (i = 0; i < NR_PAGE_ORDERS; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}

/* Dump the total amount of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);
}

/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);
	seq_puts(m, "wc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
	seq_puts(m, "uc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_uncached, m);
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;

	if (!pool->use_dma_alloc) {
		seq_puts(m, "unused\n");
		return 0;
	}

	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		seq_puts(m, "DMA ");
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);
	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct shrink_control sc = { .gfp_mask = GFP_NOFS };

	fs_reclaim_acquire(GFP_KERNEL);
	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
		   ttm_pool_shrinker_scan(&mm_shrinker, &sc));
	fs_reclaim_release(GFP_KERNEL);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);

#endif

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	if (!page_pool_size)
		page_pool_size = num_pages;

	mtx_init(&shrinker_lock, IPL_NONE);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
				   ttm_uncached, i);
	}

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_globals_fops);
	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_shrink_fops);
#endif

	mm_shrinker.count_objects = ttm_pool_shrinker_count;
	mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker.seeks = 1;
	return register_shrinker(&mm_shrinker, "drm-ttm_pool");
}

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);

		ttm_pool_type_fini(&global_dma32_write_combined[i]);
		ttm_pool_type_fini(&global_dma32_uncached[i]);
	}

	unregister_shrinker(&mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
}