/*
 * Copyright 2011 (c) Oracle Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 */

/*
 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
 * over the DMA pools:
 * - Pool collects recently freed pages for reuse (and hooks up to
 *   the shrinker).
 * - Tracks currently in use pages.
 * - Tracks whether the page is UC, WB or cached (and reverts to WB
 *   when freed).
 */

#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		4
#define FREE_ALL_PAGES			(~0U)
#define VADDR_FLAG_HUGE_POOL		1UL
#define VADDR_FLAG_UPDATED_COUNT	2UL
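
/*
 * Worked example, assuming a typical x86-64 configuration (PAGE_SIZE of
 * 4096 and an 8-byte struct page pointer): NUM_PAGES_TO_ALLOC comes out
 * to 4096 / 8 = 512, so pool refills and frees are batched in chunks of
 * up to 512 pages (2 MiB worth of 4 KiB pages) at a time.
 */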

enum pool_type {
        IS_UNDEFINED	= 0,
        IS_WC		= 1 << 1,
        IS_UC		= 1 << 2,
        IS_CACHED	= 1 << 3,
        IS_DMA32	= 1 << 4,
        IS_HUGE		= 1 << 5
};
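
/*
 * The pool_type values are single-bit flags and are OR'ed together;
 * ttm_dma_pool_init() below derives the pool name from the set bits, so
 * (IS_CACHED | IS_DMA32) names the "cached dma32" pool and
 * (IS_WC | IS_HUGE) the "wchuge" one.
 */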

/*
 * The pool structure. There are up to nine pools:
 * - generic (not restricted to DMA32):
 *	- write combined, uncached, cached.
 * - dma32 (up to 2^32 - so up to 4GB):
 *	- write combined, uncached, cached.
 * - huge (not restricted to DMA32):
 *	- write combined, uncached, cached.
 * for each 'struct device'. The 'cached' is for pages that are actively used.
 * The other ones can be shrunk by the shrinker API if necessary.
 * @pools: The 'struct device->dma_pools' link.
 * @type: Type of the pool.
 * @lock: Protects the free_list from concurrent access. Must be
 * used with irqsave/irqrestore variants because the pool allocator may be
 * called from delayed work.
 * @free_list: Pool of pages that are free to be used. No order requirements.
 * @dev: The device that is associated with these pools.
 * @size: Size used during DMA allocation.
 * @npages_free: Count of available pages for re-use.
 * @npages_in_use: Count of pages that are in use.
 * @nfrees: Stats when pool is shrinking.
 * @nrefills: Stats when the pool is grown.
 * @gfp_flags: Flags to pass for alloc_page.
 * @name: Name of the pool.
 * @dev_name: Name derived from dev - similar to how dev_info works.
 *   Used during shutdown as the dev_info during release is unavailable.
 */
struct dma_pool {
        struct list_head pools; /* The 'struct device->dma_pools' link */
        enum pool_type type;
        spinlock_t lock;
        struct list_head free_list;
        struct device *dev;
        unsigned size;
        unsigned npages_free;
        unsigned npages_in_use;
        unsigned long nfrees; /* Stats when shrunk. */
        unsigned long nrefills; /* Stats when grown. */
        gfp_t gfp_flags;
        char name[13]; /* "cached dma32" */
        char dev_name[64]; /* Constructed from dev */
};

/*
 * The accounting page that keeps track of the allocated page along with
 * the DMA address.
 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
 * @vaddr: The virtual address of the page and a flag if the page belongs to a
 *   huge pool.
 * @p: The 'struct page' backing the allocation.
 * @dma: The bus address of the page. If the page is not allocated
 *   via the DMA API, it will be -1.
 */
struct dma_page {
        struct list_head page_list;
        unsigned long vaddr;
        struct page *p;
        dma_addr_t dma;
};
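
/*
 * Note on the vaddr flag packing: coherent DMA allocations are at least
 * PAGE_SIZE aligned, so the low bits of the kernel virtual address are
 * always zero and can carry VADDR_FLAG_HUGE_POOL and
 * VADDR_FLAG_UPDATED_COUNT. Both flags are cleared again before the
 * address is handed back to dma_free_coherent().
 */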

/*
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in sysfs store. They won't have immediate effect
 * anyway so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
        unsigned	alloc_size;
        unsigned	max_size;
        unsigned	small;
};

/*
 * Contains the list of all of the 'struct device' and their corresponding
 * DMA pools. Guarded by _manager->lock.
 * @pools: The link to 'struct ttm_pool_manager->pools'.
 * @dev: The 'struct device' associated with the 'pool'.
 * @pool: The 'struct dma_pool' associated with the 'dev'.
 */
struct device_pools {
        struct list_head pools;
        struct device *dev;
        struct dma_pool *pool;
};

/*
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * @lock: Lock used when adding/removing from pools
 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
 * @options: Limits for the pool.
 * @npools: Total amount of pools in existence.
 * @shrinker: The structure used by [un|]register_shrinker
 */
struct ttm_pool_manager {
        struct mutex		lock;
        struct list_head	pools;
        struct ttm_pool_opts	options;
        unsigned		npools;
        struct shrinker		mm_shrink;
        struct kobject		kobj;
};

static struct ttm_pool_manager *_manager;

static struct attribute ttm_page_pool_max = {
        .name = "pool_max_size",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
        .name = "pool_small_allocation",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
        .name = "pool_allocation_size",
        .mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
        &ttm_page_pool_max,
        &ttm_page_pool_small,
        &ttm_page_pool_alloc_size,
        NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
        struct ttm_pool_manager *m =
                container_of(kobj, struct ttm_pool_manager, kobj);
        kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
                              const char *buffer, size_t size)
{
        struct ttm_pool_manager *m =
                container_of(kobj, struct ttm_pool_manager, kobj);
        int chars;
        unsigned val;

        chars = sscanf(buffer, "%u", &val);
        if (chars == 0)
                return size;

        /* Convert kb to number of pages */
        val = val / (PAGE_SIZE >> 10);
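
        /*
         * Example, assuming PAGE_SIZE = 4096 (so PAGE_SIZE >> 10 = 4):
         * writing "16384" (KiB) to one of these attributes stores
         * 16384 / 4 = 4096 pages; ttm_pool_show() below performs the
         * inverse conversion when the attribute is read back.
         */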

        if (attr == &ttm_page_pool_max) {
                m->options.max_size = val;
        } else if (attr == &ttm_page_pool_small) {
                m->options.small = val;
        } else if (attr == &ttm_page_pool_alloc_size) {
                if (val > NUM_PAGES_TO_ALLOC*8) {
                        pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                        return size;
                } else if (val > NUM_PAGES_TO_ALLOC) {
                        pr_warn("Setting allocation size to larger than %lu is not recommended\n",
                                NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                }
                m->options.alloc_size = val;
        }

        return size;
}

static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
                             char *buffer)
{
        struct ttm_pool_manager *m =
                container_of(kobj, struct ttm_pool_manager, kobj);
        unsigned val = 0;

        if (attr == &ttm_page_pool_max)
                val = m->options.max_size;
        else if (attr == &ttm_page_pool_small)
                val = m->options.small;
        else if (attr == &ttm_page_pool_alloc_size)
                val = m->options.alloc_size;

        val = val * (PAGE_SIZE >> 10);

        return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
        .show = &ttm_pool_show,
        .store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
        .release = &ttm_pool_kobj_release,
        .sysfs_ops = &ttm_pool_sysfs_ops,
        .default_attrs = ttm_pool_attrs,
};

static int ttm_set_pages_caching(struct dma_pool *pool,
                                 struct page **pages, unsigned cpages)
{
        int r = 0;

        /* Set page caching */
        if (pool->type & IS_UC) {
                r = ttm_set_pages_array_uc(pages, cpages);
                if (r)
                        pr_err("%s: Failed to set %d pages to uc!\n",
                               pool->dev_name, cpages);
        }
        if (pool->type & IS_WC) {
                r = ttm_set_pages_array_wc(pages, cpages);
                if (r)
                        pr_err("%s: Failed to set %d pages to wc!\n",
                               pool->dev_name, cpages);
        }
        return r;
}

static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
        dma_addr_t dma = d_page->dma;

        d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
        dma_free_coherent(pool->dev, pool->size, (void *)d_page->vaddr, dma);

        kfree(d_page);
        d_page = NULL;
}

static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
        struct dma_page *d_page;
        unsigned long attrs = 0;
        void *vaddr;

        d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
        if (!d_page)
                return NULL;

        if (pool->type & IS_HUGE)
                attrs = DMA_ATTR_NO_WARN;

        vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma,
                                pool->gfp_flags, attrs);
        if (vaddr) {
                if (is_vmalloc_addr(vaddr))
                        d_page->p = vmalloc_to_page(vaddr);
                else
                        d_page->p = virt_to_page(vaddr);
                d_page->vaddr = (unsigned long)vaddr;
                if (pool->type & IS_HUGE)
                        d_page->vaddr |= VADDR_FLAG_HUGE_POOL;
        } else {
                kfree(d_page);
                d_page = NULL;
        }
        return d_page;
}

static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
{
        enum pool_type type = IS_UNDEFINED;

        if (flags & TTM_PAGE_FLAG_DMA32)
                type |= IS_DMA32;
        if (cstate == tt_cached)
                type |= IS_CACHED;
        else if (cstate == tt_uncached)
                type |= IS_UC;
        else
                type |= IS_WC;

        return type;
}
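
/*
 * For example, a ttm_tt with TTM_PAGE_FLAG_DMA32 set and a caching state
 * of tt_uncached maps to (IS_UC | IS_DMA32), i.e. the "uc dma32" pool.
 * IS_HUGE is never set here; callers OR it in themselves where needed
 * (see ttm_dma_populate() below).
 */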

static void ttm_pool_update_free_locked(struct dma_pool *pool,
                                        unsigned freed_pages)
{
        pool->npages_free -= freed_pages;
        pool->nfrees += freed_pages;
}

/* set memory back to wb and free the pages. */
static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
        struct page *page = d_page->p;
        unsigned num_pages;

        /* Don't set WB on WB page pool. */
        if (!(pool->type & IS_CACHED)) {
                num_pages = pool->size / PAGE_SIZE;
                if (ttm_set_pages_wb(page, num_pages))
                        pr_err("%s: Failed to set %d pages to wb!\n",
                               pool->dev_name, num_pages);
        }

        list_del(&d_page->page_list);
        __ttm_dma_free_page(pool, d_page);
}

static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
                              struct page *pages[], unsigned npages)
{
        struct dma_page *d_page, *tmp;

        if (pool->type & IS_HUGE) {
                list_for_each_entry_safe(d_page, tmp, d_pages, page_list)
                        ttm_dma_page_put(pool, d_page);

                return;
        }

        /* Don't set WB on WB page pool. */
        if (npages && !(pool->type & IS_CACHED) &&
            ttm_set_pages_array_wb(pages, npages))
                pr_err("%s: Failed to set %d pages to wb!\n",
                       pool->dev_name, npages);

        list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
                list_del(&d_page->page_list);
                __ttm_dma_free_page(pool, d_page);
        }
}

/*
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: If set to FREE_ALL_PAGES, all pages in the pool are freed.
 * @use_static: Safe to use the static buffer
 */
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
                                       bool use_static)
{
        static struct page *static_buf[NUM_PAGES_TO_ALLOC];
        unsigned long irq_flags;
        struct dma_page *dma_p, *tmp;
        struct page **pages_to_free;
        struct list_head d_pages;
        unsigned freed_pages = 0,
                 npages_to_free = nr_free;

        if (NUM_PAGES_TO_ALLOC < nr_free)
                npages_to_free = NUM_PAGES_TO_ALLOC;
#if 0
        if (nr_free > 1) {
                pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
                         pool->dev_name, pool->name, current->pid,
                         npages_to_free, nr_free);
        }
#endif
        if (use_static)
                pages_to_free = static_buf;
        else
                pages_to_free = kmalloc_array(npages_to_free,
                                              sizeof(struct page *),
                                              GFP_KERNEL);

        if (!pages_to_free) {
                pr_debug("%s: Failed to allocate memory for pool free operation\n",
                         pool->dev_name);
                return 0;
        }
        INIT_LIST_HEAD(&d_pages);
restart:
        spin_lock_irqsave(&pool->lock, irq_flags);

        /* We pick the oldest ones off the list */
        list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
                                         page_list) {
                if (freed_pages >= npages_to_free)
                        break;

                /* Move the dma_page from one list to another. */
                list_move(&dma_p->page_list, &d_pages);

                pages_to_free[freed_pages++] = dma_p->p;
                /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
                if (freed_pages >= NUM_PAGES_TO_ALLOC) {
                        ttm_pool_update_free_locked(pool, freed_pages);
                        /*
                         * Because changing page caching is costly
                         * we unlock the pool to prevent stalling.
                         */
                        spin_unlock_irqrestore(&pool->lock, irq_flags);

                        ttm_dma_pages_put(pool, &d_pages, pages_to_free,
                                          freed_pages);

                        INIT_LIST_HEAD(&d_pages);

                        if (likely(nr_free != FREE_ALL_PAGES))
                                nr_free -= freed_pages;

                        if (NUM_PAGES_TO_ALLOC >= nr_free)
                                npages_to_free = nr_free;
                        else
                                npages_to_free = NUM_PAGES_TO_ALLOC;

                        freed_pages = 0;

                        /* free all so restart the processing */
                        if (nr_free)
                                goto restart;

                        /* Not allowed to fall through or break because
                         * following context is inside spinlock while we are
                         * outside here.
                         */
                        goto out;

                }
        }

        /* remove range of pages from the pool */
        if (freed_pages) {
                ttm_pool_update_free_locked(pool, freed_pages);
                nr_free -= freed_pages;
        }

        spin_unlock_irqrestore(&pool->lock, irq_flags);

        if (freed_pages)
                ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
        if (pages_to_free != static_buf)
                kfree(pages_to_free);
        return nr_free;
}

static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
{
        struct device_pools *p;
        struct dma_pool *pool;

        if (!dev)
                return;

        mutex_lock(&_manager->lock);
        list_for_each_entry_reverse(p, &_manager->pools, pools) {
                if (p->dev != dev)
                        continue;
                pool = p->pool;
                if (pool->type != type)
                        continue;

                list_del(&p->pools);
                kfree(p);
                _manager->npools--;
                break;
        }
        list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
                if (pool->type != type)
                        continue;
                /* Takes a spinlock.. */
                /* OK to use static buffer since global mutex is held. */
                ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
                WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
                /* This code path is called after _all_ references to the
                 * struct device have been dropped - so nobody should be
                 * touching it. In case somebody is trying to _add_ we are
                 * guarded by the mutex. */
                list_del(&pool->pools);
                kfree(pool);
                break;
        }
        mutex_unlock(&_manager->lock);
}

/*
 * On freeing of the 'struct device' this destructor is run, albeit the
 * pool might have already been freed earlier.
 */
static void ttm_dma_pool_release(struct device *dev, void *res)
{
        struct dma_pool *pool = *(struct dma_pool **)res;

        if (pool)
                ttm_dma_free_pool(dev, pool->type);
}

static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
        return *(struct dma_pool **)res == match_data;
}

static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
                                          enum pool_type type)
{
        const char *n[] = {"wc", "uc", "cached", " dma32", "huge"};
        enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE};
        struct device_pools *sec_pool = NULL;
        struct dma_pool *pool = NULL, **ptr;
        unsigned i;
        int ret = -ENODEV;
        char *p;

        if (!dev)
                return NULL;

        ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return NULL;

        ret = -ENOMEM;

        pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
                            dev_to_node(dev));
        if (!pool)
                goto err_mem;

        sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
                                dev_to_node(dev));
        if (!sec_pool)
                goto err_mem;

        INIT_LIST_HEAD(&sec_pool->pools);
        sec_pool->dev = dev;
        sec_pool->pool = pool;

        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->pools);
        spin_lock_init(&pool->lock);
        pool->dev = dev;
        pool->npages_free = pool->npages_in_use = 0;
        pool->nfrees = 0;
        pool->gfp_flags = flags;
        if (type & IS_HUGE)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                pool->size = HPAGE_PMD_SIZE;
#else
                BUG();
#endif
        else
                pool->size = PAGE_SIZE;
        pool->type = type;
        pool->nrefills = 0;
        p = pool->name;
        for (i = 0; i < ARRAY_SIZE(t); i++) {
                if (type & t[i]) {
                        p += snprintf(p, sizeof(pool->name) - (p - pool->name),
                                      "%s", n[i]);
                }
        }
        *p = 0;
        /* We copy the name for pr_ calls because when dma_pool_destroy is
         * called - the kobj->name has already been deallocated. */
        snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
                 dev_driver_string(dev), dev_name(dev));
        mutex_lock(&_manager->lock);
        /* You can get the dma_pool from either the global: */
        list_add(&sec_pool->pools, &_manager->pools);
        _manager->npools++;
        /* or from 'struct device': */
        list_add(&pool->pools, &dev->dma_pools);
        mutex_unlock(&_manager->lock);

        *ptr = pool;
        devres_add(dev, ptr);

        return pool;
err_mem:
        devres_free(ptr);
        kfree(sec_pool);
        kfree(pool);
        return ERR_PTR(ret);
}
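
/*
 * Pools are created lazily: ttm_dma_populate() below first looks a pool
 * up with ttm_dma_find_pool() and only falls back to ttm_dma_pool_init()
 * on the first allocation of a given type for a device.
 */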

static struct dma_pool *ttm_dma_find_pool(struct device *dev,
                                          enum pool_type type)
{
        struct dma_pool *pool, *tmp;

        if (type == IS_UNDEFINED)
                return NULL;

        /* NB: We iterate on the 'struct device' which has no spinlock, but
         * it does have a kref which we have taken. The kref is taken during
         * graphic driver loading - in the drm_pci_init it calls either
         * pci_dev_get or pci_register_driver which both end up taking a kref
         * on 'struct device'.
         *
         * On teardown, the graphic drivers end up quiescing the TTM
         * (put_pages) and calls the devres destructors: ttm_dma_pool_release.
         * The nice thing is at that point of time there are no pages
         * associated with the driver so this function will not be called.
         */
        list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools)
                if (pool->type == type)
                        return pool;
        return NULL;
}

/*
 * Free the pages that failed to change their caching state. If there
 * are pages that have already changed their caching state, put them back
 * in the pool.
 */
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
                                                 struct list_head *d_pages,
                                                 struct page **failed_pages,
                                                 unsigned cpages)
{
        struct dma_page *d_page, *tmp;
        struct page *p;
        unsigned i = 0;

        p = failed_pages[0];
        if (!p)
                return;
        /* Find the failed page. */
        list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
                if (d_page->p != p)
                        continue;
                /* .. and then progress over the full list. */
                list_del(&d_page->page_list);
                __ttm_dma_free_page(pool, d_page);
                if (++i < cpages)
                        p = failed_pages[i];
                else
                        break;
        }
}

/*
 * Allocate 'count' pages and add them to the 'd_pages' list, setting the
 * caching state of each page according to the pool type.
 * We return zero for success, and negative numbers as errors.
 */
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
                                        struct list_head *d_pages,
                                        unsigned count)
{
        struct page **caching_array;
        struct dma_page *dma_p;
        struct page *p;
        int r = 0;
        unsigned i, j, npages, cpages;
        unsigned max_cpages = min(count,
                        (unsigned)(PAGE_SIZE/sizeof(struct page *)));

        /* allocate array for page caching change */
        caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
                                      GFP_KERNEL);

        if (!caching_array) {
                pr_debug("%s: Unable to allocate table for new pages\n",
                         pool->dev_name);
                return -ENOMEM;
        }

        if (count > 1)
                pr_debug("%s: (%s:%d) Getting %d pages\n",
                         pool->dev_name, pool->name, current->pid, count);

        for (i = 0, cpages = 0; i < count; ++i) {
                dma_p = __ttm_dma_alloc_page(pool);
                if (!dma_p) {
                        pr_debug("%s: Unable to get page %u\n",
                                 pool->dev_name, i);

                        /* store already allocated pages in the pool after
                         * setting the caching state */
                        if (cpages) {
                                r = ttm_set_pages_caching(pool, caching_array,
                                                          cpages);
                                if (r)
                                        ttm_dma_handle_caching_state_failure(
                                                pool, d_pages, caching_array,
                                                cpages);
                        }
                        r = -ENOMEM;
                        goto out;
                }
                p = dma_p->p;
                list_add(&dma_p->page_list, d_pages);

#ifdef CONFIG_HIGHMEM
                /* gfp flags of highmem page should never be dma32 so we
                 * should be fine in such case
                 */
                if (PageHighMem(p))
                        continue;
#endif

                npages = pool->size / PAGE_SIZE;
                for (j = 0; j < npages; ++j) {
                        caching_array[cpages++] = p + j;
                        if (cpages == max_cpages) {
                                /* Note: Cannot hold the spinlock */
                                r = ttm_set_pages_caching(pool, caching_array,
                                                          cpages);
                                if (r) {
                                        ttm_dma_handle_caching_state_failure(
                                                pool, d_pages, caching_array,
                                                cpages);
                                        goto out;
                                }
                                cpages = 0;
                        }
                }
        }

        if (cpages) {
                r = ttm_set_pages_caching(pool, caching_array, cpages);
                if (r)
                        ttm_dma_handle_caching_state_failure(pool, d_pages,
                                        caching_array, cpages);
        }
out:
        kfree(caching_array);
        return r;
}

/*
 * @return the number of pages available on the pool's free_list after
 * attempting to refill it.
 */
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
                                         unsigned long *irq_flags)
{
        unsigned count = _manager->options.small;
        int r = pool->npages_free;

        if (count > pool->npages_free) {
                struct list_head d_pages;

                INIT_LIST_HEAD(&d_pages);

                spin_unlock_irqrestore(&pool->lock, *irq_flags);

                /* Returns zero on success, a negative error
                 * code on failure. */
                r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);

                spin_lock_irqsave(&pool->lock, *irq_flags);
                if (!r) {
                        /* Add the fresh to the end.. */
                        list_splice(&d_pages, &pool->free_list);
                        ++pool->nrefills;
                        pool->npages_free += count;
                        r = count;
                } else {
                        struct dma_page *d_page;
                        unsigned cpages = 0;

                        pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
                                 pool->dev_name, pool->name, r);

                        list_for_each_entry(d_page, &d_pages, page_list) {
                                cpages++;
                        }
                        list_splice_tail(&d_pages, &pool->free_list);
                        pool->npages_free += cpages;
                        r = cpages;
                }
        }
        return r;
}

/*
 * The populate list is actually a stack (not that it matters, as TTM
 * allocates one page at a time).
 * Returns a dma_page pointer on success, otherwise NULL.
 */
static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
                                               struct ttm_dma_tt *ttm_dma,
                                               unsigned index)
{
        struct dma_page *d_page = NULL;
        struct ttm_tt *ttm = &ttm_dma->ttm;
        unsigned long irq_flags;
        int count;

        spin_lock_irqsave(&pool->lock, irq_flags);
        count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
        if (count) {
                d_page = list_first_entry(&pool->free_list, struct dma_page,
                                          page_list);
                ttm->pages[index] = d_page->p;
                ttm_dma->dma_address[index] = d_page->dma;
                list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
                pool->npages_in_use += 1;
                pool->npages_free -= 1;
        }
        spin_unlock_irqrestore(&pool->lock, irq_flags);
        return d_page;
}

static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;
        gfp_t gfp_flags;

        if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
                gfp_flags = GFP_USER | GFP_DMA32;
        else
                gfp_flags = GFP_HIGHUSER;
        if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
                gfp_flags |= __GFP_ZERO;

        if (huge) {
                gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
                        __GFP_KSWAPD_RECLAIM;
                gfp_flags &= ~__GFP_MOVABLE;
                gfp_flags &= ~__GFP_COMP;
        }

        if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
                gfp_flags |= __GFP_RETRY_MAYFAIL;

        return gfp_flags;
}
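
/*
 * For example, a DMA32 ttm_tt that requests zeroed pages ends up with
 * (GFP_USER | GFP_DMA32 | __GFP_ZERO), while a huge-pool allocation for
 * a regular ttm_tt starts from GFP_HIGHUSER and adds the transparent
 * hugepage flags, minus __GFP_MOVABLE and __GFP_COMP.
 */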

/*
 * On success the pages list will hold 'count' correctly cached pages. On
 * failure a negative error value (-ENOMEM, etc) is returned.
 */
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
                     struct ttm_operation_ctx *ctx)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;
        struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
        unsigned long num_pages = ttm->num_pages;
        struct dma_pool *pool;
        struct dma_page *d_page;
        enum pool_type type;
        unsigned i;
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
                return -ENOMEM;

        INIT_LIST_HEAD(&ttm_dma->pages_list);
        i = 0;

        type = ttm_to_type(ttm->page_flags, ttm->caching_state);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
                goto skip_huge;

        pool = ttm_dma_find_pool(dev, type | IS_HUGE);
        if (!pool) {
                gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);

                pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
                if (IS_ERR_OR_NULL(pool))
                        goto skip_huge;
        }

        while (num_pages >= HPAGE_PMD_NR) {
                unsigned j;

                d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
                if (!d_page)
                        break;

                ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
                                                pool->size, ctx);
                if (unlikely(ret != 0)) {
                        ttm_dma_unpopulate(ttm_dma, dev);
                        return -ENOMEM;
                }

                d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
                for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
                        ttm->pages[j] = ttm->pages[j - 1] + 1;
                        ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
                                PAGE_SIZE;
                }

                i += HPAGE_PMD_NR;
                num_pages -= HPAGE_PMD_NR;
        }

skip_huge:
#endif

        pool = ttm_dma_find_pool(dev, type);
        if (!pool) {
                gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);

                pool = ttm_dma_pool_init(dev, gfp_flags, type);
                if (IS_ERR_OR_NULL(pool))
                        return -ENOMEM;
        }

        while (num_pages) {
                d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
                if (!d_page) {
                        ttm_dma_unpopulate(ttm_dma, dev);
                        return -ENOMEM;
                }

                ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
                                                pool->size, ctx);
                if (unlikely(ret != 0)) {
                        ttm_dma_unpopulate(ttm_dma, dev);
                        return -ENOMEM;
                }

                d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
                ++i;
                --num_pages;
        }

        if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
                ret = ttm_tt_swapin(ttm);
                if (unlikely(ret != 0)) {
                        ttm_dma_unpopulate(ttm_dma, dev);
                        return ret;
                }
        }

        ttm->state = tt_unbound;
        return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
993*932d855eSSergey Zigachev /* Put all pages in pages list to correct pool to wait for reuse */
ttm_dma_unpopulate(struct ttm_dma_tt * ttm_dma,struct device * dev)994*932d855eSSergey Zigachev void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
995*932d855eSSergey Zigachev {
996*932d855eSSergey Zigachev struct ttm_tt *ttm = &ttm_dma->ttm;
997*932d855eSSergey Zigachev struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
998*932d855eSSergey Zigachev struct dma_pool *pool;
999*932d855eSSergey Zigachev struct dma_page *d_page, *next;
1000*932d855eSSergey Zigachev enum pool_type type;
1001*932d855eSSergey Zigachev bool is_cached = false;
1002*932d855eSSergey Zigachev unsigned count, i, npages = 0;
1003*932d855eSSergey Zigachev unsigned long irq_flags;
1004*932d855eSSergey Zigachev
1005*932d855eSSergey Zigachev type = ttm_to_type(ttm->page_flags, ttm->caching_state);
1006*932d855eSSergey Zigachev
1007*932d855eSSergey Zigachev #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1008*932d855eSSergey Zigachev pool = ttm_dma_find_pool(dev, type | IS_HUGE);
1009*932d855eSSergey Zigachev if (pool) {
1010*932d855eSSergey Zigachev count = 0;
1011*932d855eSSergey Zigachev list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
1012*932d855eSSergey Zigachev page_list) {
1013*932d855eSSergey Zigachev if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
1014*932d855eSSergey Zigachev continue;
1015*932d855eSSergey Zigachev
1016*932d855eSSergey Zigachev count++;
1017*932d855eSSergey Zigachev if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
1018*932d855eSSergey Zigachev ttm_mem_global_free_page(mem_glob, d_page->p,
1019*932d855eSSergey Zigachev pool->size);
1020*932d855eSSergey Zigachev d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
1021*932d855eSSergey Zigachev }
1022*932d855eSSergey Zigachev ttm_dma_page_put(pool, d_page);
1023*932d855eSSergey Zigachev }
1024*932d855eSSergey Zigachev
1025*932d855eSSergey Zigachev spin_lock_irqsave(&pool->lock, irq_flags);
1026*932d855eSSergey Zigachev pool->npages_in_use -= count;
1027*932d855eSSergey Zigachev pool->nfrees += count;
1028*932d855eSSergey Zigachev spin_unlock_irqrestore(&pool->lock, irq_flags);
1029*932d855eSSergey Zigachev }
1030*932d855eSSergey Zigachev #endif
1031*932d855eSSergey Zigachev
1032*932d855eSSergey Zigachev pool = ttm_dma_find_pool(dev, type);
1033*932d855eSSergey Zigachev if (!pool)
1034*932d855eSSergey Zigachev return;
1035*932d855eSSergey Zigachev
1036*932d855eSSergey Zigachev is_cached = (ttm_dma_find_pool(pool->dev,
1037*932d855eSSergey Zigachev ttm_to_type(ttm->page_flags, tt_cached)) == pool);
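/*
 * Pages from the cached pool need no caching-attribute transition and
 * are cheap to re-allocate, so they are released immediately below;
 * only uncached/write-combined pages are worth keeping on the free list.
 */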
1038*932d855eSSergey Zigachev
1039*932d855eSSergey Zigachev /* Make sure the pages array matches the list, and count the pages */
1040*932d855eSSergey Zigachev count = 0;
1041*932d855eSSergey Zigachev list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
1042*932d855eSSergey Zigachev page_list) {
1043*932d855eSSergey Zigachev ttm->pages[count] = d_page->p;
1044*932d855eSSergey Zigachev count++;
1045*932d855eSSergey Zigachev
1046*932d855eSSergey Zigachev if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
1047*932d855eSSergey Zigachev ttm_mem_global_free_page(mem_glob, d_page->p,
1048*932d855eSSergey Zigachev pool->size);
1049*932d855eSSergey Zigachev d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
1050*932d855eSSergey Zigachev }
1051*932d855eSSergey Zigachev
1052*932d855eSSergey Zigachev if (is_cached)
1053*932d855eSSergey Zigachev ttm_dma_page_put(pool, d_page);
1054*932d855eSSergey Zigachev }
1055*932d855eSSergey Zigachev
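/*
 * Update the pool statistics under the lock. For uncached pools the
 * pages just spliced back may push npages_free over the limit, in
 * which case the excess is trimmed after the lock is dropped.
 */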
1056*932d855eSSergey Zigachev spin_lock_irqsave(&pool->lock, irq_flags);
1057*932d855eSSergey Zigachev pool->npages_in_use -= count;
1058*932d855eSSergey Zigachev if (is_cached) {
1059*932d855eSSergey Zigachev pool->nfrees += count;
1060*932d855eSSergey Zigachev } else {
1061*932d855eSSergey Zigachev pool->npages_free += count;
1062*932d855eSSergey Zigachev list_splice(&ttm_dma->pages_list, &pool->free_list);
1063*932d855eSSergey Zigachev /*
1064*932d855eSSergey Zigachev * Wait to have at least NUM_PAGES_TO_ALLOC pages
1065*932d855eSSergey Zigachev * to free in order to minimize calls to set_memory_wb().
1066*932d855eSSergey Zigachev */
1067*932d855eSSergey Zigachev if (pool->npages_free >= (_manager->options.max_size +
1068*932d855eSSergey Zigachev NUM_PAGES_TO_ALLOC))
1069*932d855eSSergey Zigachev npages = pool->npages_free - _manager->options.max_size;
1070*932d855eSSergey Zigachev }
1071*932d855eSSergey Zigachev spin_unlock_irqrestore(&pool->lock, irq_flags);
1072*932d855eSSergey Zigachev
1073*932d855eSSergey Zigachev INIT_LIST_HEAD(&ttm_dma->pages_list);
1074*932d855eSSergey Zigachev for (i = 0; i < ttm->num_pages; i++) {
1075*932d855eSSergey Zigachev ttm->pages[i] = NULL;
1076*932d855eSSergey Zigachev ttm_dma->dma_address[i] = 0;
1077*932d855eSSergey Zigachev }
1078*932d855eSSergey Zigachev
1079*932d855eSSergey Zigachev /* shrink the pool if necessary (only on !is_cached pools) */
1080*932d855eSSergey Zigachev if (npages)
1081*932d855eSSergey Zigachev ttm_dma_page_pool_free(pool, npages, false);
1082*932d855eSSergey Zigachev ttm->state = tt_unpopulated;
1083*932d855eSSergey Zigachev }
1084*932d855eSSergey Zigachev EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
1085*932d855eSSergey Zigachev
1086*932d855eSSergey Zigachev /**
1087*932d855eSSergey Zigachev * Callback for mm to request the pool to reduce the number of pages held.
1088*932d855eSSergey Zigachev *
1089*932d855eSSergey Zigachev * XXX: (dchinner) Deadlock warning!
1090*932d855eSSergey Zigachev *
1091*932d855eSSergey Zigachev * I'm getting sadder as I hear more pathetic whimpers about needing per-pool
1092*932d855eSSergey Zigachev * shrinkers.
1093*932d855eSSergey Zigachev */
1094*932d855eSSergey Zigachev static unsigned long
1095*932d855eSSergey Zigachev ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1096*932d855eSSergey Zigachev {
1097*932d855eSSergey Zigachev static unsigned start_pool;
1098*932d855eSSergey Zigachev unsigned idx = 0;
1099*932d855eSSergey Zigachev unsigned pool_offset;
1100*932d855eSSergey Zigachev unsigned shrink_pages = sc->nr_to_scan;
1101*932d855eSSergey Zigachev struct device_pools *p;
1102*932d855eSSergey Zigachev unsigned long freed = 0;
1103*932d855eSSergey Zigachev
1104*932d855eSSergey Zigachev if (list_empty(&_manager->pools))
1105*932d855eSSergey Zigachev return SHRINK_STOP;
1106*932d855eSSergey Zigachev
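/*
 * Only trylock here: the shrinker can be invoked from reclaim while an
 * allocation path already holds the manager lock, and blocking would
 * risk the deadlock warned about above.
 */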
1107*932d855eSSergey Zigachev if (!mutex_trylock(&_manager->lock))
1108*932d855eSSergey Zigachev return SHRINK_STOP;
1109*932d855eSSergey Zigachev if (!_manager->npools)
1110*932d855eSSergey Zigachev goto out;
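/* The static cursor is only advanced under _manager->lock, so the round-robin scan needs no extra serialization. */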
1111*932d855eSSergey Zigachev pool_offset = ++start_pool % _manager->npools;
1112*932d855eSSergey Zigachev list_for_each_entry(p, &_manager->pools, pools) {
1113*932d855eSSergey Zigachev unsigned nr_free;
1114*932d855eSSergey Zigachev
1115*932d855eSSergey Zigachev if (!p->dev)
1116*932d855eSSergey Zigachev continue;
1117*932d855eSSergey Zigachev if (shrink_pages == 0)
1118*932d855eSSergey Zigachev break;
1119*932d855eSSergey Zigachev /* Do it in round-robin fashion. */
1120*932d855eSSergey Zigachev if (++idx < pool_offset)
1121*932d855eSSergey Zigachev continue;
1122*932d855eSSergey Zigachev nr_free = shrink_pages;
1123*932d855eSSergey Zigachev /* OK to use static buffer since global mutex is held. */
1124*932d855eSSergey Zigachev shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
1125*932d855eSSergey Zigachev freed += nr_free - shrink_pages;
1126*932d855eSSergey Zigachev
1127*932d855eSSergey Zigachev pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
1128*932d855eSSergey Zigachev p->pool->dev_name, p->pool->name, current->pid,
1129*932d855eSSergey Zigachev nr_free, shrink_pages);
1130*932d855eSSergey Zigachev }
1131*932d855eSSergey Zigachev out:
1132*932d855eSSergey Zigachev mutex_unlock(&_manager->lock);
1133*932d855eSSergey Zigachev return freed;
1134*932d855eSSergey Zigachev }
1135*932d855eSSergey Zigachev
1136*932d855eSSergey Zigachev static unsigned long
1137*932d855eSSergey Zigachev ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1138*932d855eSSergey Zigachev {
1139*932d855eSSergey Zigachev struct device_pools *p;
1140*932d855eSSergey Zigachev unsigned long count = 0;
1141*932d855eSSergey Zigachev
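/* As in the scan callback, never block in reclaim context; report zero freeable pages instead. */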
1142*932d855eSSergey Zigachev if (!mutex_trylock(&_manager->lock))
1143*932d855eSSergey Zigachev return 0;
1144*932d855eSSergey Zigachev list_for_each_entry(p, &_manager->pools, pools)
1145*932d855eSSergey Zigachev count += p->pool->npages_free;
1146*932d855eSSergey Zigachev mutex_unlock(&_manager->lock);
1147*932d855eSSergey Zigachev return count;
1148*932d855eSSergey Zigachev }
1149*932d855eSSergey Zigachev
1150*932d855eSSergey Zigachev static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
1151*932d855eSSergey Zigachev {
1152*932d855eSSergey Zigachev manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
1153*932d855eSSergey Zigachev manager->mm_shrink.scan_objects = ttm_dma_pool_shrink_scan;
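/* seeks below DEFAULT_SEEKS (2): pool pages are cheap to re-create, so let the VM reclaim them relatively eagerly. */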
1154*932d855eSSergey Zigachev manager->mm_shrink.seeks = 1;
1155*932d855eSSergey Zigachev return register_shrinker(&manager->mm_shrink);
1156*932d855eSSergey Zigachev }
1157*932d855eSSergey Zigachev
1158*932d855eSSergey Zigachev static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
1159*932d855eSSergey Zigachev {
1160*932d855eSSergey Zigachev unregister_shrinker(&manager->mm_shrink);
1161*932d855eSSergey Zigachev }
1162*932d855eSSergey Zigachev
1163*932d855eSSergey Zigachev int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
1164*932d855eSSergey Zigachev {
1165*932d855eSSergey Zigachev int ret;
1166*932d855eSSergey Zigachev
1167*932d855eSSergey Zigachev WARN_ON(_manager);
1168*932d855eSSergey Zigachev
1169*932d855eSSergey Zigachev pr_info("Initializing DMA pool allocator\n");
1170*932d855eSSergey Zigachev
1171*932d855eSSergey Zigachev _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
1172*932d855eSSergey Zigachev if (!_manager)
1173*932d855eSSergey Zigachev return -ENOMEM;
1174*932d855eSSergey Zigachev
1175*932d855eSSergey Zigachev mutex_init(&_manager->lock);
1176*932d855eSSergey Zigachev INIT_LIST_HEAD(&_manager->pools);
1177*932d855eSSergey Zigachev
1178*932d855eSSergey Zigachev _manager->options.max_size = max_pages;
1179*932d855eSSergey Zigachev _manager->options.small = SMALL_ALLOCATION;
1180*932d855eSSergey Zigachev _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
1181*932d855eSSergey Zigachev
1182*932d855eSSergey Zigachev /* This takes care of auto-freeing the _manager */
1183*932d855eSSergey Zigachev ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
1184*932d855eSSergey Zigachev &glob->kobj, "dma_pool");
1185*932d855eSSergey Zigachev if (unlikely(ret != 0))
1186*932d855eSSergey Zigachev goto error;
1187*932d855eSSergey Zigachev
1188*932d855eSSergey Zigachev ret = ttm_dma_pool_mm_shrink_init(_manager);
1189*932d855eSSergey Zigachev if (unlikely(ret != 0))
1190*932d855eSSergey Zigachev goto error;
1191*932d855eSSergey Zigachev return 0;
1192*932d855eSSergey Zigachev
1193*932d855eSSergey Zigachev error:
1194*932d855eSSergey Zigachev kobject_put(&_manager->kobj);
1195*932d855eSSergey Zigachev _manager = NULL;
1196*932d855eSSergey Zigachev return ret;
1197*932d855eSSergey Zigachev }
1198*932d855eSSergey Zigachev
1199*932d855eSSergey Zigachev void ttm_dma_page_alloc_fini(void)
1200*932d855eSSergey Zigachev {
1201*932d855eSSergey Zigachev struct device_pools *p, *t;
1202*932d855eSSergey Zigachev
1203*932d855eSSergey Zigachev pr_info("Finalizing DMA pool allocator\n");
1204*932d855eSSergey Zigachev ttm_dma_pool_mm_shrink_fini(_manager);
1205*932d855eSSergey Zigachev
1206*932d855eSSergey Zigachev list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
1207*932d855eSSergey Zigachev dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
1208*932d855eSSergey Zigachev current->pid);
1209*932d855eSSergey Zigachev WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
1210*932d855eSSergey Zigachev ttm_dma_pool_match, p->pool));
1211*932d855eSSergey Zigachev ttm_dma_free_pool(p->dev, p->pool->type);
1212*932d855eSSergey Zigachev }
1213*932d855eSSergey Zigachev kobject_put(&_manager->kobj);
1214*932d855eSSergey Zigachev _manager = NULL;
1215*932d855eSSergey Zigachev }
1216*932d855eSSergey Zigachev
1217*932d855eSSergey Zigachev int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
1218*932d855eSSergey Zigachev {
1219*932d855eSSergey Zigachev struct device_pools *p;
1220*932d855eSSergey Zigachev struct dma_pool *pool = NULL;
1221*932d855eSSergey Zigachev
1222*932d855eSSergey Zigachev if (!_manager) {
1223*932d855eSSergey Zigachev seq_puts(m, "No pool allocator running.\n");
1224*932d855eSSergey Zigachev return 0;
1225*932d855eSSergey Zigachev }
1226*932d855eSSergey Zigachev seq_puts(m, " pool refills pages freed inuse available name\n");
1227*932d855eSSergey Zigachev mutex_lock(&_manager->lock);
1228*932d855eSSergey Zigachev list_for_each_entry(p, &_manager->pools, pools) {
1229*932d855eSSergey Zigachev struct device *dev = p->dev;
1230*932d855eSSergey Zigachev if (!dev)
1231*932d855eSSergey Zigachev continue;
1232*932d855eSSergey Zigachev pool = p->pool;
1233*932d855eSSergey Zigachev seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
1234*932d855eSSergey Zigachev pool->name, pool->nrefills,
1235*932d855eSSergey Zigachev pool->nfrees, pool->npages_in_use,
1236*932d855eSSergey Zigachev pool->npages_free,
1237*932d855eSSergey Zigachev pool->dev_name);
1238*932d855eSSergey Zigachev }
1239*932d855eSSergey Zigachev mutex_unlock(&_manager->lock);
1240*932d855eSSergey Zigachev return 0;
1241*932d855eSSergey Zigachev }
1242*932d855eSSergey Zigachev EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
1243*932d855eSSergey Zigachev
1244*932d855eSSergey Zigachev #endif