// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
 * Copyright © 2023 Intel Corporation
 */
#include <linux/mm.h>

#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_pool.h>

#include "ttm_kunit_helpers.h"

struct ttm_pool_test_case {
	const char *description;
	unsigned int order;
	bool use_dma_alloc;
};

struct ttm_pool_test_priv {
	struct ttm_test_devices *devs;

	/* Used to create mock ttm_tts */
	struct ttm_buffer_object *mock_bo;
};

static struct ttm_operation_ctx simple_ctx = {
	.interruptible = true,
	.no_wait_gpu = false,
};

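/* Per-test setup: allocate the private data and a basic mock TTM device. */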
static int ttm_pool_test_init(struct kunit *test)
{
	struct ttm_pool_test_priv *priv;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, priv);

	priv->devs = ttm_test_devices_basic(test);
	test->priv = priv;

	return 0;
}

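/* Per-test teardown: release the mock TTM device again. */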
static void ttm_pool_test_fini(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;

	ttm_test_devices_put(test, priv->devs);
}

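/*
 * Build a ttm_tt of @size bytes backed by a mock buffer object. Failures
 * are fatal to the test, so callers always get a usable, initialized tt.
 */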
static struct ttm_tt *ttm_tt_kunit_init(struct kunit *test,
					uint32_t page_flags,
					enum ttm_caching caching,
					size_t size)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_buffer_object *bo;
	struct ttm_tt *tt;
	int err;

	bo = ttm_bo_kunit_init(test, priv->devs, size);
	KUNIT_ASSERT_NOT_NULL(test, bo);
	priv->mock_bo = bo;

	tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	err = ttm_tt_init(tt, priv->mock_bo, page_flags, caching, 0);
	KUNIT_ASSERT_EQ(test, err, 0);

	return tt;
}

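/*
 * Return a pool that already holds pages: allocate @size bytes from a
 * fresh pool using coherent DMA mappings, then free them again so the
 * pages are expected to land back in the pool's per-caching, per-order
 * free lists rather than going back to the system.
 */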
static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
					       size_t size,
					       enum ttm_caching caching)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	struct ttm_pool *pool;
	struct ttm_tt *tt;
	unsigned long order = __fls(size / PAGE_SIZE);
	int err;

	tt = ttm_tt_kunit_init(test, order, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);

	return pool;
}

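/*
 * Parameters for the basic alloc tests: the allocation order and whether
 * the pool should use coherent DMA mappings. MAX_ORDER + 1 exercises a
 * request larger than the biggest block the pool hands out in one piece.
 */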
static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
	{
		.description = "One page",
		.order = 0,
	},
	{
		.description = "More than one page",
		.order = 2,
	},
	{
		.description = "Above the allocation limit",
		.order = MAX_ORDER + 1,
	},
	{
		.description = "One page, with coherent DMA mappings enabled",
		.order = 0,
		.use_dma_alloc = true,
	},
	{
		.description = "Above the allocation limit, with coherent DMA mappings enabled",
		.order = MAX_ORDER + 1,
		.use_dma_alloc = true,
	},
};

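/* Report the human-readable case description to the KUnit runner. */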
static void ttm_pool_alloc_case_desc(const struct ttm_pool_test_case *t,
				     char *desc)
{
	strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
}

KUNIT_ARRAY_PARAM(ttm_pool_alloc_basic, ttm_pool_basic_cases,
		  ttm_pool_alloc_case_desc);

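/*
 * Check ttm_pool_init() bookkeeping and a basic alloc/free cycle. Without
 * DMA mappings the pool is expected to record the block order in
 * page->private; with them, page->private is expected to carry the pool's
 * DMA bookkeeping pointer, so it is only checked for being non-NULL.
 */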
static void ttm_pool_alloc_basic(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	const struct ttm_pool_test_case *params = test->param_value;
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct page *fst_page, *last_page;
	enum ttm_caching caching = ttm_uncached;
	unsigned int expected_num_pages = 1 << params->order;
	size_t size = expected_num_pages * PAGE_SIZE;
	int err;

	tt = ttm_tt_kunit_init(test, 0, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->use_dma_alloc,
		      false);

	KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev);
	KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE);
	KUNIT_ASSERT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);
	KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);

	fst_page = tt->pages[0];
	last_page = tt->pages[tt->num_pages - 1];

	if (params->order <= MAX_ORDER) {
		if (params->use_dma_alloc) {
			KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
			KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
		} else {
			KUNIT_ASSERT_EQ(test, fst_page->private, params->order);
		}
	} else {
		if (params->use_dma_alloc) {
			KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
			KUNIT_ASSERT_NULL(test, (void *)last_page->private);
		} else {
			/*
			 * We expect to alloc one big block, followed by
			 * order 0 blocks
			 */
			KUNIT_ASSERT_EQ(test, fst_page->private,
					min_t(unsigned int, MAX_ORDER,
					      params->order));
			KUNIT_ASSERT_EQ(test, last_page->private, 0);
		}
	}

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);
	ttm_pool_fini(pool);
}

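/*
 * Same allocation paths, but through ttm_sg_tt_init() so that
 * tt->dma_address[] is populated; the first and last entries are expected
 * to hold valid DMA addresses after a successful ttm_pool_alloc().
 */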
static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	const struct ttm_pool_test_case *params = test->param_value;
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_buffer_object *bo;
	dma_addr_t dma1, dma2;
	enum ttm_caching caching = ttm_uncached;
	unsigned int expected_num_pages = 1 << params->order;
	size_t size = expected_num_pages * PAGE_SIZE;
	int err;

	tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	bo = ttm_bo_kunit_init(test, devs, size);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	err = ttm_sg_tt_init(tt, bo, 0, caching);
	KUNIT_ASSERT_EQ(test, err, 0);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);
	KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);

	dma1 = tt->dma_address[0];
	dma2 = tt->dma_address[tt->num_pages - 1];

	KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma1);
	KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma2);

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);
	ttm_pool_fini(pool);
}

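/*
 * Allocate with the same order and caching as the pre-populated pages:
 * the request should be served from the pool's free list, draining it.
 */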
static void ttm_pool_alloc_order_caching_match(struct kunit *test)
{
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_pool_type *pt;
	enum ttm_caching caching = ttm_uncached;
	unsigned int order = 0;
	size_t size = PAGE_SIZE;
	int err;

	pool = ttm_pool_pre_populated(test, size, caching);

	pt = &pool->caching[caching].orders[order];
	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));

	tt = ttm_tt_kunit_init(test, 0, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);

	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);
	ttm_pool_fini(pool);
}

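/*
 * Allocate with a caching mode that differs from the pre-populated pages:
 * the cached list must stay untouched, and freeing must put the pages on
 * the uncached list instead.
 */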
static void ttm_pool_alloc_caching_mismatch(struct kunit *test)
{
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_pool_type *pt_pool, *pt_tt;
	enum ttm_caching tt_caching = ttm_uncached;
	enum ttm_caching pool_caching = ttm_cached;
	size_t size = PAGE_SIZE;
	unsigned int order = 0;
	int err;

	pool = ttm_pool_pre_populated(test, size, pool_caching);

	pt_pool = &pool->caching[pool_caching].orders[order];
	pt_tt = &pool->caching[tt_caching].orders[order];

	tt = ttm_tt_kunit_init(test, 0, tt_caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
	KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);

	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
	KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));

	ttm_pool_fini(pool);
}

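/*
 * Allocate with a smaller order than the pre-populated pages: the order-2
 * list must stay untouched, and freeing must put the page on the order-0
 * list instead.
 */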
static void ttm_pool_alloc_order_mismatch(struct kunit *test)
{
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_pool_type *pt_pool, *pt_tt;
	enum ttm_caching caching = ttm_uncached;
	unsigned int order = 2;
	size_t fst_size = (1 << order) * PAGE_SIZE;
	size_t snd_size = PAGE_SIZE;
	int err;

	pool = ttm_pool_pre_populated(test, fst_size, caching);

	pt_pool = &pool->caching[caching].orders[order];
	pt_tt = &pool->caching[caching].orders[0];

	tt = ttm_tt_kunit_init(test, 0, caching, snd_size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
	KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);

	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
	KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));

	ttm_pool_fini(pool);
}

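/*
 * With coherent DMA mappings enabled, freeing is expected to recycle the
 * pages onto the matching per-pool order/caching list.
 */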
static void ttm_pool_free_dma_alloc(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_pool_type *pt;
	enum ttm_caching caching = ttm_uncached;
	unsigned int order = 2;
	size_t size = (1 << order) * PAGE_SIZE;

	tt = ttm_tt_kunit_init(test, 0, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
	ttm_pool_alloc(pool, tt, &simple_ctx);

	pt = &pool->caching[caching].orders[order];
	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);

	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));

	ttm_pool_fini(pool);
}

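/*
 * Same free cycle with coherent DMA mappings disabled; the per-pool
 * order-2 list is checked both right after the allocation and after the
 * free.
 */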
static void ttm_pool_free_no_dma_alloc(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_pool_type *pt;
	enum ttm_caching caching = ttm_uncached;
	unsigned int order = 2;
	size_t size = (1 << order) * PAGE_SIZE;

	tt = ttm_tt_kunit_init(test, 0, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, false, false);
	ttm_pool_alloc(pool, tt, &simple_ctx);

	pt = &pool->caching[caching].orders[order];
	KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);

	KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));

	ttm_pool_fini(pool);
}

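/*
 * ttm_pool_fini() must hand all pooled pages back to the system: the
 * pre-populated free list is expected to be empty afterwards.
 */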
static void ttm_pool_fini_basic(struct kunit *test)
{
	struct ttm_pool *pool;
	struct ttm_pool_type *pt;
	enum ttm_caching caching = ttm_uncached;
	unsigned int order = 0;
	size_t size = PAGE_SIZE;

	pool = ttm_pool_pre_populated(test, size, caching);
	pt = &pool->caching[caching].orders[order];

	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));

	ttm_pool_fini(pool);

	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
}

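/* Wire all cases into one suite; the basic tests run once per parameter. */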
static struct kunit_case ttm_pool_test_cases[] = {
	KUNIT_CASE_PARAM(ttm_pool_alloc_basic, ttm_pool_alloc_basic_gen_params),
	KUNIT_CASE_PARAM(ttm_pool_alloc_basic_dma_addr,
			 ttm_pool_alloc_basic_gen_params),
	KUNIT_CASE(ttm_pool_alloc_order_caching_match),
	KUNIT_CASE(ttm_pool_alloc_caching_mismatch),
	KUNIT_CASE(ttm_pool_alloc_order_mismatch),
	KUNIT_CASE(ttm_pool_free_dma_alloc),
	KUNIT_CASE(ttm_pool_free_no_dma_alloc),
	KUNIT_CASE(ttm_pool_fini_basic),
	{}
};

static struct kunit_suite ttm_pool_test_suite = {
	.name = "ttm_pool",
	.init = ttm_pool_test_init,
	.exit = ttm_pool_test_fini,
	.test_cases = ttm_pool_test_cases,
};

kunit_test_suites(&ttm_pool_test_suite);

MODULE_LICENSE("GPL");