xref: /openbsd-src/sys/dev/pci/drm/ttm/tests/ttm_device_test.c (revision 192fc1c096302d914bbc869bdcc5269f6d0065f7)
1f005ef32Sjsg // SPDX-License-Identifier: GPL-2.0 AND MIT
2f005ef32Sjsg /*
3f005ef32Sjsg  * Copyright © 2023 Intel Corporation
4f005ef32Sjsg  */
5f005ef32Sjsg #include <drm/ttm/ttm_resource.h>
6f005ef32Sjsg #include <drm/ttm/ttm_device.h>
7f005ef32Sjsg #include <drm/ttm/ttm_placement.h>
8f005ef32Sjsg 
9f005ef32Sjsg #include "ttm_kunit_helpers.h"
10f005ef32Sjsg 
/*
 * One parameter set for the ttm_device_init_pools() parameterized test.
 * Instances live in the ttm_device_cases[] table below.
 */
struct ttm_device_test_case {
	const char *description;	/* human-readable case name shown by KUnit */
	bool use_dma_alloc;		/* forwarded to ttm_device_kunit_init() */
	bool use_dma32;			/* forwarded to ttm_device_kunit_init() */
	bool pools_init_expected;	/* expect per-caching/per-order pool types to be set up */
};
17f005ef32Sjsg 
ttm_device_init_basic(struct kunit * test)18f005ef32Sjsg static void ttm_device_init_basic(struct kunit *test)
19f005ef32Sjsg {
20f005ef32Sjsg 	struct ttm_test_devices *priv = test->priv;
21f005ef32Sjsg 	struct ttm_device *ttm_dev;
22f005ef32Sjsg 	struct ttm_resource_manager *ttm_sys_man;
23f005ef32Sjsg 	int err;
24f005ef32Sjsg 
25f005ef32Sjsg 	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
26f005ef32Sjsg 	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
27f005ef32Sjsg 
28f005ef32Sjsg 	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
29f005ef32Sjsg 	KUNIT_ASSERT_EQ(test, err, 0);
30f005ef32Sjsg 
31f005ef32Sjsg 	KUNIT_EXPECT_PTR_EQ(test, ttm_dev->funcs, &ttm_dev_funcs);
32f005ef32Sjsg 	KUNIT_ASSERT_NOT_NULL(test, ttm_dev->wq);
33f005ef32Sjsg 	KUNIT_ASSERT_NOT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]);
34f005ef32Sjsg 
35f005ef32Sjsg 	ttm_sys_man = &ttm_dev->sysman;
36f005ef32Sjsg 	KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man);
37f005ef32Sjsg 	KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_tt);
38f005ef32Sjsg 	KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_type);
39f005ef32Sjsg 	KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man->func);
40f005ef32Sjsg 
41f005ef32Sjsg 	KUNIT_EXPECT_PTR_EQ(test, ttm_dev->dev_mapping,
42f005ef32Sjsg 			    priv->drm->anon_inode->i_mapping);
43f005ef32Sjsg 
44f005ef32Sjsg 	ttm_device_fini(ttm_dev);
45f005ef32Sjsg }
46f005ef32Sjsg 
ttm_device_init_multiple(struct kunit * test)47f005ef32Sjsg static void ttm_device_init_multiple(struct kunit *test)
48f005ef32Sjsg {
49f005ef32Sjsg 	struct ttm_test_devices *priv = test->priv;
50f005ef32Sjsg 	struct ttm_device *ttm_devs;
51f005ef32Sjsg 	unsigned int i, num_dev = 3;
52f005ef32Sjsg 	int err;
53f005ef32Sjsg 
54f005ef32Sjsg 	ttm_devs = kunit_kcalloc(test, num_dev, sizeof(*ttm_devs), GFP_KERNEL);
55f005ef32Sjsg 	KUNIT_ASSERT_NOT_NULL(test, ttm_devs);
56f005ef32Sjsg 
57f005ef32Sjsg 	for (i = 0; i < num_dev; i++) {
58f005ef32Sjsg 		err = ttm_device_kunit_init(priv, &ttm_devs[i], false, false);
59f005ef32Sjsg 		KUNIT_ASSERT_EQ(test, err, 0);
60f005ef32Sjsg 
61f005ef32Sjsg 		KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].dev_mapping,
62f005ef32Sjsg 				    priv->drm->anon_inode->i_mapping);
63f005ef32Sjsg 		KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].wq);
64f005ef32Sjsg 		KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].funcs, &ttm_dev_funcs);
65f005ef32Sjsg 		KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].man_drv[TTM_PL_SYSTEM]);
66f005ef32Sjsg 	}
67f005ef32Sjsg 
68f005ef32Sjsg 	KUNIT_ASSERT_EQ(test, list_count_nodes(&ttm_devs[0].device_list), num_dev);
69f005ef32Sjsg 
70f005ef32Sjsg 	for (i = 0; i < num_dev; i++)
71f005ef32Sjsg 		ttm_device_fini(&ttm_devs[i]);
72f005ef32Sjsg }
73f005ef32Sjsg 
ttm_device_fini_basic(struct kunit * test)74f005ef32Sjsg static void ttm_device_fini_basic(struct kunit *test)
75f005ef32Sjsg {
76f005ef32Sjsg 	struct ttm_test_devices *priv = test->priv;
77f005ef32Sjsg 	struct ttm_device *ttm_dev;
78f005ef32Sjsg 	struct ttm_resource_manager *man;
79f005ef32Sjsg 	int err;
80f005ef32Sjsg 
81f005ef32Sjsg 	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
82f005ef32Sjsg 	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
83f005ef32Sjsg 
84f005ef32Sjsg 	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
85f005ef32Sjsg 	KUNIT_ASSERT_EQ(test, err, 0);
86f005ef32Sjsg 
87f005ef32Sjsg 	man = ttm_manager_type(ttm_dev, TTM_PL_SYSTEM);
88f005ef32Sjsg 	KUNIT_ASSERT_NOT_NULL(test, man);
89f005ef32Sjsg 
90f005ef32Sjsg 	ttm_device_fini(ttm_dev);
91f005ef32Sjsg 
92f005ef32Sjsg 	KUNIT_ASSERT_FALSE(test, man->use_type);
93f005ef32Sjsg 	KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[0]));
94f005ef32Sjsg 	KUNIT_ASSERT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]);
95f005ef32Sjsg }
96f005ef32Sjsg 
ttm_device_init_no_vma_man(struct kunit * test)97f005ef32Sjsg static void ttm_device_init_no_vma_man(struct kunit *test)
98f005ef32Sjsg {
99f005ef32Sjsg 	struct ttm_test_devices *priv = test->priv;
100f005ef32Sjsg 	struct drm_device *drm = priv->drm;
101f005ef32Sjsg 	struct ttm_device *ttm_dev;
102f005ef32Sjsg 	struct drm_vma_offset_manager *vma_man;
103f005ef32Sjsg 	int err;
104f005ef32Sjsg 
105f005ef32Sjsg 	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
106f005ef32Sjsg 	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
107f005ef32Sjsg 
108f005ef32Sjsg 	/* Let's pretend there's no VMA manager allocated */
109f005ef32Sjsg 	vma_man = drm->vma_offset_manager;
110f005ef32Sjsg 	drm->vma_offset_manager = NULL;
111f005ef32Sjsg 
112f005ef32Sjsg 	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
113f005ef32Sjsg 	KUNIT_EXPECT_EQ(test, err, -EINVAL);
114f005ef32Sjsg 
115f005ef32Sjsg 	/* Bring the manager back for a graceful cleanup */
116f005ef32Sjsg 	drm->vma_offset_manager = vma_man;
117f005ef32Sjsg }
118f005ef32Sjsg 
/*
 * Parameter table for ttm_device_init_pools(): all four combinations of
 * use_dma_alloc/use_dma32, with pool initialization expected exactly when
 * use_dma_alloc is set.
 */
static const struct ttm_device_test_case ttm_device_cases[] = {
	{
		.description = "No DMA allocations, no DMA32 required",
		.use_dma_alloc = false,
		.use_dma32 = false,
		.pools_init_expected = false,
	},
	{
		.description = "DMA allocations, DMA32 required",
		.use_dma_alloc = true,
		.use_dma32 = true,
		.pools_init_expected = true,
	},
	{
		.description = "No DMA allocations, DMA32 required",
		.use_dma_alloc = false,
		.use_dma32 = true,
		.pools_init_expected = false,
	},
	{
		.description = "DMA allocations, no DMA32 required",
		.use_dma_alloc = true,
		.use_dma32 = false,
		.pools_init_expected = true,
	},
};
145f005ef32Sjsg 
/* Copy a case's description into the name KUnit prints for that parameter. */
static void ttm_device_case_desc(const struct ttm_device_test_case *t, char *desc)
{
	strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
}

/* Generates ttm_device_gen_params(), used by KUNIT_CASE_PARAM below. */
KUNIT_ARRAY_PARAM(ttm_device, ttm_device_cases, ttm_device_case_desc);
152f005ef32Sjsg 
ttm_device_init_pools(struct kunit * test)153f005ef32Sjsg static void ttm_device_init_pools(struct kunit *test)
154f005ef32Sjsg {
155f005ef32Sjsg 	struct ttm_test_devices *priv = test->priv;
156f005ef32Sjsg 	const struct ttm_device_test_case *params = test->param_value;
157f005ef32Sjsg 	struct ttm_device *ttm_dev;
158f005ef32Sjsg 	struct ttm_pool *pool;
159f005ef32Sjsg 	struct ttm_pool_type pt;
160f005ef32Sjsg 	int err;
161f005ef32Sjsg 
162f005ef32Sjsg 	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
163f005ef32Sjsg 	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
164f005ef32Sjsg 
165f005ef32Sjsg 	err = ttm_device_kunit_init(priv, ttm_dev,
166f005ef32Sjsg 				    params->use_dma_alloc,
167f005ef32Sjsg 				    params->use_dma32);
168f005ef32Sjsg 	KUNIT_ASSERT_EQ(test, err, 0);
169f005ef32Sjsg 
170f005ef32Sjsg 	pool = &ttm_dev->pool;
171f005ef32Sjsg 	KUNIT_ASSERT_NOT_NULL(test, pool);
172f005ef32Sjsg 	KUNIT_EXPECT_PTR_EQ(test, pool->dev, priv->dev);
173f005ef32Sjsg 	KUNIT_EXPECT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);
174f005ef32Sjsg 	KUNIT_EXPECT_EQ(test, pool->use_dma32, params->use_dma32);
175f005ef32Sjsg 
176f005ef32Sjsg 	if (params->pools_init_expected) {
177f005ef32Sjsg 		for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
178*192fc1c0Sjsg 			for (int j = 0; j < NR_PAGE_ORDERS; ++j) {
179f005ef32Sjsg 				pt = pool->caching[i].orders[j];
180f005ef32Sjsg 				KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
181f005ef32Sjsg 				KUNIT_EXPECT_EQ(test, pt.caching, i);
182f005ef32Sjsg 				KUNIT_EXPECT_EQ(test, pt.order, j);
183f005ef32Sjsg 
184f005ef32Sjsg 				if (params->use_dma_alloc)
185f005ef32Sjsg 					KUNIT_ASSERT_FALSE(test,
186f005ef32Sjsg 							   list_empty(&pt.pages));
187f005ef32Sjsg 			}
188f005ef32Sjsg 		}
189f005ef32Sjsg 	}
190f005ef32Sjsg 
191f005ef32Sjsg 	ttm_device_fini(ttm_dev);
192f005ef32Sjsg }
193f005ef32Sjsg 
/* All test cases in this suite; the pools test runs once per table entry. */
static struct kunit_case ttm_device_test_cases[] = {
	KUNIT_CASE(ttm_device_init_basic),
	KUNIT_CASE(ttm_device_init_multiple),
	KUNIT_CASE(ttm_device_fini_basic),
	KUNIT_CASE(ttm_device_init_no_vma_man),
	KUNIT_CASE_PARAM(ttm_device_init_pools, ttm_device_gen_params),
	{}
};
202f005ef32Sjsg 
/* Suite definition: the shared test-device fixture is built per test. */
static struct kunit_suite ttm_device_test_suite = {
	.name = "ttm_device",
	.init = ttm_test_devices_init,
	.exit = ttm_test_devices_fini,
	.test_cases = ttm_device_test_cases,
};

kunit_test_suites(&ttm_device_test_suite);

MODULE_LICENSE("GPL");
213