/*	$NetBSD: intel_memory_region.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $	*/

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_memory_region.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $");

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "mock_region.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_object_blt.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "selftests/igt_flush_test.h"
#include "selftests/i915_random.h"

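/*
 * Unpin and free every object on @objects, then flush the freed-object
 * worker so nothing is left to pollute the memory region between tests.
 */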
static void close_objects(struct intel_memory_region *mem,
			  struct list_head *objects)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		if (i915_gem_object_has_pinned_pages(obj))
			i915_gem_object_unpin_pages(obj);
		/* No polluting the memory region between tests */
		__i915_gem_object_put_pages(obj);
		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}

	cond_resched();

	i915_gem_drain_freed_objects(i915);
}

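/*
 * Fill the region with objects of prime-numbered page counts until
 * allocation fails, and check that it only fails once the region is
 * genuinely exhausted.
 */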
static int igt_mock_fill(void *arg)
{
	struct intel_memory_region *mem = arg;
	resource_size_t total = resource_size(&mem->region);
	resource_size_t page_size;
	resource_size_t rem;
	unsigned long max_pages;
	unsigned long page_num;
	LIST_HEAD(objects);
	int err = 0;

	page_size = mem->mm.chunk_size;
	max_pages = div64_u64(total, page_size);
	rem = total;

	for_each_prime_number_from(page_num, 1, max_pages) {
		resource_size_t size = page_num * page_size;
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create_region(mem, size, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);
		rem -= size;
	}

	if (err == -ENOMEM)
		err = 0;
	if (err == -ENXIO) {
		if (page_num * page_size <= rem) {
			pr_err("%s failed, space still left in region\n",
			       __func__);
			err = -EINVAL;
		} else {
			err = 0;
		}
	}

	close_objects(mem, &objects);

	return err;
}

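/*
 * Create and pin an object in @mem, tracking it on @objects for later
 * cleanup via close_objects().
 */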
static struct drm_i915_gem_object *
igt_object_create(struct intel_memory_region *mem,
		  struct list_head *objects,
		  u64 size,
		  unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_region(mem, size, flags);
	if (IS_ERR(obj))
		return obj;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto put;

	list_add(&obj->st_link, objects);
	return obj;

put:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

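/* Undo igt_object_create(): unpin, drop the pages and free @obj. */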
static void igt_object_release(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
	__i915_gem_object_put_pages(obj);
	list_del(&obj->st_link);
	i915_gem_object_put(obj);
}

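/*
 * Exercise I915_BO_ALLOC_CONTIGUOUS: objects of minimum, maximum and
 * random size must each occupy a single sg entry, and once the region
 * has been fragmented into SZ_64K blocks, contiguous allocations larger
 * than that must fail even though half the region is still free.
 */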
static int igt_mock_contiguous(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_gem_object *obj;
	unsigned long n_objects;
	LIST_HEAD(objects);
	LIST_HEAD(holes);
	I915_RND_STATE(prng);
	resource_size_t total;
	resource_size_t min;
	u64 target;
	int err = 0;

	total = resource_size(&mem->region);

	/* Min size */
	obj = igt_object_create(mem, &objects, mem->mm.chunk_size,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->mm.pages->nents != 1) {
		pr_err("%s min object spans multiple sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Max size */
	obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->mm.pages->nents != 1) {
		pr_err("%s max object spans multiple sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Internal fragmentation should not bleed into the object size */
	target = i915_prandom_u64_state(&prng);
	div64_u64_rem(target, total, &target);
	target = round_up(target, PAGE_SIZE);
	target = max_t(u64, PAGE_SIZE, target);

	obj = igt_object_create(mem, &objects, target,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->base.size != target) {
		pr_err("%s obj->base.size(%zx) != target(%llx)\n", __func__,
		       obj->base.size, target);
		err = -EINVAL;
		goto err_close_objects;
	}

	if (obj->mm.pages->nents != 1) {
		pr_err("%s object spans multiple sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Try to fragment the address space, such that half of it is free, but
	 * the max contiguous block size is SZ_64K.
	 */

	target = SZ_64K;
	n_objects = div64_u64(total, target);

	while (n_objects--) {
		struct list_head *list;

		if (n_objects % 2)
			list = &holes;
		else
			list = &objects;

		obj = igt_object_create(mem, list, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_close_objects;
		}
	}

	close_objects(mem, &holes);

	min = target;
	target = total >> 1;

	/* Make sure we can still allocate all the fragmented space */
	obj = igt_object_create(mem, &objects, target, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Even though we have enough free space, we don't have a big enough
	 * contiguous block. Make sure that holds true.
	 */

	do {
		bool should_fail = target > min;

		obj = igt_object_create(mem, &objects, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (should_fail != IS_ERR(obj)) {
			pr_err("%s target allocation(%llx) mismatch\n",
			       __func__, target);
			err = -EINVAL;
			goto err_close_objects;
		}

		target >>= 1;
	} while (target >= mem->mm.chunk_size);

err_close_objects:
	list_splice_tail(&holes, &objects);
	close_objects(mem, &objects);
	return err;
}

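/*
 * Thin wrapper around igt_gpu_fill_dw(): have the GPU write @value to
 * dword index @dword of each page backing @vma.
 */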
static int igt_gpu_write_dw(struct intel_context *ce,
			    struct i915_vma *vma,
			    u32 dword,
			    u32 value)
{
	return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
			       vma->size >> PAGE_SHIFT, value);
}

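/*
 * Read back dword index @dword of each page of @obj through a WC
 * mapping and compare it against @val.
 */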
static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_map(obj);
	return err;
}

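/*
 * Walk the context's engines in random order, having each capable
 * engine write a random value to a random dword of @obj, and verify
 * every write from the CPU.
 */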
static int igt_gpu_write(struct i915_gem_context *ctx,
			 struct drm_i915_gem_object *obj)
{
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct intel_context *ce;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int count;
	struct i915_vma *vma;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	n = 0;
	count = 0;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		vm = ce->vm;
		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		return 0;

	order = i915_random_order(count * count, &prng);
	if (!order)
		return -ENOMEM;

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_free;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_free;

	i = 0;
	engines = i915_gem_context_lock_engines(ctx);
	do {
		u32 rng = prandom_u32_state(&prng);
		u32 dword = offset_in_page(rng) / 4;

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		err = igt_gpu_write_dw(ce, vma, dword, rng);
		if (err)
			break;

		err = igt_cpu_check(obj, dword, rng);
		if (err)
			break;
	} while (!__igt_timeout(end_time, NULL));
	i915_gem_context_unlock_engines(ctx);

out_free:
	kfree(order);

	if (err == -ENOMEM)
		err = 0;

	return err;
}

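/* Smoke test: create, pin and release a single-page LMEM object. */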
static int igt_lmem_create(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err = 0;

	obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_put;

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

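/*
 * Create a randomly sized LMEM object and exercise GPU writes to it
 * via igt_gpu_write().
 */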
static int igt_lmem_write_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	struct file *file;
	I915_RND_STATE(prng);
	u32 sz;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);

	obj = i915_gem_object_create_lmem(i915, sz, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_file;
	}

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_put;

	err = igt_gpu_write(ctx, obj);
	if (err)
		pr_err("igt_gpu_write failed(%d)\n", err);

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);
out_file:
	fput(file);
	return err;
}

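/* Pick one engine at random from the given uabi engine class. */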
static struct intel_engine_cs *
random_engine_class(struct drm_i915_private *i915,
		    unsigned int class,
		    struct rnd_state *prng)
{
	struct intel_engine_cs *engine;
	unsigned int count;

	count = 0;
	for (engine = intel_engine_lookup_user(i915, class, 0);
	     engine && engine->uabi_class == class;
	     engine = rb_entry_safe(rb_next(&engine->uabi_node),
				    typeof(*engine), uabi_node))
		count++;

	count = i915_prandom_u32_max_state(count, prng);
	return intel_engine_lookup_user(i915, class, count);
}

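/*
 * Blit a known pattern into an LMEM object, then overwrite random
 * ranges of it from the CPU through a WC mapping, using sizes and
 * alignments drawn from bytes[], and sample one dword per write to
 * verify it landed.
 */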
static int igt_lmem_write_cpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	u32 bytes[] = {
		0, /* rng placeholder */
		sizeof(u32),
		sizeof(u64),
		64, /* cl */
		PAGE_SIZE,
		PAGE_SIZE - sizeof(u32),
		PAGE_SIZE - sizeof(u64),
		PAGE_SIZE - 64,
	};
	struct intel_engine_cs *engine;
	u32 *vaddr;
	u32 sz;
	u32 i;
	int *order;
	int count;
	int err;

	engine = random_engine_class(i915, I915_ENGINE_CLASS_COPY, &prng);
	if (!engine)
		return 0;

	pr_info("%s: using %s\n", __func__, engine->name);

	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
	sz = max_t(u32, 2 * PAGE_SIZE, sz);

	obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	/* Put the pages into a known state -- from the gpu for added fun */
	intel_engine_pm_get(engine);
	err = i915_gem_object_fill_blt(obj, engine->kernel_context, 0xdeadbeaf);
	intel_engine_pm_put(engine);
	if (err)
		goto out_unpin;

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_wc_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err)
		goto out_unpin;

	count = ARRAY_SIZE(bytes);
	order = i915_random_order(count * count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_unpin;
	}

	/* We want to throw in a random width/align */
	bytes[0] = igt_random_offset(&prng, 0, PAGE_SIZE, sizeof(u32),
				     sizeof(u32));

	i = 0;
	do {
		u32 offset;
		u32 align;
		u32 dword;
		u32 size;
		u32 val;

		size = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));

		offset = igt_random_offset(&prng, 0, obj->base.size,
					   size, align);

		val = prandom_u32_state(&prng);
		memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
			 size / sizeof(u32));

		/*
		 * Sample random dw -- don't waste precious time reading every
		 * single dw.
		 */
		dword = igt_random_offset(&prng, offset,
					  offset + size,
					  sizeof(u32), sizeof(u32));
		dword /= sizeof(u32);
		if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
			pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
			       __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
			       size, align, offset);
			err = -EINVAL;
			break;
		}
	} while (!__igt_timeout(end_time, NULL));

	/* order was never freed on any path; release it here */
	kfree(order);

out_unpin:
	i915_gem_object_unpin_map(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

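/* Entry point for the mock tests, run against a mock 2G region. */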
int intel_memory_region_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_mock_contiguous),
	};
	struct intel_memory_region *mem;
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
	if (IS_ERR(mem)) {
		pr_err("failed to create memory region\n");
		err = PTR_ERR(mem);
		goto out_unref;
	}

	err = i915_subtests(tests, mem);

	intel_memory_region_put(mem);
out_unref:
	drm_dev_put(&i915->drm);
	return err;
}

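/* Entry point for the live tests; requires a device with LMEM. */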
int intel_memory_region_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_lmem_create),
		SUBTEST(igt_lmem_write_cpu),
		SUBTEST(igt_lmem_write_gpu),
	};

	if (!HAS_LMEM(i915)) {
		pr_info("device lacks LMEM support, skipping\n");
		return 0;
	}

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}