1 /*	$NetBSD: i915_gem_gtt.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $	*/
2 
3 /*
4  * Copyright © 2016 Intel Corporation
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23  * IN THE SOFTWARE.
24  *
25  */
26 
27 #include <sys/cdefs.h>
28 __KERNEL_RCSID(0, "$NetBSD: i915_gem_gtt.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $");
29 
30 #include <linux/list_sort.h>
31 #include <linux/prime_numbers.h>
32 
33 #include "gem/i915_gem_context.h"
34 #include "gem/selftests/mock_context.h"
35 #include "gt/intel_context.h"
36 
37 #include "i915_random.h"
38 #include "i915_selftest.h"
39 
40 #include "mock_drm.h"
41 #include "mock_gem_device.h"
42 #include "mock_gtt.h"
43 #include "igt_flush_test.h"
44 
45 static void cleanup_freed_objects(struct drm_i915_private *i915)
46 {
47 	i915_gem_drain_freed_objects(i915);
48 }
49 
50 static void fake_free_pages(struct drm_i915_gem_object *obj,
51 			    struct sg_table *pages)
52 {
53 	sg_free_table(pages);
54 	kfree(pages);
55 }
56 
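/*
 * Note: build a scatterlist for the fake object without touching real
 * memory: every segment (up to BIT(31) bytes each) points at the single
 * page at PFN_BIAS and the DMA address is just that page's physical
 * address, so arbitrarily large "allocations" cost almost nothing.
 */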
57 static int fake_get_pages(struct drm_i915_gem_object *obj)
58 {
59 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
60 #define PFN_BIAS 0x1000
61 	struct sg_table *pages;
62 	struct scatterlist *sg;
63 	unsigned int sg_page_sizes;
64 	typeof(obj->base.size) rem;
65 
66 	pages = kmalloc(sizeof(*pages), GFP);
67 	if (!pages)
68 		return -ENOMEM;
69 
70 	rem = round_up(obj->base.size, BIT(31)) >> 31;
71 	if (sg_alloc_table(pages, rem, GFP)) {
72 		kfree(pages);
73 		return -ENOMEM;
74 	}
75 
76 	sg_page_sizes = 0;
77 	rem = obj->base.size;
78 	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
79 		unsigned long len = min_t(typeof(rem), rem, BIT(31));
80 
81 		GEM_BUG_ON(!len);
82 		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
83 		sg_dma_address(sg) = page_to_phys(sg_page(sg));
84 		sg_dma_len(sg) = len;
85 		sg_page_sizes |= len;
86 
87 		rem -= len;
88 	}
89 	GEM_BUG_ON(rem);
90 
91 	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
92 
93 	return 0;
94 #undef GFP
95 }
96 
97 static void fake_put_pages(struct drm_i915_gem_object *obj,
98 			   struct sg_table *pages)
99 {
100 	fake_free_pages(obj, pages);
101 	obj->mm.dirty = false;
102 }
103 
104 static const struct drm_i915_gem_object_ops fake_ops = {
105 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
106 	.get_pages = fake_get_pages,
107 	.put_pages = fake_put_pages,
108 };
109 
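/*
 * Wrap the fake backing store in a GEM object that can be bound into an
 * address space like any other: the pages are pinned once up front to
 * "preallocate" them (catching failures early) and then released again
 * before the object is handed back.
 */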
110 static struct drm_i915_gem_object *
111 fake_dma_object(struct drm_i915_private *i915, u64 size)
112 {
113 	static struct lock_class_key lock_class;
114 	struct drm_i915_gem_object *obj;
115 
116 	GEM_BUG_ON(!size);
117 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
118 
119 	if (overflows_type(size, obj->base.size))
120 		return ERR_PTR(-E2BIG);
121 
122 	obj = i915_gem_object_alloc();
123 	if (!obj)
124 		goto err;
125 
126 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
127 	i915_gem_object_init(obj, &fake_ops, &lock_class);
128 
129 	i915_gem_object_set_volatile(obj);
130 
131 	obj->write_domain = I915_GEM_DOMAIN_CPU;
132 	obj->read_domains = I915_GEM_DOMAIN_CPU;
133 	obj->cache_level = I915_CACHE_NONE;
134 
135 	/* Preallocate the "backing storage" */
136 	if (i915_gem_object_pin_pages(obj))
137 		goto err_obj;
138 
139 	i915_gem_object_unpin_pages(obj);
140 	return obj;
141 
142 err_obj:
143 	i915_gem_object_put(obj);
144 err:
145 	return ERR_PTR(-ENOMEM);
146 }
147 
148 static int igt_ppgtt_alloc(void *arg)
149 {
150 	struct drm_i915_private *dev_priv = arg;
151 	struct i915_ppgtt *ppgtt;
152 	u64 size, last, limit;
153 	int err = 0;
154 
155 	/* Allocate a ppgtt and try to fill the entire range */
156 
157 	if (!HAS_PPGTT(dev_priv))
158 		return 0;
159 
160 	ppgtt = i915_ppgtt_create(&dev_priv->gt);
161 	if (IS_ERR(ppgtt))
162 		return PTR_ERR(ppgtt);
163 
164 	if (!ppgtt->vm.allocate_va_range)
165 		goto err_ppgtt_cleanup;
166 
167 	/*
168 	 * While we only allocate the page tables here and so we could
169 	 * address a much larger GTT than we could actually fit into
170 	 * RAM, a practical limit is the number of physical pages in the system.
171 	 * This should ensure that we do not run into the oomkiller during
172 	 * the test and take down the machine wilfully.
173 	 */
174 	limit = totalram_pages() << PAGE_SHIFT;
175 	limit = min(ppgtt->vm.total, limit);
176 
177 	/* Check we can allocate the entire range */
178 	for (size = 4096; size <= limit; size <<= 2) {
179 		err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
180 		if (err) {
181 			if (err == -ENOMEM) {
182 				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
183 					size, ilog2(size));
184 				err = 0; /* virtual space too large! */
185 			}
186 			goto err_ppgtt_cleanup;
187 		}
188 
189 		cond_resched();
190 
191 		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
192 	}
193 
194 	/* Check we can incrementally allocate the entire range */
195 	for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
196 		err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
197 						  last, size - last);
198 		if (err) {
199 			if (err == -ENOMEM) {
200 				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
201 					last, size - last, ilog2(size));
202 				err = 0; /* virtual space too large! */
203 			}
204 			goto err_ppgtt_cleanup;
205 		}
206 
207 		cond_resched();
208 	}
209 
210 err_ppgtt_cleanup:
211 	i915_vm_put(&ppgtt->vm);
212 	return err;
213 }
214 
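/*
 * Exercise the low-level PTE hooks directly: for each power-of-two object
 * size that still fits in the hole, point a mock vma at the object's pages
 * and feed it straight to allocate_va_range/insert_entries at randomly
 * ordered, non-overlapping offsets, then clear_range the same offsets in a
 * different random order.
 */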
215 static int lowlevel_hole(struct i915_address_space *vm,
216 			 u64 hole_start, u64 hole_end,
217 			 unsigned long end_time)
218 {
219 	I915_RND_STATE(seed_prng);
220 	struct i915_vma *mock_vma;
221 	unsigned int size;
222 
223 	mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL);
224 	if (!mock_vma)
225 		return -ENOMEM;
226 
227 	/* Keep creating larger objects until one cannot fit into the hole */
228 	for (size = 12; (hole_end - hole_start) >> size; size++) {
229 		I915_RND_SUBSTATE(prng, seed_prng);
230 		struct drm_i915_gem_object *obj;
231 		unsigned int *order, count, n;
232 		u64 hole_size;
233 
234 		hole_size = (hole_end - hole_start) >> size;
235 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
236 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
237 		count = hole_size >> 1;
238 		if (!count) {
239 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
240 				 __func__, hole_start, hole_end, size, hole_size);
241 			break;
242 		}
243 
244 		do {
245 			order = i915_random_order(count, &prng);
246 			if (order)
247 				break;
248 		} while (count >>= 1);
249 		if (!count) {
250 			kfree(mock_vma);
251 			return -ENOMEM;
252 		}
253 		GEM_BUG_ON(!order);
254 
255 		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
256 		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);
257 
258 		/* Ignore allocation failures (i.e. don't report them as
259 		 * a test failure) as we are purposefully allocating very
260 		 * large objects without checking that we have sufficient
261 		 * memory. We expect to hit -ENOMEM.
262 		 */
263 
264 		obj = fake_dma_object(vm->i915, BIT_ULL(size));
265 		if (IS_ERR(obj)) {
266 			kfree(order);
267 			break;
268 		}
269 
270 		GEM_BUG_ON(obj->base.size != BIT_ULL(size));
271 
272 		if (i915_gem_object_pin_pages(obj)) {
273 			i915_gem_object_put(obj);
274 			kfree(order);
275 			break;
276 		}
277 
278 		for (n = 0; n < count; n++) {
279 			u64 addr = hole_start + order[n] * BIT_ULL(size);
280 			intel_wakeref_t wakeref;
281 
282 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
283 
284 			if (igt_timeout(end_time,
285 					"%s timed out before %d/%d\n",
286 					__func__, n, count)) {
287 				hole_end = hole_start; /* quit */
288 				break;
289 			}
290 
291 			if (vm->allocate_va_range &&
292 			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
293 				break;
294 
295 			mock_vma->pages = obj->mm.pages;
296 			mock_vma->node.size = BIT_ULL(size);
297 			mock_vma->node.start = addr;
298 
299 			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
300 				vm->insert_entries(vm, mock_vma,
301 						   I915_CACHE_NONE, 0);
302 		}
303 		count = n;
304 
305 		i915_random_reorder(order, count, &prng);
306 		for (n = 0; n < count; n++) {
307 			u64 addr = hole_start + order[n] * BIT_ULL(size);
308 			intel_wakeref_t wakeref;
309 
310 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
311 			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
312 				vm->clear_range(vm, addr, BIT_ULL(size));
313 		}
314 
315 		i915_gem_object_unpin_pages(obj);
316 		i915_gem_object_put(obj);
317 
318 		kfree(order);
319 
320 		cleanup_freed_objects(vm->i915);
321 	}
322 
323 	kfree(mock_vma);
324 	return 0;
325 }
326 
327 static void close_object_list(struct list_head *objects,
328 			      struct i915_address_space *vm)
329 {
330 	struct drm_i915_gem_object *obj, *on;
331 	int ignored;
332 
333 	list_for_each_entry_safe(obj, on, objects, st_link) {
334 		struct i915_vma *vma;
335 
336 		vma = i915_vma_instance(obj, vm, NULL);
337 		if (!IS_ERR(vma))
338 			ignored = i915_vma_unbind(vma);
339 		/* Only ppgtt vma may be closed before the object is freed */
340 		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
341 			i915_vma_close(vma);
342 
343 		list_del(&obj->st_link);
344 		i915_gem_object_put(obj);
345 	}
346 }
347 
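/*
 * Accumulate objects of prime-scaled sizes and repeatedly pack the whole
 * list against the top and then the bottom edge of the hole: pin forwards,
 * verify placement and unbind forwards, then repeat both walks over the
 * list in reverse order.
 */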
348 static int fill_hole(struct i915_address_space *vm,
349 		     u64 hole_start, u64 hole_end,
350 		     unsigned long end_time)
351 {
352 	const u64 hole_size = hole_end - hole_start;
353 	struct drm_i915_gem_object *obj;
354 	const unsigned long max_pages =
355 		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
356 	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
357 	unsigned long npages, prime, flags;
358 	struct i915_vma *vma;
359 	LIST_HEAD(objects);
360 	int err;
361 
362 	/* Try binding many VMA working inwards from either edge */
363 
364 	flags = PIN_OFFSET_FIXED | PIN_USER;
365 	if (i915_is_ggtt(vm))
366 		flags |= PIN_GLOBAL;
367 
368 	for_each_prime_number_from(prime, 2, max_step) {
369 		for (npages = 1; npages <= max_pages; npages *= prime) {
370 			const u64 full_size = npages << PAGE_SHIFT;
371 			const struct {
372 				const char *name;
373 				u64 offset;
374 				int step;
375 			} phases[] = {
376 				{ "top-down", hole_end, -1, },
377 				{ "bottom-up", hole_start, 1, },
378 				{ }
379 			}, *p;
380 
381 			obj = fake_dma_object(vm->i915, full_size);
382 			if (IS_ERR(obj))
383 				break;
384 
385 			list_add(&obj->st_link, &objects);
386 
387 			/* Align differing sized objects against the edges, and
388 			 * check we don't walk off into the void when binding
389 			 * them into the GTT.
390 			 */
391 			for (p = phases; p->name; p++) {
392 				u64 offset;
393 
394 				offset = p->offset;
395 				list_for_each_entry(obj, &objects, st_link) {
396 					vma = i915_vma_instance(obj, vm, NULL);
397 					if (IS_ERR(vma))
398 						continue;
399 
400 					if (p->step < 0) {
401 						if (offset < hole_start + obj->base.size)
402 							break;
403 						offset -= obj->base.size;
404 					}
405 
406 					err = i915_vma_pin(vma, 0, 0, offset | flags);
407 					if (err) {
408 						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
409 						       __func__, p->name, err, npages, prime, offset);
410 						goto err;
411 					}
412 
413 					if (!drm_mm_node_allocated(&vma->node) ||
414 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
415 						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
416 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
417 						       offset);
418 						err = -EINVAL;
419 						goto err;
420 					}
421 
422 					i915_vma_unpin(vma);
423 
424 					if (p->step > 0) {
425 						if (offset + obj->base.size > hole_end)
426 							break;
427 						offset += obj->base.size;
428 					}
429 				}
430 
431 				offset = p->offset;
432 				list_for_each_entry(obj, &objects, st_link) {
433 					vma = i915_vma_instance(obj, vm, NULL);
434 					if (IS_ERR(vma))
435 						continue;
436 
437 					if (p->step < 0) {
438 						if (offset < hole_start + obj->base.size)
439 							break;
440 						offset -= obj->base.size;
441 					}
442 
443 					if (!drm_mm_node_allocated(&vma->node) ||
444 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
445 						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
446 						       __func__, p->name, vma->node.start, vma->node.size,
447 						       offset);
448 						err = -EINVAL;
449 						goto err;
450 					}
451 
452 					err = i915_vma_unbind(vma);
453 					if (err) {
454 						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
455 						       __func__, p->name, vma->node.start, vma->node.size,
456 						       err);
457 						goto err;
458 					}
459 
460 					if (p->step > 0) {
461 						if (offset + obj->base.size > hole_end)
462 							break;
463 						offset += obj->base.size;
464 					}
465 				}
466 
467 				offset = p->offset;
468 				list_for_each_entry_reverse(obj, &objects, st_link) {
469 					vma = i915_vma_instance(obj, vm, NULL);
470 					if (IS_ERR(vma))
471 						continue;
472 
473 					if (p->step < 0) {
474 						if (offset < hole_start + obj->base.size)
475 							break;
476 						offset -= obj->base.size;
477 					}
478 
479 					err = i915_vma_pin(vma, 0, 0, offset | flags);
480 					if (err) {
481 						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
482 						       __func__, p->name, err, npages, prime, offset);
483 						goto err;
484 					}
485 
486 					if (!drm_mm_node_allocated(&vma->node) ||
487 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
488 						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
489 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
490 						       offset);
491 						err = -EINVAL;
492 						goto err;
493 					}
494 
495 					i915_vma_unpin(vma);
496 
497 					if (p->step > 0) {
498 						if (offset + obj->base.size > hole_end)
499 							break;
500 						offset += obj->base.size;
501 					}
502 				}
503 
504 				offset = p->offset;
505 				list_for_each_entry_reverse(obj, &objects, st_link) {
506 					vma = i915_vma_instance(obj, vm, NULL);
507 					if (IS_ERR(vma))
508 						continue;
509 
510 					if (p->step < 0) {
511 						if (offset < hole_start + obj->base.size)
512 							break;
513 						offset -= obj->base.size;
514 					}
515 
516 					if (!drm_mm_node_allocated(&vma->node) ||
517 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
518 						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
519 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
520 						       offset);
521 						err = -EINVAL;
522 						goto err;
523 					}
524 
525 					err = i915_vma_unbind(vma);
526 					if (err) {
527 						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
528 						       __func__, p->name, vma->node.start, vma->node.size,
529 						       err);
530 						goto err;
531 					}
532 
533 					if (p->step > 0) {
534 						if (offset + obj->base.size > hole_end)
535 							break;
536 						offset += obj->base.size;
537 					}
538 				}
539 			}
540 
541 			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
542 					__func__, npages, prime)) {
543 				err = -EINTR;
544 				goto err;
545 			}
546 		}
547 
548 		close_object_list(&objects, vm);
549 		cleanup_freed_objects(vm->i915);
550 	}
551 
552 	return 0;
553 
554 err:
555 	close_object_list(&objects, vm);
556 	return err;
557 }
558 
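/*
 * For every prime number of pages that fits, walk a single object across
 * the hole, binding it at each successive offset and checking the node
 * lands exactly where requested before unbinding and moving on.
 */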
559 static int walk_hole(struct i915_address_space *vm,
560 		     u64 hole_start, u64 hole_end,
561 		     unsigned long end_time)
562 {
563 	const u64 hole_size = hole_end - hole_start;
564 	const unsigned long max_pages =
565 		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
566 	unsigned long flags;
567 	u64 size;
568 
569 	/* Try binding a single VMA in different positions within the hole */
570 
571 	flags = PIN_OFFSET_FIXED | PIN_USER;
572 	if (i915_is_ggtt(vm))
573 		flags |= PIN_GLOBAL;
574 
575 	for_each_prime_number_from(size, 1, max_pages) {
576 		struct drm_i915_gem_object *obj;
577 		struct i915_vma *vma;
578 		u64 addr;
579 		int err = 0;
580 
581 		obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
582 		if (IS_ERR(obj))
583 			break;
584 
585 		vma = i915_vma_instance(obj, vm, NULL);
586 		if (IS_ERR(vma)) {
587 			err = PTR_ERR(vma);
588 			goto err_put;
589 		}
590 
591 		for (addr = hole_start;
592 		     addr + obj->base.size < hole_end;
593 		     addr += obj->base.size) {
594 			err = i915_vma_pin(vma, 0, 0, addr | flags);
595 			if (err) {
596 				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
597 				       __func__, addr, vma->size,
598 				       hole_start, hole_end, err);
599 				goto err_close;
600 			}
601 			i915_vma_unpin(vma);
602 
603 			if (!drm_mm_node_allocated(&vma->node) ||
604 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
605 				pr_err("%s incorrect at %llx + %llx\n",
606 				       __func__, addr, vma->size);
607 				err = -EINVAL;
608 				goto err_close;
609 			}
610 
611 			err = i915_vma_unbind(vma);
612 			if (err) {
613 				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
614 				       __func__, addr, vma->size, err);
615 				goto err_close;
616 			}
617 
618 			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
619 
620 			if (igt_timeout(end_time,
621 					"%s timed out at %llx\n",
622 					__func__, addr)) {
623 				err = -EINTR;
624 				goto err_close;
625 			}
626 		}
627 
628 err_close:
629 		if (!i915_vma_is_ggtt(vma))
630 			i915_vma_close(vma);
631 err_put:
632 		i915_gem_object_put(obj);
633 		if (err)
634 			return err;
635 
636 		cleanup_freed_objects(vm->i915);
637 	}
638 
639 	return 0;
640 }
641 
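/*
 * Straddle every power-of-two boundary inside the hole with a two-page
 * object, checking that PIN_OFFSET_FIXED places the node exactly across
 * the boundary each time.
 */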
642 static int pot_hole(struct i915_address_space *vm,
643 		    u64 hole_start, u64 hole_end,
644 		    unsigned long end_time)
645 {
646 	struct drm_i915_gem_object *obj;
647 	struct i915_vma *vma;
648 	unsigned long flags;
649 	unsigned int pot;
650 	int err = 0;
651 
652 	flags = PIN_OFFSET_FIXED | PIN_USER;
653 	if (i915_is_ggtt(vm))
654 		flags |= PIN_GLOBAL;
655 
656 	obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
657 	if (IS_ERR(obj))
658 		return PTR_ERR(obj);
659 
660 	vma = i915_vma_instance(obj, vm, NULL);
661 	if (IS_ERR(vma)) {
662 		err = PTR_ERR(vma);
663 		goto err_obj;
664 	}
665 
666 	/* Insert a pair of pages across every pot boundary within the hole */
667 	for (pot = fls64(hole_end - 1) - 1;
668 	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
669 	     pot--) {
670 		u64 step = BIT_ULL(pot);
671 		u64 addr;
672 
673 		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
674 		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
675 		     addr += step) {
676 			err = i915_vma_pin(vma, 0, 0, addr | flags);
677 			if (err) {
678 				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
679 				       __func__,
680 				       addr,
681 				       hole_start, hole_end,
682 				       err);
683 				goto err;
684 			}
685 
686 			if (!drm_mm_node_allocated(&vma->node) ||
687 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
688 				pr_err("%s incorrect at %llx + %llx\n",
689 				       __func__, addr, vma->size);
690 				i915_vma_unpin(vma);
691 				err = i915_vma_unbind(vma);
692 				err = -EINVAL;
693 				goto err;
694 			}
695 
696 			i915_vma_unpin(vma);
697 			err = i915_vma_unbind(vma);
698 			GEM_BUG_ON(err);
699 		}
700 
701 		if (igt_timeout(end_time,
702 				"%s timed out after %d/%d\n",
703 				__func__, pot, fls64(hole_end - 1) - 1)) {
704 			err = -EINTR;
705 			goto err;
706 		}
707 	}
708 
709 err:
710 	if (!i915_vma_is_ggtt(vma))
711 		i915_vma_close(vma);
712 err_obj:
713 	i915_gem_object_put(obj);
714 	return err;
715 }
716 
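/*
 * Like lowlevel_hole, but through the full i915_vma_pin() path: bind one
 * object per size at randomly ordered offsets throughout the hole and
 * verify each binding lands at the requested address.
 */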
717 static int drunk_hole(struct i915_address_space *vm,
718 		      u64 hole_start, u64 hole_end,
719 		      unsigned long end_time)
720 {
721 	I915_RND_STATE(prng);
722 	unsigned int size;
723 	unsigned long flags;
724 
725 	flags = PIN_OFFSET_FIXED | PIN_USER;
726 	if (i915_is_ggtt(vm))
727 		flags |= PIN_GLOBAL;
728 
729 	/* Keep creating larger objects until one cannot fit into the hole */
730 	for (size = 12; (hole_end - hole_start) >> size; size++) {
731 		struct drm_i915_gem_object *obj;
732 		unsigned int *order, count, n;
733 		struct i915_vma *vma;
734 		u64 hole_size;
735 		int err = -ENODEV;
736 
737 		hole_size = (hole_end - hole_start) >> size;
738 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
739 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
740 		count = hole_size >> 1;
741 		if (!count) {
742 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
743 				 __func__, hole_start, hole_end, size, hole_size);
744 			break;
745 		}
746 
747 		do {
748 			order = i915_random_order(count, &prng);
749 			if (order)
750 				break;
751 		} while (count >>= 1);
752 		if (!count)
753 			return -ENOMEM;
754 		GEM_BUG_ON(!order);
755 
756 		/* Ignore allocation failures (i.e. don't report them as
757 		 * a test failure) as we are purposefully allocating very
758 		 * large objects without checking that we have sufficient
759 		 * memory. We expect to hit -ENOMEM.
760 		 */
761 
762 		obj = fake_dma_object(vm->i915, BIT_ULL(size));
763 		if (IS_ERR(obj)) {
764 			kfree(order);
765 			break;
766 		}
767 
768 		vma = i915_vma_instance(obj, vm, NULL);
769 		if (IS_ERR(vma)) {
770 			err = PTR_ERR(vma);
771 			goto err_obj;
772 		}
773 
774 		GEM_BUG_ON(vma->size != BIT_ULL(size));
775 
776 		for (n = 0; n < count; n++) {
777 			u64 addr = hole_start + order[n] * BIT_ULL(size);
778 
779 			err = i915_vma_pin(vma, 0, 0, addr | flags);
780 			if (err) {
781 				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
782 				       __func__,
783 				       addr, BIT_ULL(size),
784 				       hole_start, hole_end,
785 				       err);
786 				goto err;
787 			}
788 
789 			if (!drm_mm_node_allocated(&vma->node) ||
790 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
791 				pr_err("%s incorrect at %llx + %llx\n",
792 				       __func__, addr, BIT_ULL(size));
793 				i915_vma_unpin(vma);
794 				err = i915_vma_unbind(vma);
795 				err = -EINVAL;
796 				goto err;
797 			}
798 
799 			i915_vma_unpin(vma);
800 			err = i915_vma_unbind(vma);
801 			GEM_BUG_ON(err);
802 
803 			if (igt_timeout(end_time,
804 					"%s timed out after %d/%d\n",
805 					__func__, n, count)) {
806 				err = -EINTR;
807 				goto err;
808 			}
809 		}
810 
811 err:
812 		if (!i915_vma_is_ggtt(vma))
813 			i915_vma_close(vma);
814 err_obj:
815 		i915_gem_object_put(obj);
816 		kfree(order);
817 		if (err)
818 			return err;
819 
820 		cleanup_freed_objects(vm->i915);
821 	}
822 
823 	return 0;
824 }
825 
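/*
 * March progressively larger objects through the hole while shrink_hole()
 * below injects allocation faults at prime intervals, checking that a
 * failed page-table allocation unwinds cleanly.
 */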
826 static int __shrink_hole(struct i915_address_space *vm,
827 			 u64 hole_start, u64 hole_end,
828 			 unsigned long end_time)
829 {
830 	struct drm_i915_gem_object *obj;
831 	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
832 	unsigned int order = 12;
833 	LIST_HEAD(objects);
834 	int err = 0;
835 	u64 addr;
836 
837 	/* Keep creating larger objects until one cannot fit into the hole */
838 	for (addr = hole_start; addr < hole_end; ) {
839 		struct i915_vma *vma;
840 		u64 size = BIT_ULL(order++);
841 
842 		size = min(size, hole_end - addr);
843 		obj = fake_dma_object(vm->i915, size);
844 		if (IS_ERR(obj)) {
845 			err = PTR_ERR(obj);
846 			break;
847 		}
848 
849 		list_add(&obj->st_link, &objects);
850 
851 		vma = i915_vma_instance(obj, vm, NULL);
852 		if (IS_ERR(vma)) {
853 			err = PTR_ERR(vma);
854 			break;
855 		}
856 
857 		GEM_BUG_ON(vma->size != size);
858 
859 		err = i915_vma_pin(vma, 0, 0, addr | flags);
860 		if (err) {
861 			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
862 			       __func__, addr, size, hole_start, hole_end, err);
863 			break;
864 		}
865 
866 		if (!drm_mm_node_allocated(&vma->node) ||
867 		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
868 			pr_err("%s incorrect at %llx + %llx\n",
869 			       __func__, addr, size);
870 			i915_vma_unpin(vma);
871 			err = i915_vma_unbind(vma);
872 			err = -EINVAL;
873 			break;
874 		}
875 
876 		i915_vma_unpin(vma);
877 		addr += size;
878 
879 		/*
880 		 * Since we are injecting allocation faults at random intervals,
881 		 * wait for this allocation to complete before we change the
882 		 * fault injection.
883 		 */
884 		err = i915_vma_sync(vma);
885 		if (err)
886 			break;
887 
888 		if (igt_timeout(end_time,
889 				"%s timed out at offset %llx [%llx - %llx]\n",
890 				__func__, addr, hole_start, hole_end)) {
891 			err = -EINTR;
892 			break;
893 		}
894 	}
895 
896 	close_object_list(&objects, vm);
897 	cleanup_freed_objects(vm->i915);
898 	return err;
899 }
900 
901 static int shrink_hole(struct i915_address_space *vm,
902 		       u64 hole_start, u64 hole_end,
903 		       unsigned long end_time)
904 {
905 	unsigned long prime;
906 	int err;
907 
908 	vm->fault_attr.probability = 999;
909 	atomic_set(&vm->fault_attr.times, -1);
910 
911 	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
912 		vm->fault_attr.interval = prime;
913 		err = __shrink_hole(vm, hole_start, hole_end, end_time);
914 		if (err)
915 			break;
916 	}
917 
918 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
919 
920 	return err;
921 }
922 
923 static int shrink_boom(struct i915_address_space *vm,
924 		       u64 hole_start, u64 hole_end,
925 		       unsigned long end_time)
926 {
927 	unsigned int sizes[] = { SZ_2M, SZ_1G };
928 	struct drm_i915_gem_object *purge;
929 	struct drm_i915_gem_object *explode;
930 	int err;
931 	int i;
932 
933 	/*
934 	 * Catch the case which shrink_hole seems to miss. The setup here
935 	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
936 	 * ensuring that all vma associated with the respective pd/pdp are
937 	 * unpinned at the time.
938 	 */
939 
940 	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
941 		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
942 		unsigned int size = sizes[i];
943 		struct i915_vma *vma;
944 
945 		purge = fake_dma_object(vm->i915, size);
946 		if (IS_ERR(purge))
947 			return PTR_ERR(purge);
948 
949 		vma = i915_vma_instance(purge, vm, NULL);
950 		if (IS_ERR(vma)) {
951 			err = PTR_ERR(vma);
952 			goto err_purge;
953 		}
954 
955 		err = i915_vma_pin(vma, 0, 0, flags);
956 		if (err)
957 			goto err_purge;
958 
959 		/* Should now be ripe for purging */
960 		i915_vma_unpin(vma);
961 
962 		explode = fake_dma_object(vm->i915, size);
963 		if (IS_ERR(explode)) {
964 			err = PTR_ERR(explode);
965 			goto err_purge;
966 		}
967 
968 		vm->fault_attr.probability = 100;
969 		vm->fault_attr.interval = 1;
970 		atomic_set(&vm->fault_attr.times, -1);
971 
972 		vma = i915_vma_instance(explode, vm, NULL);
973 		if (IS_ERR(vma)) {
974 			err = PTR_ERR(vma);
975 			goto err_explode;
976 		}
977 
978 		err = i915_vma_pin(vma, 0, 0, flags | size);
979 		if (err)
980 			goto err_explode;
981 
982 		i915_vma_unpin(vma);
983 
984 		i915_gem_object_put(purge);
985 		i915_gem_object_put(explode);
986 
987 		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
988 		cleanup_freed_objects(vm->i915);
989 	}
990 
991 	return 0;
992 
993 err_explode:
994 	i915_gem_object_put(explode);
995 err_purge:
996 	i915_gem_object_put(purge);
997 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
998 	return err;
999 }
1000 
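/*
 * Create a full ppgtt (skipped on platforms without full ppgtt support)
 * and run the given hole exerciser over its entire range; a mock drm file
 * is held open for the duration.
 */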
1001 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
1002 			  int (*func)(struct i915_address_space *vm,
1003 				      u64 hole_start, u64 hole_end,
1004 				      unsigned long end_time))
1005 {
1006 	struct i915_ppgtt *ppgtt;
1007 	IGT_TIMEOUT(end_time);
1008 	struct file *file;
1009 	int err;
1010 
1011 	if (!HAS_FULL_PPGTT(dev_priv))
1012 		return 0;
1013 
1014 	file = mock_file(dev_priv);
1015 	if (IS_ERR(file))
1016 		return PTR_ERR(file);
1017 
1018 	ppgtt = i915_ppgtt_create(&dev_priv->gt);
1019 	if (IS_ERR(ppgtt)) {
1020 		err = PTR_ERR(ppgtt);
1021 		goto out_free;
1022 	}
1023 	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
1024 	GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));
1025 
1026 	err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
1027 
1028 	i915_vm_put(&ppgtt->vm);
1029 
1030 out_free:
1031 	fput(file);
1032 	return err;
1033 }
1034 
1035 static int igt_ppgtt_fill(void *arg)
1036 {
1037 	return exercise_ppgtt(arg, fill_hole);
1038 }
1039 
1040 static int igt_ppgtt_walk(void *arg)
1041 {
1042 	return exercise_ppgtt(arg, walk_hole);
1043 }
1044 
1045 static int igt_ppgtt_pot(void *arg)
1046 {
1047 	return exercise_ppgtt(arg, pot_hole);
1048 }
1049 
1050 static int igt_ppgtt_drunk(void *arg)
1051 {
1052 	return exercise_ppgtt(arg, drunk_hole);
1053 }
1054 
1055 static int igt_ppgtt_lowlevel(void *arg)
1056 {
1057 	return exercise_ppgtt(arg, lowlevel_hole);
1058 }
1059 
1060 static int igt_ppgtt_shrink(void *arg)
1061 {
1062 	return exercise_ppgtt(arg, shrink_hole);
1063 }
1064 
1065 static int igt_ppgtt_shrink_boom(void *arg)
1066 {
1067 	return exercise_ppgtt(arg, shrink_boom);
1068 }
1069 
1070 static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
1071 {
1072 	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
1073 	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
1074 
1075 	if (a->start < b->start)
1076 		return -1;
1077 	else
1078 		return 1;
1079 }
1080 
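/*
 * Run the given exerciser over every hole currently left in the live GGTT,
 * restarting the hole walk after each one (the drm_mm has been modified)
 * and using 'last' to skip holes already visited.
 */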
1081 static int exercise_ggtt(struct drm_i915_private *i915,
1082 			 int (*func)(struct i915_address_space *vm,
1083 				     u64 hole_start, u64 hole_end,
1084 				     unsigned long end_time))
1085 {
1086 	struct i915_ggtt *ggtt = &i915->ggtt;
1087 	u64 hole_start, hole_end, last = 0;
1088 	struct drm_mm_node *node;
1089 	IGT_TIMEOUT(end_time);
1090 	int err = 0;
1091 
1092 restart:
1093 	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
1094 	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
1095 		if (hole_start < last)
1096 			continue;
1097 
1098 		if (ggtt->vm.mm.color_adjust)
1099 			ggtt->vm.mm.color_adjust(node, 0,
1100 						 &hole_start, &hole_end);
1101 		if (hole_start >= hole_end)
1102 			continue;
1103 
1104 		err = func(&ggtt->vm, hole_start, hole_end, end_time);
1105 		if (err)
1106 			break;
1107 
1108 		/* As we have manipulated the drm_mm, the list may be corrupt */
1109 		last = hole_end;
1110 		goto restart;
1111 	}
1112 
1113 	return err;
1114 }
1115 
1116 static int igt_ggtt_fill(void *arg)
1117 {
1118 	return exercise_ggtt(arg, fill_hole);
1119 }
1120 
1121 static int igt_ggtt_walk(void *arg)
1122 {
1123 	return exercise_ggtt(arg, walk_hole);
1124 }
1125 
1126 static int igt_ggtt_pot(void *arg)
1127 {
1128 	return exercise_ggtt(arg, pot_hole);
1129 }
1130 
1131 static int igt_ggtt_drunk(void *arg)
1132 {
1133 	return exercise_ggtt(arg, drunk_hole);
1134 }
1135 
1136 static int igt_ggtt_lowlevel(void *arg)
1137 {
1138 	return exercise_ggtt(arg, lowlevel_hole);
1139 }
1140 
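/*
 * Reserve a scratch range in the mappable aperture, point every slot at
 * the same backing page with insert_page(), then write and read back
 * through the aperture in random order to check each PTE was really
 * rewritten.
 */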
1141 static int igt_ggtt_page(void *arg)
1142 {
1143 	const unsigned int count = PAGE_SIZE/sizeof(u32);
1144 	I915_RND_STATE(prng);
1145 	struct drm_i915_private *i915 = arg;
1146 	struct i915_ggtt *ggtt = &i915->ggtt;
1147 	struct drm_i915_gem_object *obj;
1148 	intel_wakeref_t wakeref;
1149 	struct drm_mm_node tmp;
1150 	unsigned int *order, n;
1151 	int err;
1152 
1153 	if (!i915_ggtt_has_aperture(ggtt))
1154 		return 0;
1155 
1156 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1157 	if (IS_ERR(obj))
1158 		return PTR_ERR(obj);
1159 
1160 	err = i915_gem_object_pin_pages(obj);
1161 	if (err)
1162 		goto out_free;
1163 
1164 	memset(&tmp, 0, sizeof(tmp));
1165 	mutex_lock(&ggtt->vm.mutex);
1166 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
1167 					  count * PAGE_SIZE, 0,
1168 					  I915_COLOR_UNEVICTABLE,
1169 					  0, ggtt->mappable_end,
1170 					  DRM_MM_INSERT_LOW);
1171 	mutex_unlock(&ggtt->vm.mutex);
1172 	if (err)
1173 		goto out_unpin;
1174 
1175 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1176 
1177 	for (n = 0; n < count; n++) {
1178 		u64 offset = tmp.start + n * PAGE_SIZE;
1179 
1180 		ggtt->vm.insert_page(&ggtt->vm,
1181 				     i915_gem_object_get_dma_address(obj, 0),
1182 				     offset, I915_CACHE_NONE, 0);
1183 	}
1184 
1185 	order = i915_random_order(count, &prng);
1186 	if (!order) {
1187 		err = -ENOMEM;
1188 		goto out_remove;
1189 	}
1190 
1191 	for (n = 0; n < count; n++) {
1192 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1193 		u32 __iomem *vaddr;
1194 
1195 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1196 		iowrite32(n, vaddr + n);
1197 		io_mapping_unmap_atomic(vaddr);
1198 	}
1199 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
1200 
1201 	i915_random_reorder(order, count, &prng);
1202 	for (n = 0; n < count; n++) {
1203 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1204 		u32 __iomem *vaddr;
1205 		u32 val;
1206 
1207 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1208 		val = ioread32(vaddr + n);
1209 		io_mapping_unmap_atomic(vaddr);
1210 
1211 		if (val != n) {
1212 			pr_err("insert page failed: found %d, expected %d\n",
1213 			       val, n);
1214 			err = -EINVAL;
1215 			break;
1216 		}
1217 	}
1218 
1219 	kfree(order);
1220 out_remove:
1221 	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
1222 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1223 	mutex_lock(&ggtt->vm.mutex);
1224 	drm_mm_remove_node(&tmp);
1225 	mutex_unlock(&ggtt->vm.mutex);
1226 out_unpin:
1227 	i915_gem_object_unpin_pages(obj);
1228 out_free:
1229 	i915_gem_object_put(obj);
1230 	return err;
1231 }
1232 
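/*
 * Fake just enough of the binding bookkeeping (bind count, page pinning
 * and the vm's bound_list) for the mock GGTT tests, which never go through
 * the real bind paths.
 */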
1233 static void track_vma_bind(struct i915_vma *vma)
1234 {
1235 	struct drm_i915_gem_object *obj = vma->obj;
1236 
1237 	atomic_inc(&obj->bind_count); /* track for eviction later */
1238 	__i915_gem_object_pin_pages(obj);
1239 
1240 	GEM_BUG_ON(vma->pages);
1241 	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
1242 	__i915_gem_object_pin_pages(obj);
1243 	vma->pages = obj->mm.pages;
1244 
1245 	mutex_lock(&vma->vm->mutex);
1246 	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
1247 	mutex_unlock(&vma->vm->mutex);
1248 }
1249 
1250 static int exercise_mock(struct drm_i915_private *i915,
1251 			 int (*func)(struct i915_address_space *vm,
1252 				     u64 hole_start, u64 hole_end,
1253 				     unsigned long end_time))
1254 {
1255 	const u64 limit = totalram_pages() << PAGE_SHIFT;
1256 	struct i915_address_space *vm;
1257 	struct i915_gem_context *ctx;
1258 	IGT_TIMEOUT(end_time);
1259 	int err;
1260 
1261 	ctx = mock_context(i915, "mock");
1262 	if (!ctx)
1263 		return -ENOMEM;
1264 
1265 	vm = i915_gem_context_get_vm_rcu(ctx);
1266 	err = func(vm, 0, min(vm->total, limit), end_time);
1267 	i915_vm_put(vm);
1268 
1269 	mock_context_close(ctx);
1270 	return err;
1271 }
1272 
1273 static int igt_mock_fill(void *arg)
1274 {
1275 	struct i915_ggtt *ggtt = arg;
1276 
1277 	return exercise_mock(ggtt->vm.i915, fill_hole);
1278 }
1279 
1280 static int igt_mock_walk(void *arg)
1281 {
1282 	struct i915_ggtt *ggtt = arg;
1283 
1284 	return exercise_mock(ggtt->vm.i915, walk_hole);
1285 }
1286 
1287 static int igt_mock_pot(void *arg)
1288 {
1289 	struct i915_ggtt *ggtt = arg;
1290 
1291 	return exercise_mock(ggtt->vm.i915, pot_hole);
1292 }
1293 
1294 static int igt_mock_drunk(void *arg)
1295 {
1296 	struct i915_ggtt *ggtt = arg;
1297 
1298 	return exercise_mock(ggtt->vm.i915, drunk_hole);
1299 }
1300 
1301 static int igt_gtt_reserve(void *arg)
1302 {
1303 	struct i915_ggtt *ggtt = arg;
1304 	struct drm_i915_gem_object *obj, *on;
1305 	I915_RND_STATE(prng);
1306 	LIST_HEAD(objects);
1307 	u64 total;
1308 	int err = -ENODEV;
1309 
1310 	/* i915_gem_gtt_reserve() tries to reserve the precise range
1311 	 * for the node, and evicts if it has to. So our test checks that
1312 	 * it can give us the requested space and prevent overlaps.
1313 	 */
1314 
1315 	/* Start by filling the GGTT */
1316 	for (total = 0;
1317 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1318 	     total += 2 * I915_GTT_PAGE_SIZE) {
1319 		struct i915_vma *vma;
1320 
1321 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1322 						      2 * PAGE_SIZE);
1323 		if (IS_ERR(obj)) {
1324 			err = PTR_ERR(obj);
1325 			goto out;
1326 		}
1327 
1328 		err = i915_gem_object_pin_pages(obj);
1329 		if (err) {
1330 			i915_gem_object_put(obj);
1331 			goto out;
1332 		}
1333 
1334 		list_add(&obj->st_link, &objects);
1335 
1336 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1337 		if (IS_ERR(vma)) {
1338 			err = PTR_ERR(vma);
1339 			goto out;
1340 		}
1341 
1342 		mutex_lock(&ggtt->vm.mutex);
1343 		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1344 					   obj->base.size,
1345 					   total,
1346 					   obj->cache_level,
1347 					   0);
1348 		mutex_unlock(&ggtt->vm.mutex);
1349 		if (err) {
1350 			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1351 			       total, ggtt->vm.total, err);
1352 			goto out;
1353 		}
1354 		track_vma_bind(vma);
1355 
1356 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1357 		if (vma->node.start != total ||
1358 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1359 			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1360 			       vma->node.start, vma->node.size,
1361 			       total, 2*I915_GTT_PAGE_SIZE);
1362 			err = -EINVAL;
1363 			goto out;
1364 		}
1365 	}
1366 
1367 	/* Now we start forcing evictions */
1368 	for (total = I915_GTT_PAGE_SIZE;
1369 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1370 	     total += 2 * I915_GTT_PAGE_SIZE) {
1371 		struct i915_vma *vma;
1372 
1373 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1374 						      2 * PAGE_SIZE);
1375 		if (IS_ERR(obj)) {
1376 			err = PTR_ERR(obj);
1377 			goto out;
1378 		}
1379 
1380 		err = i915_gem_object_pin_pages(obj);
1381 		if (err) {
1382 			i915_gem_object_put(obj);
1383 			goto out;
1384 		}
1385 
1386 		list_add(&obj->st_link, &objects);
1387 
1388 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1389 		if (IS_ERR(vma)) {
1390 			err = PTR_ERR(vma);
1391 			goto out;
1392 		}
1393 
1394 		mutex_lock(&ggtt->vm.mutex);
1395 		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1396 					   obj->base.size,
1397 					   total,
1398 					   obj->cache_level,
1399 					   0);
1400 		mutex_unlock(&ggtt->vm.mutex);
1401 		if (err) {
1402 			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1403 			       total, ggtt->vm.total, err);
1404 			goto out;
1405 		}
1406 		track_vma_bind(vma);
1407 
1408 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1409 		if (vma->node.start != total ||
1410 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1411 			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1412 			       vma->node.start, vma->node.size,
1413 			       total, 2*I915_GTT_PAGE_SIZE);
1414 			err = -EINVAL;
1415 			goto out;
1416 		}
1417 	}
1418 
1419 	/* And then try at random */
1420 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1421 		struct i915_vma *vma;
1422 		u64 offset;
1423 
1424 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1425 		if (IS_ERR(vma)) {
1426 			err = PTR_ERR(vma);
1427 			goto out;
1428 		}
1429 
1430 		err = i915_vma_unbind(vma);
1431 		if (err) {
1432 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1433 			goto out;
1434 		}
1435 
1436 		offset = igt_random_offset(&prng,
1437 					   0, ggtt->vm.total,
1438 					   2 * I915_GTT_PAGE_SIZE,
1439 					   I915_GTT_MIN_ALIGNMENT);
1440 
1441 		mutex_lock(&ggtt->vm.mutex);
1442 		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1443 					   obj->base.size,
1444 					   offset,
1445 					   obj->cache_level,
1446 					   0);
1447 		mutex_unlock(&ggtt->vm.mutex);
1448 		if (err) {
1449 			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1450 			       total, ggtt->vm.total, err);
1451 			goto out;
1452 		}
1453 		track_vma_bind(vma);
1454 
1455 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1456 		if (vma->node.start != offset ||
1457 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1458 			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1459 			       vma->node.start, vma->node.size,
1460 			       offset, 2*I915_GTT_PAGE_SIZE);
1461 			err = -EINVAL;
1462 			goto out;
1463 		}
1464 	}
1465 
1466 out:
1467 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1468 		i915_gem_object_unpin_pages(obj);
1469 		i915_gem_object_put(obj);
1470 	}
1471 	return err;
1472 }
1473 
1474 static int igt_gtt_insert(void *arg)
1475 {
1476 	struct i915_ggtt *ggtt = arg;
1477 	struct drm_i915_gem_object *obj, *on;
1478 	struct drm_mm_node tmp = {};
1479 	const struct invalid_insert {
1480 		u64 size;
1481 		u64 alignment;
1482 		u64 start, end;
1483 	} invalid_insert[] = {
1484 		{
1485 			ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
1486 			0, ggtt->vm.total,
1487 		},
1488 		{
1489 			2*I915_GTT_PAGE_SIZE, 0,
1490 			0, I915_GTT_PAGE_SIZE,
1491 		},
1492 		{
1493 			-(u64)I915_GTT_PAGE_SIZE, 0,
1494 			0, 4*I915_GTT_PAGE_SIZE,
1495 		},
1496 		{
1497 			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1498 			0, 4*I915_GTT_PAGE_SIZE,
1499 		},
1500 		{
1501 			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1502 			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1503 		},
1504 		{}
1505 	}, *ii;
1506 	LIST_HEAD(objects);
1507 	u64 total;
1508 	int err = -ENODEV;
1509 
1510 	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
1511 	 * to the node, evicting if required.
1512 	 */
1513 
1514 	/* Check a couple of obviously invalid requests */
1515 	for (ii = invalid_insert; ii->size; ii++) {
1516 		mutex_lock(&ggtt->vm.mutex);
1517 		err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
1518 					  ii->size, ii->alignment,
1519 					  I915_COLOR_UNEVICTABLE,
1520 					  ii->start, ii->end,
1521 					  0);
1522 		mutex_unlock(&ggtt->vm.mutex);
1523 		if (err != -ENOSPC) {
1524 			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
1525 			       ii->size, ii->alignment, ii->start, ii->end,
1526 			       err);
1527 			return -EINVAL;
1528 		}
1529 	}
1530 
1531 	/* Start by filling the GGTT */
1532 	for (total = 0;
1533 	     total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1534 	     total += I915_GTT_PAGE_SIZE) {
1535 		struct i915_vma *vma;
1536 
1537 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1538 						      I915_GTT_PAGE_SIZE);
1539 		if (IS_ERR(obj)) {
1540 			err = PTR_ERR(obj);
1541 			goto out;
1542 		}
1543 
1544 		err = i915_gem_object_pin_pages(obj);
1545 		if (err) {
1546 			i915_gem_object_put(obj);
1547 			goto out;
1548 		}
1549 
1550 		list_add(&obj->st_link, &objects);
1551 
1552 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1553 		if (IS_ERR(vma)) {
1554 			err = PTR_ERR(vma);
1555 			goto out;
1556 		}
1557 
1558 		mutex_lock(&ggtt->vm.mutex);
1559 		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1560 					  obj->base.size, 0, obj->cache_level,
1561 					  0, ggtt->vm.total,
1562 					  0);
1563 		mutex_unlock(&ggtt->vm.mutex);
1564 		if (err == -ENOSPC) {
1565 			/* maxed out the GGTT space */
1566 			i915_gem_object_put(obj);
1567 			break;
1568 		}
1569 		if (err) {
1570 			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1571 			       total, ggtt->vm.total, err);
1572 			goto out;
1573 		}
1574 		track_vma_bind(vma);
1575 		__i915_vma_pin(vma);
1576 
1577 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1578 	}
1579 
1580 	list_for_each_entry(obj, &objects, st_link) {
1581 		struct i915_vma *vma;
1582 
1583 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1584 		if (IS_ERR(vma)) {
1585 			err = PTR_ERR(vma);
1586 			goto out;
1587 		}
1588 
1589 		if (!drm_mm_node_allocated(&vma->node)) {
1590 			pr_err("VMA was unexpectedly evicted!\n");
1591 			err = -EINVAL;
1592 			goto out;
1593 		}
1594 
1595 		__i915_vma_unpin(vma);
1596 	}
1597 
1598 	/* If we then reinsert, we should find the same hole */
1599 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1600 		struct i915_vma *vma;
1601 		u64 offset;
1602 
1603 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1604 		if (IS_ERR(vma)) {
1605 			err = PTR_ERR(vma);
1606 			goto out;
1607 		}
1608 
1609 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1610 		offset = vma->node.start;
1611 
1612 		err = i915_vma_unbind(vma);
1613 		if (err) {
1614 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1615 			goto out;
1616 		}
1617 
1618 		mutex_lock(&ggtt->vm.mutex);
1619 		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1620 					  obj->base.size, 0, obj->cache_level,
1621 					  0, ggtt->vm.total,
1622 					  0);
1623 		mutex_unlock(&ggtt->vm.mutex);
1624 		if (err) {
1625 			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1626 			       total, ggtt->vm.total, err);
1627 			goto out;
1628 		}
1629 		track_vma_bind(vma);
1630 
1631 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1632 		if (vma->node.start != offset) {
1633 			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1634 			       offset, vma->node.start);
1635 			err = -EINVAL;
1636 			goto out;
1637 		}
1638 	}
1639 
1640 	/* And then force evictions */
1641 	for (total = 0;
1642 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1643 	     total += 2 * I915_GTT_PAGE_SIZE) {
1644 		struct i915_vma *vma;
1645 
1646 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1647 						      2 * I915_GTT_PAGE_SIZE);
1648 		if (IS_ERR(obj)) {
1649 			err = PTR_ERR(obj);
1650 			goto out;
1651 		}
1652 
1653 		err = i915_gem_object_pin_pages(obj);
1654 		if (err) {
1655 			i915_gem_object_put(obj);
1656 			goto out;
1657 		}
1658 
1659 		list_add(&obj->st_link, &objects);
1660 
1661 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1662 		if (IS_ERR(vma)) {
1663 			err = PTR_ERR(vma);
1664 			goto out;
1665 		}
1666 
1667 		mutex_lock(&ggtt->vm.mutex);
1668 		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1669 					  obj->base.size, 0, obj->cache_level,
1670 					  0, ggtt->vm.total,
1671 					  0);
1672 		mutex_unlock(&ggtt->vm.mutex);
1673 		if (err) {
1674 			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1675 			       total, ggtt->vm.total, err);
1676 			goto out;
1677 		}
1678 		track_vma_bind(vma);
1679 
1680 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1681 	}
1682 
1683 out:
1684 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1685 		i915_gem_object_unpin_pages(obj);
1686 		i915_gem_object_put(obj);
1687 	}
1688 	return err;
1689 }
1690 
1691 int i915_gem_gtt_mock_selftests(void)
1692 {
1693 	static const struct i915_subtest tests[] = {
1694 		SUBTEST(igt_mock_drunk),
1695 		SUBTEST(igt_mock_walk),
1696 		SUBTEST(igt_mock_pot),
1697 		SUBTEST(igt_mock_fill),
1698 		SUBTEST(igt_gtt_reserve),
1699 		SUBTEST(igt_gtt_insert),
1700 	};
1701 	struct drm_i915_private *i915;
1702 	struct i915_ggtt *ggtt;
1703 	int err;
1704 
1705 	i915 = mock_gem_device();
1706 	if (!i915)
1707 		return -ENOMEM;
1708 
1709 	ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
1710 	if (!ggtt) {
1711 		err = -ENOMEM;
1712 		goto out_put;
1713 	}
1714 	mock_init_ggtt(i915, ggtt);
1715 
1716 	err = i915_subtests(tests, ggtt);
1717 
1718 	mock_device_flush(i915);
1719 	i915_gem_drain_freed_objects(i915);
1720 	mock_fini_ggtt(ggtt);
1721 	kfree(ggtt);
1722 out_put:
1723 	drm_dev_put(&i915->drm);
1724 	return err;
1725 }
1726 
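/* Submit an empty request on the context and briefly wait for it to retire. */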
1727 static int context_sync(struct intel_context *ce)
1728 {
1729 	struct i915_request *rq;
1730 	long timeout;
1731 
1732 	rq = intel_context_create_request(ce);
1733 	if (IS_ERR(rq))
1734 		return PTR_ERR(rq);
1735 
1736 	i915_request_get(rq);
1737 	i915_request_add(rq);
1738 
1739 	timeout = i915_request_wait(rq, 0, HZ / 5);
1740 	i915_request_put(rq);
1741 
1742 	return timeout < 0 ? -EIO : 0;
1743 }
1744 
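/*
 * Emit a batch buffer start at the given address, preceded by an init
 * breadcrumb where available so a hang can be detected, and return the
 * request with a reference held for the caller.
 */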
1745 static struct i915_request *
1746 submit_batch(struct intel_context *ce, u64 addr)
1747 {
1748 	struct i915_request *rq;
1749 	int err;
1750 
1751 	rq = intel_context_create_request(ce);
1752 	if (IS_ERR(rq))
1753 		return rq;
1754 
1755 	err = 0;
1756 	if (rq->engine->emit_init_breadcrumb) /* detect a hang */
1757 		err = rq->engine->emit_init_breadcrumb(rq);
1758 	if (err == 0)
1759 		err = rq->engine->emit_bb_start(rq, addr, 0, 0);
1760 
1761 	if (err == 0)
1762 		i915_request_get(rq);
1763 	i915_request_add(rq);
1764 
1765 	return err ? ERR_PTR(err) : rq;
1766 }
1767 
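/*
 * Each 64-byte slot of the batch built by igt_cs_tlb() stores a dword and
 * then jumps back to itself; spinner() returns the dword that end_spin()
 * flips to MI_BATCH_BUFFER_END so the spin loop (and its request) can
 * complete.
 */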
1768 static u32 *spinner(u32 *batch, int i)
1769 {
1770 	return batch + i * 64 / sizeof(*batch) + 4;
1771 }
1772 
1773 static void end_spin(u32 *batch, int i)
1774 {
1775 	*spinner(batch, i) = MI_BATCH_BUFFER_END;
1776 	wmb();
1777 }
1778 
1779 static int igt_cs_tlb(void *arg)
1780 {
1781 	const unsigned int count = PAGE_SIZE / 64;
1782 	const unsigned int chunk_size = count * PAGE_SIZE;
1783 	struct drm_i915_private *i915 = arg;
1784 	struct drm_i915_gem_object *bbe, *act, *out;
1785 	struct i915_gem_engines_iter it;
1786 	struct i915_address_space *vm;
1787 	struct i915_gem_context *ctx;
1788 	struct intel_context *ce;
1789 	struct i915_vma *vma;
1790 	I915_RND_STATE(prng);
1791 	struct file *file;
1792 	unsigned int i;
1793 	u32 *result;
1794 	u32 *batch;
1795 	int err = 0;
1796 
1797 	/*
1798 	 * Our mission here is to fool the hardware into executing something
1799 	 * from scratch, as it has not seen the batch move (due to the missing
1800 	 * TLB invalidate).
1801 	 */
1802 
1803 	file = mock_file(i915);
1804 	if (IS_ERR(file))
1805 		return PTR_ERR(file);
1806 
1807 	ctx = live_context(i915, file);
1808 	if (IS_ERR(ctx)) {
1809 		err = PTR_ERR(ctx);
1810 		goto out_unlock;
1811 	}
1812 
1813 	vm = i915_gem_context_get_vm_rcu(ctx);
1814 	if (i915_is_ggtt(vm))
1815 		goto out_vm;
1816 
1817 	/* Create two pages: a dummy to prefill the TLB with, and the intended target */
1818 	bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
1819 	if (IS_ERR(bbe)) {
1820 		err = PTR_ERR(bbe);
1821 		goto out_vm;
1822 	}
1823 
1824 	batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
1825 	if (IS_ERR(batch)) {
1826 		err = PTR_ERR(batch);
1827 		goto out_put_bbe;
1828 	}
1829 	memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
1830 	i915_gem_object_flush_map(bbe);
1831 	i915_gem_object_unpin_map(bbe);
1832 
1833 	act = i915_gem_object_create_internal(i915, PAGE_SIZE);
1834 	if (IS_ERR(act)) {
1835 		err = PTR_ERR(act);
1836 		goto out_put_bbe;
1837 	}
1838 
1839 	/* Track the execution of each request by writing into a different slot */
1840 	batch = i915_gem_object_pin_map(act, I915_MAP_WC);
1841 	if (IS_ERR(batch)) {
1842 		err = PTR_ERR(batch);
1843 		goto out_put_act;
1844 	}
1845 	for (i = 0; i < count; i++) {
1846 		u32 *cs = batch + i * 64 / sizeof(*cs);
1847 		u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
1848 
1849 		GEM_BUG_ON(INTEL_GEN(i915) < 6);
1850 		cs[0] = MI_STORE_DWORD_IMM_GEN4;
1851 		if (INTEL_GEN(i915) >= 8) {
1852 			cs[1] = lower_32_bits(addr);
1853 			cs[2] = upper_32_bits(addr);
1854 			cs[3] = i;
1855 			cs[4] = MI_NOOP;
1856 			cs[5] = MI_BATCH_BUFFER_START_GEN8;
1857 		} else {
1858 			cs[1] = 0;
1859 			cs[2] = lower_32_bits(addr);
1860 			cs[3] = i;
1861 			cs[4] = MI_NOOP;
1862 			cs[5] = MI_BATCH_BUFFER_START;
1863 		}
1864 	}
1865 
1866 	out = i915_gem_object_create_internal(i915, PAGE_SIZE);
1867 	if (IS_ERR(out)) {
1868 		err = PTR_ERR(out);
1869 		goto out_put_batch;
1870 	}
1871 	i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
1872 
1873 	vma = i915_vma_instance(out, vm, NULL);
1874 	if (IS_ERR(vma)) {
1875 		err = PTR_ERR(vma);
1876 		goto out_put_batch;
1877 	}
1878 
1879 	err = i915_vma_pin(vma, 0, 0,
1880 			   PIN_USER |
1881 			   PIN_OFFSET_FIXED |
1882 			   (vm->total - PAGE_SIZE));
1883 	if (err)
1884 		goto out_put_out;
1885 	GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
1886 
1887 	result = i915_gem_object_pin_map(out, I915_MAP_WB);
1888 	if (IS_ERR(result)) {
1889 		err = PTR_ERR(result);
1890 		goto out_put_out;
1891 	}
1892 
1893 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1894 		IGT_TIMEOUT(end_time);
1895 		unsigned long pass = 0;
1896 
1897 		if (!intel_engine_can_store_dword(ce->engine))
1898 			continue;
1899 
1900 		while (!__igt_timeout(end_time, NULL)) {
1901 			struct i915_request *rq;
1902 			u64 offset;
1903 
1904 			offset = igt_random_offset(&prng,
1905 						   0, vm->total - PAGE_SIZE,
1906 						   chunk_size, PAGE_SIZE);
1907 
1908 			err = vm->allocate_va_range(vm, offset, chunk_size);
1909 			if (err)
1910 				goto end;
1911 
1912 			memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
1913 
1914 			vma = i915_vma_instance(bbe, vm, NULL);
1915 			if (IS_ERR(vma)) {
1916 				err = PTR_ERR(vma);
1917 				goto end;
1918 			}
1919 
1920 			err = vma->ops->set_pages(vma);
1921 			if (err)
1922 				goto end;
1923 
1924 			/* Prime the TLB with the dummy pages */
1925 			for (i = 0; i < count; i++) {
1926 				vma->node.start = offset + i * PAGE_SIZE;
1927 				vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1928 
1929 				rq = submit_batch(ce, vma->node.start);
1930 				if (IS_ERR(rq)) {
1931 					err = PTR_ERR(rq);
1932 					goto end;
1933 				}
1934 				i915_request_put(rq);
1935 			}
1936 
1937 			vma->ops->clear_pages(vma);
1938 
1939 			err = context_sync(ce);
1940 			if (err) {
1941 				pr_err("%s: dummy setup timed out\n",
1942 				       ce->engine->name);
1943 				goto end;
1944 			}
1945 
1946 			vma = i915_vma_instance(act, vm, NULL);
1947 			if (IS_ERR(vma)) {
1948 				err = PTR_ERR(vma);
1949 				goto end;
1950 			}
1951 
1952 			err = vma->ops->set_pages(vma);
1953 			if (err)
1954 				goto end;
1955 
1956 			/* Replace the TLB with target batches */
1957 			for (i = 0; i < count; i++) {
1958 				struct i915_request *rq;
1959 				u32 *cs = batch + i * 64 / sizeof(*cs);
1960 				u64 addr;
1961 
1962 				vma->node.start = offset + i * PAGE_SIZE;
1963 				vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1964 
1965 				addr = vma->node.start + i * 64;
1966 				cs[4] = MI_NOOP;
1967 				cs[6] = lower_32_bits(addr);
1968 				cs[7] = upper_32_bits(addr);
1969 				wmb();
1970 
1971 				rq = submit_batch(ce, addr);
1972 				if (IS_ERR(rq)) {
1973 					err = PTR_ERR(rq);
1974 					goto end;
1975 				}
1976 
1977 				/* Wait until the context chain has started */
1978 				if (i == 0) {
1979 					while (READ_ONCE(result[i]) &&
1980 					       !i915_request_completed(rq))
1981 						cond_resched();
1982 				} else {
1983 					end_spin(batch, i - 1);
1984 				}
1985 
1986 				i915_request_put(rq);
1987 			}
1988 			end_spin(batch, count - 1);
1989 
1990 			vma->ops->clear_pages(vma);
1991 
1992 			err = context_sync(ce);
1993 			if (err) {
1994 				pr_err("%s: writes timed out\n",
1995 				       ce->engine->name);
1996 				goto end;
1997 			}
1998 
1999 			for (i = 0; i < count; i++) {
2000 				if (result[i] != i) {
2001 					pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
2002 					       ce->engine->name, pass,
2003 					       offset, i, result[i], i);
2004 					err = -EINVAL;
2005 					goto end;
2006 				}
2007 			}
2008 
2009 			vm->clear_range(vm, offset, chunk_size);
2010 			pass++;
2011 		}
2012 	}
2013 end:
2014 	if (igt_flush_test(i915))
2015 		err = -EIO;
2016 	i915_gem_context_unlock_engines(ctx);
2017 	i915_gem_object_unpin_map(out);
2018 out_put_out:
2019 	i915_gem_object_put(out);
2020 out_put_batch:
2021 	i915_gem_object_unpin_map(act);
2022 out_put_act:
2023 	i915_gem_object_put(act);
2024 out_put_bbe:
2025 	i915_gem_object_put(bbe);
2026 out_vm:
2027 	i915_vm_put(vm);
2028 out_unlock:
2029 	fput(file);
2030 	return err;
2031 }
2032 
2033 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
2034 {
2035 	static const struct i915_subtest tests[] = {
2036 		SUBTEST(igt_ppgtt_alloc),
2037 		SUBTEST(igt_ppgtt_lowlevel),
2038 		SUBTEST(igt_ppgtt_drunk),
2039 		SUBTEST(igt_ppgtt_walk),
2040 		SUBTEST(igt_ppgtt_pot),
2041 		SUBTEST(igt_ppgtt_fill),
2042 		SUBTEST(igt_ppgtt_shrink),
2043 		SUBTEST(igt_ppgtt_shrink_boom),
2044 		SUBTEST(igt_ggtt_lowlevel),
2045 		SUBTEST(igt_ggtt_drunk),
2046 		SUBTEST(igt_ggtt_walk),
2047 		SUBTEST(igt_ggtt_pot),
2048 		SUBTEST(igt_ggtt_fill),
2049 		SUBTEST(igt_ggtt_page),
2050 		SUBTEST(igt_cs_tlb),
2051 	};
2052 
2053 	GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
2054 
2055 	return i915_subtests(tests, i915);
2056 }