xref: /openbsd-src/sys/dev/pci/drm/i915/i915_gem.c (revision 4e1ee0786f11cc571bd0be17d38e46f635c719fc)
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27 
28 #include <drm/drm_vma_manager.h>
29 #include <linux/dma-fence-array.h>
30 #include <linux/kthread.h>
31 #include <linux/dma-resv.h>
32 #include <linux/shmem_fs.h>
33 #include <linux/slab.h>
34 #include <linux/stop_machine.h>
35 #include <linux/swap.h>
36 #include <linux/pci.h>
37 #include <linux/dma-buf.h>
38 #include <linux/mman.h>
39 
40 #include <dev/pci/agpvar.h>
41 
42 #include "display/intel_display.h"
43 #include "display/intel_frontbuffer.h"
44 
45 #include "gem/i915_gem_clflush.h"
46 #include "gem/i915_gem_context.h"
47 #include "gem/i915_gem_ioctls.h"
48 #include "gem/i915_gem_mman.h"
49 #include "gem/i915_gem_region.h"
50 #include "gt/intel_engine_user.h"
51 #include "gt/intel_gt.h"
52 #include "gt/intel_gt_pm.h"
53 #include "gt/intel_workarounds.h"
54 
55 #include "i915_drv.h"
56 #include "i915_trace.h"
57 #include "i915_vgpu.h"
58 
59 #include "intel_pm.h"
60 
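/*
 * Helpers for the pread/pwrite slow paths below: temporarily reserve a
 * page-sized node in the mappable range of the GGTT so that individual
 * pages of an object can be windowed through the aperture, and release
 * the node again afterwards.
 */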
61 static int
62 insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
63 {
64 	int err;
65 
66 	err = mutex_lock_interruptible(&ggtt->vm.mutex);
67 	if (err)
68 		return err;
69 
70 	memset(node, 0, sizeof(*node));
71 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
72 					  size, 0, I915_COLOR_UNEVICTABLE,
73 					  0, ggtt->mappable_end,
74 					  DRM_MM_INSERT_LOW);
75 
76 	mutex_unlock(&ggtt->vm.mutex);
77 
78 	return err;
79 }
80 
81 static void
82 remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
83 {
84 	mutex_lock(&ggtt->vm.mutex);
85 	drm_mm_remove_node(node);
86 	mutex_unlock(&ggtt->vm.mutex);
87 }
88 
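/*
 * DRM_IOCTL_I915_GEM_GET_APERTURE: report the total size of the global GTT
 * and an estimate of how much of it is still available, i.e. not reserved
 * and not occupied by pinned vmas.
 */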
89 int
90 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
91 			    struct drm_file *file)
92 {
93 	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
94 	struct drm_i915_gem_get_aperture *args = data;
95 	struct i915_vma *vma;
96 	u64 pinned;
97 
98 	if (mutex_lock_interruptible(&ggtt->vm.mutex))
99 		return -EINTR;
100 
101 	pinned = ggtt->vm.reserved;
102 	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
103 		if (i915_vma_is_pinned(vma))
104 			pinned += vma->node.size;
105 
106 	mutex_unlock(&ggtt->vm.mutex);
107 
108 	args->aper_size = ggtt->vm.total;
109 	args->aper_available_size = args->aper_size - pinned;
110 
111 	return 0;
112 }
113 
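/*
 * Unbind all vmas of an object from their address spaces.
 * I915_GEM_OBJECT_UNBIND_TEST only reports (-EBUSY) whether any vma is still
 * bound, I915_GEM_OBJECT_UNBIND_ACTIVE also unbinds vmas that are still
 * active, and I915_GEM_OBJECT_UNBIND_BARRIER retries after flushing any
 * pending i915_vm_release().
 */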
114 int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
115 			   unsigned long flags)
116 {
117 	struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
118 	DRM_LIST_HEAD(still_in_list);
119 	intel_wakeref_t wakeref;
120 	struct i915_vma *vma;
121 	int ret;
122 
123 	if (list_empty(&obj->vma.list))
124 		return 0;
125 
126 	/*
127 	 * As some machines use ACPI to handle runtime-resume callbacks, and
128 	 * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex,
129 	 * as it is required by the shrinker. Ergo, we wake the device up
130 	 * first just in case.
131 	 */
132 	wakeref = intel_runtime_pm_get(rpm);
133 
134 try_again:
135 	ret = 0;
136 	spin_lock(&obj->vma.lock);
137 	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
138 						       struct i915_vma,
139 						       obj_link))) {
140 		struct i915_address_space *vm = vma->vm;
141 
142 		list_move_tail(&vma->obj_link, &still_in_list);
143 		if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
144 			continue;
145 
146 		if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
147 			ret = -EBUSY;
148 			break;
149 		}
150 
151 		ret = -EAGAIN;
152 		if (!i915_vm_tryopen(vm))
153 			break;
154 
155 		/* Prevent vma being freed by i915_vma_parked as we unbind */
156 		vma = __i915_vma_get(vma);
157 		spin_unlock(&obj->vma.lock);
158 
159 		if (vma) {
160 			ret = -EBUSY;
161 			if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
162 			    !i915_vma_is_active(vma))
163 				ret = i915_vma_unbind(vma);
164 
165 			__i915_vma_put(vma);
166 		}
167 
168 		i915_vm_close(vm);
169 		spin_lock(&obj->vma.lock);
170 	}
171 	list_splice_init(&still_in_list, &obj->vma.list);
172 	spin_unlock(&obj->vma.lock);
173 
174 	if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
175 		rcu_barrier(); /* flush the i915_vm_release() */
176 		goto try_again;
177 	}
178 
179 	intel_runtime_pm_put(rpm, wakeref);
180 
181 	return ret;
182 }
183 
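/*
 * Common backend for the create and dumb_create ioctls: round the requested
 * size up to the memory region's minimum page size, allocate an object from
 * that region and return a handle to it.
 */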
184 static int
185 i915_gem_create(struct drm_file *file,
186 		struct intel_memory_region *mr,
187 		u64 *size_p,
188 		u32 *handle_p)
189 {
190 	struct drm_i915_gem_object *obj;
191 	u32 handle;
192 	u64 size;
193 	int ret;
194 
195 	GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
196 	size = round_up(*size_p, mr->min_page_size);
197 	if (size == 0)
198 		return -EINVAL;
199 
200 	/* For most of the ABI (e.g. mmap) we think in system pages */
201 	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
202 
203 	/* Allocate the new object */
204 	obj = i915_gem_object_create_region(mr, size, 0);
205 	if (IS_ERR(obj))
206 		return PTR_ERR(obj);
207 
208 	ret = drm_gem_handle_create(file, &obj->base, &handle);
209 	/* drop reference from allocate - handle holds it now */
210 	i915_gem_object_put(obj);
211 	if (ret)
212 		return ret;
213 
214 	*handle_p = handle;
215 	*size_p = size;
216 	return 0;
217 }
218 
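/*
 * Create a "dumb" buffer suitable for unaccelerated scanout: derive a pixel
 * format from the requested bpp, compute a stride the display hardware can
 * use, and allocate the backing object in local memory when available,
 * otherwise in system memory.
 */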
219 int
220 i915_gem_dumb_create(struct drm_file *file,
221 		     struct drm_device *dev,
222 		     struct drm_mode_create_dumb *args)
223 {
224 	enum intel_memory_type mem_type;
225 	int cpp = DIV_ROUND_UP(args->bpp, 8);
226 	u32 format;
227 
228 	switch (cpp) {
229 	case 1:
230 		format = DRM_FORMAT_C8;
231 		break;
232 	case 2:
233 		format = DRM_FORMAT_RGB565;
234 		break;
235 	case 4:
236 		format = DRM_FORMAT_XRGB8888;
237 		break;
238 	default:
239 		return -EINVAL;
240 	}
241 
242 	/* have to work out size/pitch and return them */
243 	args->pitch = roundup2(args->width * cpp, 64);
244 
245 	/* align stride to page size so that we can remap */
246 	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
247 						    DRM_FORMAT_MOD_LINEAR))
248 		args->pitch = roundup2(args->pitch, 4096);
249 
250 	if (args->pitch < args->width)
251 		return -EINVAL;
252 
253 	args->size = mul_u32_u32(args->pitch, args->height);
254 
255 	mem_type = INTEL_MEMORY_SYSTEM;
256 	if (HAS_LMEM(to_i915(dev)))
257 		mem_type = INTEL_MEMORY_LOCAL;
258 
259 	return i915_gem_create(file,
260 			       intel_memory_region_by_type(to_i915(dev),
261 							   mem_type),
262 			       &args->size, &args->handle);
263 }
264 
265 /**
266  * Creates a new mm object and returns a handle to it.
267  * @dev: drm device pointer
268  * @data: ioctl data blob
269  * @file: drm file pointer
270  */
271 int
272 i915_gem_create_ioctl(struct drm_device *dev, void *data,
273 		      struct drm_file *file)
274 {
275 	struct drm_i915_private *i915 = to_i915(dev);
276 	struct drm_i915_gem_create *args = data;
277 
278 	i915_gem_flush_free_objects(i915);
279 
280 	return i915_gem_create(file,
281 			       intel_memory_region_by_type(i915,
282 							   INTEL_MEMORY_SYSTEM),
283 			       &args->size, &args->handle);
284 }
285 
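/*
 * Per-page copy function for the shmem pread fastpath: kmap the page,
 * clflush it first if the object is not CPU-coherent, then copy the
 * requested range out to userspace.
 */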
286 static int
287 shmem_pread(struct vm_page *page, int offset, int len, char __user *user_data,
288 	    bool needs_clflush)
289 {
290 	char *vaddr;
291 	int ret;
292 
293 	vaddr = kmap(page);
294 
295 	if (needs_clflush)
296 		drm_clflush_virt_range(vaddr + offset, len);
297 
298 	ret = __copy_to_user(user_data, vaddr + offset, len);
299 
300 	kunmap_va(vaddr);
301 
302 	return ret ? -EFAULT : 0;
303 }
304 
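/*
 * Slow-but-simple pread path: walk the object's backing pages and copy each
 * page to userspace with shmem_pread(), flushing stale cachelines first when
 * the object is not coherent with the CPU.
 */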
305 static int
306 i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
307 		     struct drm_i915_gem_pread *args)
308 {
309 	unsigned int needs_clflush;
310 	unsigned int idx, offset;
311 	struct dma_fence *fence;
312 	char __user *user_data;
313 	u64 remain;
314 	int ret;
315 
316 	ret = i915_gem_object_lock_interruptible(obj, NULL);
317 	if (ret)
318 		return ret;
319 
320 	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
321 	if (ret) {
322 		i915_gem_object_unlock(obj);
323 		return ret;
324 	}
325 
326 	fence = i915_gem_object_lock_fence(obj);
327 	i915_gem_object_finish_access(obj);
328 	i915_gem_object_unlock(obj);
329 
330 	if (!fence)
331 		return -ENOMEM;
332 
333 	remain = args->size;
334 	user_data = u64_to_user_ptr(args->data_ptr);
335 	offset = offset_in_page(args->offset);
336 	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
337 		struct vm_page *page = i915_gem_object_get_page(obj, idx);
338 		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
339 
340 		ret = shmem_pread(page, offset, length, user_data,
341 				  needs_clflush);
342 		if (ret)
343 			break;
344 
345 		remain -= length;
346 		user_data += length;
347 		offset = 0;
348 	}
349 
350 	i915_gem_object_unlock_fence(obj, fence);
351 	return ret;
352 }
353 
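/*
 * Copy from a GGTT (aperture) address to userspace through a
 * write-combining CPU mapping. On Linux this uses an io_mapping; the
 * OpenBSD variant below maps the same range via the AGP/bus_space
 * interface. Returns true if the copy could not be completed.
 */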
354 #ifdef __linux__
355 static inline bool
356 gtt_user_read(struct io_mapping *mapping,
357 	      loff_t base, int offset,
358 	      char __user *user_data, int length)
359 {
360 	void __iomem *vaddr;
361 	unsigned long unwritten;
362 
363 	/* We can use the cpu mem copy function because this is X86. */
364 	vaddr = io_mapping_map_atomic_wc(mapping, base);
365 	unwritten = __copy_to_user_inatomic(user_data,
366 					    (void __force *)vaddr + offset,
367 					    length);
368 	io_mapping_unmap_atomic(vaddr);
369 	if (unwritten) {
370 		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
371 		unwritten = copy_to_user(user_data,
372 					 (void __force *)vaddr + offset,
373 					 length);
374 		io_mapping_unmap(vaddr);
375 	}
376 	return unwritten;
377 }
378 #else
379 static inline bool
380 gtt_user_read(struct drm_i915_private *dev_priv,
381 	      loff_t base, int offset,
382 	      char __user *user_data, int length)
383 {
384 	bus_space_handle_t bsh;
385 	void __iomem *vaddr;
386 	unsigned long unwritten;
387 
388 	/* We can use the cpu mem copy function because this is X86. */
389 	agp_map_atomic(dev_priv->agph, base, &bsh);
390 	vaddr = bus_space_vaddr(dev_priv->bst, bsh);
391 	unwritten = __copy_to_user_inatomic(user_data,
392 					    (void __force *)vaddr + offset,
393 					    length);
394 	agp_unmap_atomic(dev_priv->agph, bsh);
395 	if (unwritten) {
396 		agp_map_subregion(dev_priv->agph, base, PAGE_SIZE, &bsh);
397 		vaddr = bus_space_vaddr(dev_priv->bst, bsh);
398 		unwritten = copy_to_user(user_data,
399 					 (void __force *)vaddr + offset,
400 					 length);
401 		agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
402 	}
403 	return unwritten;
404 }
405 #endif
406 
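/*
 * Fallback pread path that reads through the mappable aperture. If the
 * object cannot be pinned into the mappable region in one go, a temporary
 * GGTT node is used to map and copy one page of the object at a time.
 */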
407 static int
408 i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
409 		   const struct drm_i915_gem_pread *args)
410 {
411 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
412 	struct i915_ggtt *ggtt = &i915->ggtt;
413 	intel_wakeref_t wakeref;
414 	struct drm_mm_node node;
415 	struct dma_fence *fence;
416 	void __user *user_data;
417 	struct i915_vma *vma;
418 	u64 remain, offset;
419 	int ret;
420 
421 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
422 	vma = ERR_PTR(-ENODEV);
423 	if (!i915_gem_object_is_tiled(obj))
424 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
425 					       PIN_MAPPABLE |
426 					       PIN_NONBLOCK /* NOWARN */ |
427 					       PIN_NOEVICT);
428 	if (!IS_ERR(vma)) {
429 		node.start = i915_ggtt_offset(vma);
430 		node.flags = 0;
431 	} else {
432 		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
433 		if (ret)
434 			goto out_rpm;
435 		GEM_BUG_ON(!drm_mm_node_allocated(&node));
436 	}
437 
438 	ret = i915_gem_object_lock_interruptible(obj, NULL);
439 	if (ret)
440 		goto out_unpin;
441 
442 	ret = i915_gem_object_set_to_gtt_domain(obj, false);
443 	if (ret) {
444 		i915_gem_object_unlock(obj);
445 		goto out_unpin;
446 	}
447 
448 	fence = i915_gem_object_lock_fence(obj);
449 	i915_gem_object_unlock(obj);
450 	if (!fence) {
451 		ret = -ENOMEM;
452 		goto out_unpin;
453 	}
454 
455 	user_data = u64_to_user_ptr(args->data_ptr);
456 	remain = args->size;
457 	offset = args->offset;
458 
459 	while (remain > 0) {
460 		/* Operation in this page
461 		 *
462 		 * page_base = page offset within aperture
463 		 * page_offset = offset within page
464 		 * page_length = bytes to copy for this page
465 		 */
466 		u32 page_base = node.start;
467 		unsigned page_offset = offset_in_page(offset);
468 		unsigned page_length = PAGE_SIZE - page_offset;
469 		page_length = remain < page_length ? remain : page_length;
470 		if (drm_mm_node_allocated(&node)) {
471 			ggtt->vm.insert_page(&ggtt->vm,
472 					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
473 					     node.start, I915_CACHE_NONE, 0);
474 		} else {
475 			page_base += offset & LINUX_PAGE_MASK;
476 		}
477 
478 		if (gtt_user_read(i915, page_base, page_offset,
479 				  user_data, page_length)) {
480 			ret = -EFAULT;
481 			break;
482 		}
483 
484 		remain -= page_length;
485 		user_data += page_length;
486 		offset += page_length;
487 	}
488 
489 	i915_gem_object_unlock_fence(obj, fence);
490 out_unpin:
491 	if (drm_mm_node_allocated(&node)) {
492 		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
493 		remove_mappable_node(ggtt, &node);
494 	} else {
495 		i915_vma_unpin(vma);
496 	}
497 out_rpm:
498 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
499 	return ret;
500 }
501 
502 /**
503  * Reads data from the object referenced by handle.
504  * @dev: drm device pointer
505  * @data: ioctl data blob
506  * @file: drm file pointer
507  *
508  * On error, the contents of *data are undefined.
509  */
510 int
511 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
512 		     struct drm_file *file)
513 {
514 	struct drm_i915_gem_pread *args = data;
515 	struct drm_i915_gem_object *obj;
516 	int ret;
517 
518 	if (args->size == 0)
519 		return 0;
520 
521 	if (!access_ok(u64_to_user_ptr(args->data_ptr),
522 		       args->size))
523 		return -EFAULT;
524 
525 	obj = i915_gem_object_lookup(file, args->handle);
526 	if (!obj)
527 		return -ENOENT;
528 
529 	/* Bounds check source.  */
530 	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
531 		ret = -EINVAL;
532 		goto out;
533 	}
534 
535 	trace_i915_gem_object_pread(obj, args->offset, args->size);
536 
537 	ret = -ENODEV;
538 	if (obj->ops->pread)
539 		ret = obj->ops->pread(obj, args);
540 	if (ret != -ENODEV)
541 		goto out;
542 
543 	ret = i915_gem_object_wait(obj,
544 				   I915_WAIT_INTERRUPTIBLE,
545 				   MAX_SCHEDULE_TIMEOUT);
546 	if (ret)
547 		goto out;
548 
549 	ret = i915_gem_object_pin_pages(obj);
550 	if (ret)
551 		goto out;
552 
553 	ret = i915_gem_shmem_pread(obj, args);
554 	if (ret == -EFAULT || ret == -ENODEV)
555 		ret = i915_gem_gtt_pread(obj, args);
556 
557 	i915_gem_object_unpin_pages(obj);
558 out:
559 	i915_gem_object_put(obj);
560 	return ret;
561 }
562 
563 /* This is the fast write path which cannot handle
564  * page faults in the source data
565  */
566 #ifdef __linux__
567 static inline bool
568 ggtt_write(struct io_mapping *mapping,
569 	   loff_t base, int offset,
570 	   char __user *user_data, int length)
571 {
572 	void __iomem *vaddr;
573 	unsigned long unwritten;
574 
575 	/* We can use the cpu mem copy function because this is X86. */
576 	vaddr = io_mapping_map_atomic_wc(mapping, base);
577 	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
578 						      user_data, length);
579 	io_mapping_unmap_atomic(vaddr);
580 	if (unwritten) {
581 		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
582 		unwritten = copy_from_user((void __force *)vaddr + offset,
583 					   user_data, length);
584 		io_mapping_unmap(vaddr);
585 	}
586 
587 	return unwritten;
588 }
589 #else
590 static inline bool
591 ggtt_write(struct drm_i915_private *dev_priv,
592 	   loff_t base, int offset,
593 	   char __user *user_data, int length)
594 {
595 	bus_space_handle_t bsh;
596 	void __iomem *vaddr;
597 	unsigned long unwritten;
598 
599 	/* We can use the cpu mem copy function because this is X86. */
600 	agp_map_atomic(dev_priv->agph, base, &bsh);
601 	vaddr = bus_space_vaddr(dev_priv->bst, bsh);
602 	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
603 						      user_data, length);
604 	agp_unmap_atomic(dev_priv->agph, bsh);
605 	if (unwritten) {
606 		agp_map_subregion(dev_priv->agph, base, PAGE_SIZE, &bsh);
607 		vaddr = bus_space_vaddr(dev_priv->bst, bsh);
608 		unwritten = copy_from_user((void __force *)vaddr + offset,
609 					   user_data, length);
610 		agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
611 	}
612 
613 	return unwritten;
614 }
615 #endif
616 
617 /**
618  * This is the fast pwrite path, where we copy the data directly from the
619  * user into the GTT, uncached.
620  * @obj: i915 GEM object
621  * @args: pwrite arguments structure
622  */
623 static int
624 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
625 			 const struct drm_i915_gem_pwrite *args)
626 {
627 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
628 	struct i915_ggtt *ggtt = &i915->ggtt;
629 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
630 	intel_wakeref_t wakeref;
631 	struct drm_mm_node node;
632 	struct dma_fence *fence;
633 	struct i915_vma *vma;
634 	u64 remain, offset;
635 	void __user *user_data;
636 	int ret;
637 
638 	if (i915_gem_object_has_struct_page(obj)) {
639 		/*
640 		 * Avoid waking the device up if we can fallback, as
641 		 * waking/resuming is very slow (worst-case 10-100 ms
642 		 * depending on PCI sleeps and our own resume time).
643 		 * This easily dwarfs any performance advantage from
644 		 * using the cache bypass of indirect GGTT access.
645 		 */
646 		wakeref = intel_runtime_pm_get_if_in_use(rpm);
647 		if (!wakeref)
648 			return -EFAULT;
649 	} else {
650 		/* No backing pages, no fallback, we must force GGTT access */
651 		wakeref = intel_runtime_pm_get(rpm);
652 	}
653 
654 	vma = ERR_PTR(-ENODEV);
655 	if (!i915_gem_object_is_tiled(obj))
656 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
657 					       PIN_MAPPABLE |
658 					       PIN_NONBLOCK /* NOWARN */ |
659 					       PIN_NOEVICT);
660 	if (!IS_ERR(vma)) {
661 		node.start = i915_ggtt_offset(vma);
662 		node.flags = 0;
663 	} else {
664 		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
665 		if (ret)
666 			goto out_rpm;
667 		GEM_BUG_ON(!drm_mm_node_allocated(&node));
668 	}
669 
670 	ret = i915_gem_object_lock_interruptible(obj, NULL);
671 	if (ret)
672 		goto out_unpin;
673 
674 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
675 	if (ret) {
676 		i915_gem_object_unlock(obj);
677 		goto out_unpin;
678 	}
679 
680 	fence = i915_gem_object_lock_fence(obj);
681 	i915_gem_object_unlock(obj);
682 	if (!fence) {
683 		ret = -ENOMEM;
684 		goto out_unpin;
685 	}
686 
687 	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
688 
689 	user_data = u64_to_user_ptr(args->data_ptr);
690 	offset = args->offset;
691 	remain = args->size;
692 	while (remain) {
693 		/* Operation in this page
694 		 *
695 		 * page_base = page offset within aperture
696 		 * page_offset = offset within page
697 		 * page_length = bytes to copy for this page
698 		 */
699 		u32 page_base = node.start;
700 		unsigned int page_offset = offset_in_page(offset);
701 		unsigned int page_length = PAGE_SIZE - page_offset;
702 		page_length = remain < page_length ? remain : page_length;
703 		if (drm_mm_node_allocated(&node)) {
704 			/* flush the write before we modify the GGTT */
705 			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
706 			ggtt->vm.insert_page(&ggtt->vm,
707 					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
708 					     node.start, I915_CACHE_NONE, 0);
709 			wmb(); /* flush modifications to the GGTT (insert_page) */
710 		} else {
711 			page_base += offset & LINUX_PAGE_MASK;
712 		}
713 		/* If we get a fault while copying data, then (presumably) our
714 		 * source page isn't available.  Return the error and we'll
715 		 * retry in the slow path.
716 		 * If the object is non-shmem backed, we retry with the
717 		 * path that handles page faults.
718 		 */
719 		if (ggtt_write(i915, page_base, page_offset,
720 			       user_data, page_length)) {
721 			ret = -EFAULT;
722 			break;
723 		}
724 
725 		remain -= page_length;
726 		user_data += page_length;
727 		offset += page_length;
728 	}
729 
730 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
731 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
732 
733 	i915_gem_object_unlock_fence(obj, fence);
734 out_unpin:
735 	if (drm_mm_node_allocated(&node)) {
736 		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
737 		remove_mappable_node(ggtt, &node);
738 	} else {
739 		i915_vma_unpin(vma);
740 	}
741 out_rpm:
742 	intel_runtime_pm_put(rpm, wakeref);
743 	return ret;
744 }
745 
746 /* Per-page copy function for the shmem pwrite fastpath.
747  * Flushes invalid cachelines before writing to the target if
748  * needs_clflush_before is set and flushes out any written cachelines after
749  * writing if needs_clflush_after is set.
750  */
751 static int
752 shmem_pwrite(struct vm_page *page, int offset, int len, char __user *user_data,
753 	     bool needs_clflush_before,
754 	     bool needs_clflush_after)
755 {
756 	char *vaddr;
757 	int ret;
758 
759 	vaddr = kmap(page);
760 
761 	if (needs_clflush_before)
762 		drm_clflush_virt_range(vaddr + offset, len);
763 
764 	ret = __copy_from_user(vaddr + offset, user_data, len);
765 	if (!ret && needs_clflush_after)
766 		drm_clflush_virt_range(vaddr + offset, len);
767 
768 	kunmap_va(vaddr);
769 
770 	return ret ? -EFAULT : 0;
771 }
772 
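/*
 * CPU write path into the object's shmem backing pages, copying page by
 * page and clflushing around the copy as required by the object's cache
 * domain.
 */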
773 static int
774 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
775 		      const struct drm_i915_gem_pwrite *args)
776 {
777 	unsigned int partial_cacheline_write;
778 	unsigned int needs_clflush;
779 	unsigned int offset, idx;
780 	struct dma_fence *fence;
781 	void __user *user_data;
782 	u64 remain;
783 	int ret;
784 
785 	ret = i915_gem_object_lock_interruptible(obj, NULL);
786 	if (ret)
787 		return ret;
788 
789 	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
790 	if (ret) {
791 		i915_gem_object_unlock(obj);
792 		return ret;
793 	}
794 
795 	fence = i915_gem_object_lock_fence(obj);
796 	i915_gem_object_finish_access(obj);
797 	i915_gem_object_unlock(obj);
798 
799 	if (!fence)
800 		return -ENOMEM;
801 
802 	/* If we don't overwrite a cacheline completely we need to be
803 	 * careful to have up-to-date data by first clflushing. Don't
804 	 * overcomplicate things and flush the entire range being written.
805 	 */
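	/*
	 * Illustration (assuming a 64-byte cacheline): the mask below is
	 * then 0x3f, so a write of 0x80 bytes at offset 0x40 has
	 * (offset | length) & 0x3f == 0 and skips the pre-flush, while a
	 * write of 0x30 bytes at offset 0x10 does not.
	 */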
806 	partial_cacheline_write = 0;
807 	if (needs_clflush & CLFLUSH_BEFORE)
808 		partial_cacheline_write = curcpu()->ci_cflushsz - 1;
809 
810 	user_data = u64_to_user_ptr(args->data_ptr);
811 	remain = args->size;
812 	offset = offset_in_page(args->offset);
813 	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
814 		struct vm_page *page = i915_gem_object_get_page(obj, idx);
815 		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
816 
817 		ret = shmem_pwrite(page, offset, length, user_data,
818 				   (offset | length) & partial_cacheline_write,
819 				   needs_clflush & CLFLUSH_AFTER);
820 		if (ret)
821 			break;
822 
823 		remain -= length;
824 		user_data += length;
825 		offset = 0;
826 	}
827 
828 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
829 	i915_gem_object_unlock_fence(obj, fence);
830 
831 	return ret;
832 }
833 
834 /**
835  * Writes data to the object referenced by handle.
836  * @dev: drm device
837  * @data: ioctl data blob
838  * @file: drm file
839  *
840  * On error, the contents of the buffer that were to be modified are undefined.
841  */
842 int
843 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
844 		      struct drm_file *file)
845 {
846 	struct drm_i915_gem_pwrite *args = data;
847 	struct drm_i915_gem_object *obj;
848 	int ret;
849 
850 	if (args->size == 0)
851 		return 0;
852 
853 	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
854 		return -EFAULT;
855 
856 	obj = i915_gem_object_lookup(file, args->handle);
857 	if (!obj)
858 		return -ENOENT;
859 
860 	/* Bounds check destination. */
861 	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
862 		ret = -EINVAL;
863 		goto err;
864 	}
865 
866 	/* Writes not allowed into this read-only object */
867 	if (i915_gem_object_is_readonly(obj)) {
868 		ret = -EINVAL;
869 		goto err;
870 	}
871 
872 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
873 
874 	ret = -ENODEV;
875 	if (obj->ops->pwrite)
876 		ret = obj->ops->pwrite(obj, args);
877 	if (ret != -ENODEV)
878 		goto err;
879 
880 	ret = i915_gem_object_wait(obj,
881 				   I915_WAIT_INTERRUPTIBLE |
882 				   I915_WAIT_ALL,
883 				   MAX_SCHEDULE_TIMEOUT);
884 	if (ret)
885 		goto err;
886 
887 	ret = i915_gem_object_pin_pages(obj);
888 	if (ret)
889 		goto err;
890 
891 	ret = -EFAULT;
892 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
893 	 * it would end up going through the fenced access, and we'll get
894 	 * different detiling behavior between reading and writing.
895 	 * pread/pwrite currently are reading and writing from the CPU
896 	 * perspective, requiring manual detiling by the client.
897 	 */
898 	if (!i915_gem_object_has_struct_page(obj) ||
899 	    cpu_write_needs_clflush(obj))
900 		/* Note that the gtt paths might fail with non-page-backed user
901 		 * pointers (e.g. gtt mappings when moving data between
902 		 * textures). Fallback to the shmem path in that case.
903 		 * textures). Fall back to the shmem path in that case.
904 		ret = i915_gem_gtt_pwrite_fast(obj, args);
905 
906 	if (ret == -EFAULT || ret == -ENOSPC) {
907 		if (i915_gem_object_has_struct_page(obj))
908 			ret = i915_gem_shmem_pwrite(obj, args);
909 	}
910 
911 	i915_gem_object_unpin_pages(obj);
912 err:
913 	i915_gem_object_put(obj);
914 	return ret;
915 }
916 
917 /**
918  * Called when user space has done writes to this buffer
919  * @dev: drm device
920  * @data: ioctl data blob
921  * @file: drm file
922  */
923 int
924 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
925 			 struct drm_file *file)
926 {
927 	struct drm_i915_gem_sw_finish *args = data;
928 	struct drm_i915_gem_object *obj;
929 
930 	obj = i915_gem_object_lookup(file, args->handle);
931 	if (!obj)
932 		return -ENOENT;
933 
934 	/*
935 	 * Proxy objects are barred from CPU access, so there is no
936 	 * need to ban sw_finish as it is a nop.
937 	 */
938 
939 	/* Pinned buffers may be scanout, so flush the cache */
940 	i915_gem_object_flush_if_display(obj);
941 	i915_gem_object_put(obj);
942 
943 	return 0;
944 }
945 
946 void i915_gem_runtime_suspend(struct drm_i915_private *i915)
947 {
948 	struct drm_i915_gem_object *obj, *on;
949 	int i;
950 
951 	/*
952 	 * Only called during RPM suspend. All users of the userfault_list
953 	 * must be holding an RPM wakeref to ensure that this cannot
954 	 * run concurrently with themselves (they use the struct_mutex
955 	 * for protection between themselves).
956 	 */
957 
958 	list_for_each_entry_safe(obj, on,
959 				 &i915->ggtt.userfault_list, userfault_link)
960 		__i915_gem_object_release_mmap_gtt(obj);
961 
962 	/*
963 	 * The fences will be lost when the device powers down. If any were
964 	 * in use by hardware (i.e. they are pinned), we should not be powering
965 	 * down! All other fences will be reacquired by the user upon waking.
966 	 */
967 	for (i = 0; i < i915->ggtt.num_fences; i++) {
968 		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
969 
970 		/*
971 		 * Ideally we want to assert that the fence register is not
972 		 * live at this point (i.e. that no piece of code will be
973 		 * trying to write through fence + GTT, as that both violates
974 		 * our tracking of activity and associated locking/barriers,
975 		 * and is also illegal given that the hw is powered down).
976 		 *
977 		 * Previously we used reg->pin_count as a "liveness" indicator.
978 		 * That is not sufficient, and we need a more fine-grained
979 		 * tool if we want to have a sanity check here.
980 		 */
981 
982 		if (!reg->vma)
983 			continue;
984 
985 		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
986 		reg->dirty = true;
987 	}
988 }
989 
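/*
 * Drop a pinned-but-misplaced ggtt vma from the object's vma lookup tree so
 * that i915_vma_instance() can create a fresh vma for the requested view.
 */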
990 static void discard_ggtt_vma(struct i915_vma *vma)
991 {
992 	struct drm_i915_gem_object *obj = vma->obj;
993 
994 	spin_lock(&obj->vma.lock);
995 	if (!RB_EMPTY_NODE(&vma->obj_node)) {
996 		rb_erase(&vma->obj_node, &obj->vma.tree);
997 		RB_CLEAR_NODE(&vma->obj_node);
998 	}
999 	spin_unlock(&obj->vma.lock);
1000 }
1001 
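/*
 * Pin an object into the global GTT, optionally through a remapped view.
 * Mappable pins of the normal view are rejected early when the object can
 * never fit the aperture, or, with PIN_NONBLOCK, when it would claim more
 * than half of it. A pre-existing vma that is misplaced is discarded or
 * unbound before the vma is pinned with the requested size, alignment and
 * flags.
 */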
1002 struct i915_vma *
1003 i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
1004 			    struct i915_gem_ww_ctx *ww,
1005 			    const struct i915_ggtt_view *view,
1006 			    u64 size, u64 alignment, u64 flags)
1007 {
1008 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
1009 	struct i915_ggtt *ggtt = &i915->ggtt;
1010 	struct i915_vma *vma;
1011 	int ret;
1012 
1013 	if (flags & PIN_MAPPABLE &&
1014 	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
1015 		/*
1016 		 * If the required space is larger than the available
1017 		 * aperture, we will not be able to find a slot for the
1018 		 * object and unbinding the object now will be in
1019 		 * vain. Worse, doing so may cause us to ping-pong
1020 		 * the object in and out of the Global GTT and
1021 		 * waste a lot of cycles under the mutex.
1022 		 */
1023 		if (obj->base.size > ggtt->mappable_end)
1024 			return ERR_PTR(-E2BIG);
1025 
1026 		/*
1027 		 * If NONBLOCK is set the caller is optimistically
1028 		 * trying to cache the full object within the mappable
1029 		 * aperture, and *must* have a fallback in place for
1030 		 * situations where we cannot bind the object. We
1031 		 * can be a little more lax here and use the fallback
1032 		 * more often to avoid costly migrations of ourselves
1033 		 * and other objects within the aperture.
1034 		 *
1035 		 * Half-the-aperture is used as a simple heuristic.
1036 		 * More interesting would be to search for a free
1037 		 * block prior to making the commitment to unbind.
1038 		 * That caters for the self-harm case, and with a
1039 		 * little more heuristics (e.g. NOFAULT, NOEVICT)
1040 		 * we could try to minimise harm to others.
1041 		 */
1042 		if (flags & PIN_NONBLOCK &&
1043 		    obj->base.size > ggtt->mappable_end / 2)
1044 			return ERR_PTR(-ENOSPC);
1045 	}
1046 
1047 new_vma:
1048 	vma = i915_vma_instance(obj, &ggtt->vm, view);
1049 	if (IS_ERR(vma))
1050 		return vma;
1051 
1052 	if (i915_vma_misplaced(vma, size, alignment, flags)) {
1053 		if (flags & PIN_NONBLOCK) {
1054 			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
1055 				return ERR_PTR(-ENOSPC);
1056 
1057 			if (flags & PIN_MAPPABLE &&
1058 			    vma->fence_size > ggtt->mappable_end / 2)
1059 				return ERR_PTR(-ENOSPC);
1060 		}
1061 
1062 		if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
1063 			discard_ggtt_vma(vma);
1064 			goto new_vma;
1065 		}
1066 
1067 		ret = i915_vma_unbind(vma);
1068 		if (ret)
1069 			return ERR_PTR(ret);
1070 	}
1071 
1072 	ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
1073 	if (ret)
1074 		return ERR_PTR(ret);
1075 
1076 	if (vma->fence && !i915_gem_object_is_tiled(obj)) {
1077 		mutex_lock(&ggtt->vm.mutex);
1078 		i915_vma_revoke_fence(vma);
1079 		mutex_unlock(&ggtt->vm.mutex);
1080 	}
1081 
1082 	ret = i915_vma_wait_for_bind(vma);
1083 	if (ret) {
1084 		i915_vma_unpin(vma);
1085 		return ERR_PTR(ret);
1086 	}
1087 
1088 	return vma;
1089 }
1090 
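/*
 * DRM_IOCTL_I915_GEM_MADVISE: let userspace mark an object's backing storage
 * as needed (I915_MADV_WILLNEED) or discardable under memory pressure
 * (I915_MADV_DONTNEED), and report whether the pages are still retained.
 */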
1091 int
1092 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
1093 		       struct drm_file *file_priv)
1094 {
1095 	struct drm_i915_private *i915 = to_i915(dev);
1096 	struct drm_i915_gem_madvise *args = data;
1097 	struct drm_i915_gem_object *obj;
1098 	int err;
1099 
1100 	switch (args->madv) {
1101 	case I915_MADV_DONTNEED:
1102 	case I915_MADV_WILLNEED:
1103 	    break;
1104 	default:
1105 	    return -EINVAL;
1106 	}
1107 
1108 	obj = i915_gem_object_lookup(file_priv, args->handle);
1109 	if (!obj)
1110 		return -ENOENT;
1111 
1112 	err = mutex_lock_interruptible(&obj->mm.lock);
1113 	if (err)
1114 		goto out;
1115 
1116 	if (i915_gem_object_has_pages(obj) &&
1117 	    i915_gem_object_is_tiled(obj) &&
1118 	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
1119 		if (obj->mm.madv == I915_MADV_WILLNEED) {
1120 			GEM_BUG_ON(!obj->mm.quirked);
1121 			__i915_gem_object_unpin_pages(obj);
1122 			obj->mm.quirked = false;
1123 		}
1124 		if (args->madv == I915_MADV_WILLNEED) {
1125 			GEM_BUG_ON(obj->mm.quirked);
1126 			__i915_gem_object_pin_pages(obj);
1127 			obj->mm.quirked = true;
1128 		}
1129 	}
1130 
1131 	if (obj->mm.madv != __I915_MADV_PURGED)
1132 		obj->mm.madv = args->madv;
1133 
1134 	if (i915_gem_object_has_pages(obj)) {
1135 		struct list_head *list;
1136 
1137 		if (i915_gem_object_is_shrinkable(obj)) {
1138 			unsigned long flags;
1139 
1140 			spin_lock_irqsave(&i915->mm.obj_lock, flags);
1141 
1142 			if (obj->mm.madv != I915_MADV_WILLNEED)
1143 				list = &i915->mm.purge_list;
1144 			else
1145 				list = &i915->mm.shrink_list;
1146 			list_move_tail(&obj->mm.link, list);
1147 
1148 			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
1149 		}
1150 	}
1151 
1152 	/* if the object is no longer attached, discard its backing storage */
1153 	if (obj->mm.madv == I915_MADV_DONTNEED &&
1154 	    !i915_gem_object_has_pages(obj))
1155 		i915_gem_object_truncate(obj);
1156 
1157 	args->retained = obj->mm.madv != __I915_MADV_PURGED;
1158 	mutex_unlock(&obj->mm.lock);
1159 
1160 out:
1161 	i915_gem_object_put(obj);
1162 	return err;
1163 }
1164 
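/*
 * One-time GEM initialisation at driver load: set up userptr support, fetch
 * uC firmware, initialise the global GTT and bring up the GT. On -EIO the
 * GPU is declared wedged but enough state is kept for KMS to keep working.
 */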
1165 int i915_gem_init(struct drm_i915_private *dev_priv)
1166 {
1167 	int ret;
1168 
1169 	/* We need to fall back to 4K pages if the host doesn't support huge GTT pages. */
1170 	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
1171 		mkwrite_device_info(dev_priv)->page_sizes =
1172 			I915_GTT_PAGE_SIZE_4K;
1173 
1174 	ret = i915_gem_init_userptr(dev_priv);
1175 	if (ret)
1176 		return ret;
1177 
1178 	intel_uc_fetch_firmwares(&dev_priv->gt.uc);
1179 	intel_wopcm_init(&dev_priv->wopcm);
1180 
1181 	ret = i915_init_ggtt(dev_priv);
1182 	if (ret) {
1183 		GEM_BUG_ON(ret == -EIO);
1184 		goto err_unlock;
1185 	}
1186 
1187 	/*
1188 	 * Despite its name, intel_init_clock_gating applies both display
1189 	 * clock gating workarounds and GT mmio workarounds, plus the
1190 	 * occasional GT power context workaround. Worse, sometimes it
1191 	 * includes a context register workaround which we need to apply
1192 	 * before we record the default HW state for all contexts.
1193 	 *
1194 	 * FIXME: break up the workarounds and apply them at the right time!
1195 	 */
1196 	intel_init_clock_gating(dev_priv);
1197 
1198 	ret = intel_gt_init(&dev_priv->gt);
1199 	if (ret)
1200 		goto err_unlock;
1201 
1202 	return 0;
1203 
1204 	/*
1205 	 * Unwinding is complicated by the fact that we want to handle -EIO
1206 	 * to mean "disable GPU submission but keep KMS alive". We want to
1207 	 * mark the HW as irreversibly wedged, but keep enough state around
1208 	 * that the driver doesn't explode during runtime.
1209 	 */
1210 err_unlock:
1211 	i915_gem_drain_workqueue(dev_priv);
1212 
1213 	if (ret != -EIO) {
1214 		intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
1215 		i915_gem_cleanup_userptr(dev_priv);
1216 	}
1217 
1218 	if (ret == -EIO) {
1219 		/*
1220 		 * Allow engines or uC initialisation to fail by marking the GPU
1221 		 * as wedged. But we only want to do this when the GPU is angry;
1222 		 * for all other failures, such as an allocation failure, bail.
1223 		 */
1224 		if (!intel_gt_is_wedged(&dev_priv->gt)) {
1225 			i915_probe_error(dev_priv,
1226 					 "Failed to initialize GPU, declaring it wedged!\n");
1227 			intel_gt_set_wedged(&dev_priv->gt);
1228 		}
1229 
1230 		/* Minimal basic recovery for KMS */
1231 		ret = i915_ggtt_enable_hw(dev_priv);
1232 		i915_ggtt_resume(&dev_priv->ggtt);
1233 		intel_init_clock_gating(dev_priv);
1234 	}
1235 
1236 	i915_gem_drain_freed_objects(dev_priv);
1237 	return ret;
1238 }
1239 
1240 void i915_gem_driver_register(struct drm_i915_private *i915)
1241 {
1242 	i915_gem_driver_register__shrinker(i915);
1243 
1244 	intel_engines_driver_register(i915);
1245 }
1246 
1247 void i915_gem_driver_unregister(struct drm_i915_private *i915)
1248 {
1249 	i915_gem_driver_unregister__shrinker(i915);
1250 }
1251 
1252 void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
1253 {
1254 	intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
1255 
1256 	i915_gem_suspend_late(dev_priv);
1257 	intel_gt_driver_remove(&dev_priv->gt);
1258 	dev_priv->uabi_engines = RB_ROOT;
1259 
1260 	/* Flush any outstanding unpin_work. */
1261 	i915_gem_drain_workqueue(dev_priv);
1262 
1263 	i915_gem_drain_freed_objects(dev_priv);
1264 }
1265 
1266 void i915_gem_driver_release(struct drm_i915_private *dev_priv)
1267 {
1268 	i915_gem_driver_release__contexts(dev_priv);
1269 
1270 	intel_gt_driver_release(&dev_priv->gt);
1271 
1272 	intel_wa_list_free(&dev_priv->gt_wa_list);
1273 
1274 	intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
1275 	i915_gem_cleanup_userptr(dev_priv);
1276 
1277 	i915_gem_drain_freed_objects(dev_priv);
1278 
1279 	drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
1280 }
1281 
1282 static void i915_gem_init__mm(struct drm_i915_private *i915)
1283 {
1284 	mtx_init(&i915->mm.obj_lock, IPL_NONE);
1285 
1286 	init_llist_head(&i915->mm.free_list);
1287 
1288 	INIT_LIST_HEAD(&i915->mm.purge_list);
1289 	INIT_LIST_HEAD(&i915->mm.shrink_list);
1290 
1291 	i915_gem_init__objects(i915);
1292 }
1293 
1294 void i915_gem_init_early(struct drm_i915_private *dev_priv)
1295 {
1296 	i915_gem_init__mm(dev_priv);
1297 	i915_gem_init__contexts(dev_priv);
1298 
1299 	mtx_init(&dev_priv->fb_tracking.lock, IPL_NONE);
1300 }
1301 
1302 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
1303 {
1304 	i915_gem_drain_freed_objects(dev_priv);
1305 	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
1306 	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
1307 	drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
1308 }
1309 
1310 int i915_gem_freeze(struct drm_i915_private *dev_priv)
1311 {
1312 	/* Discard all purgeable objects, let userspace recover those as
1313 	 * required after resuming.
1314 	 */
1315 	i915_gem_shrink_all(dev_priv);
1316 
1317 	return 0;
1318 }
1319 
1320 int i915_gem_freeze_late(struct drm_i915_private *i915)
1321 {
1322 	struct drm_i915_gem_object *obj;
1323 	intel_wakeref_t wakeref;
1324 
1325 	/*
1326 	 * Called just before we write the hibernation image.
1327 	 *
1328 	 * We need to update the domain tracking to reflect that the CPU
1329 	 * will be accessing all the pages to create and restore from the
1330 	 * hibernation, and so upon restoration those pages will be in the
1331 	 * CPU domain.
1332 	 *
1333 	 * To make sure the hibernation image contains the latest state,
1334 	 * we update that state just before writing out the image.
1335 	 *
1336 	 * To try and reduce the hibernation image, we manually shrink
1337 	 * the objects as well, see i915_gem_freeze()
1338 	 */
1339 
1340 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1341 
1342 	i915_gem_shrink(i915, -1UL, NULL, ~0);
1343 	i915_gem_drain_freed_objects(i915);
1344 
1345 	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
1346 		i915_gem_object_lock(obj, NULL);
1347 		drm_WARN_ON(&i915->drm,
1348 			    i915_gem_object_set_to_cpu_domain(obj, true));
1349 		i915_gem_object_unlock(obj);
1350 	}
1351 
1352 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1353 
1354 	return 0;
1355 }
1356 
1357 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
1358 {
1359 	struct drm_i915_file_private *file_priv;
1360 	int ret;
1361 
1362 	DRM_DEBUG("\n");
1363 
1364 	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
1365 	if (!file_priv)
1366 		return -ENOMEM;
1367 
1368 	file->driver_priv = file_priv;
1369 	file_priv->dev_priv = i915;
1370 	file_priv->file = file;
1371 
1372 	file_priv->bsd_engine = -1;
1373 	file_priv->hang_timestamp = jiffies;
1374 
1375 	ret = i915_gem_context_open(i915, file);
1376 	if (ret)
1377 		kfree(file_priv);
1378 
1379 	return ret;
1380 }
1381 
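/*
 * Initialise a ww acquire context for locking several GEM objects without
 * deadlocking. Typical usage is along the lines of:
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	err = i915_gem_object_lock(obj, &ww);
 *	if (!err) {
 *		... use obj ...
 *	}
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */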
1382 void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ww, bool intr)
1383 {
1384 	ww_acquire_init(&ww->ctx, &reservation_ww_class);
1385 	INIT_LIST_HEAD(&ww->obj_list);
1386 	ww->intr = intr;
1387 	ww->contended = NULL;
1388 }
1389 
1390 static void i915_gem_ww_ctx_unlock_all(struct i915_gem_ww_ctx *ww)
1391 {
1392 	struct drm_i915_gem_object *obj;
1393 
1394 	while ((obj = list_first_entry_or_null(&ww->obj_list, struct drm_i915_gem_object, obj_link))) {
1395 		list_del(&obj->obj_link);
1396 		i915_gem_object_unlock(obj);
1397 	}
1398 }
1399 
1400 void i915_gem_ww_unlock_single(struct drm_i915_gem_object *obj)
1401 {
1402 	list_del(&obj->obj_link);
1403 	i915_gem_object_unlock(obj);
1404 }
1405 
1406 void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ww)
1407 {
1408 	i915_gem_ww_ctx_unlock_all(ww);
1409 	WARN_ON(ww->contended);
1410 	ww_acquire_fini(&ww->ctx);
1411 }
1412 
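/*
 * After a lock attempt under this context returned -EDEADLK, drop every lock
 * held in the context and then acquire the contended object's lock with the
 * slow-path (sleeping) variant, so the caller can restart its locking loop.
 */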
1413 int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ww)
1414 {
1415 	int ret = 0;
1416 
1417 	if (WARN_ON(!ww->contended))
1418 		return -EINVAL;
1419 
1420 	i915_gem_ww_ctx_unlock_all(ww);
1421 	if (ww->intr)
1422 		ret = dma_resv_lock_slow_interruptible(ww->contended->base.resv, &ww->ctx);
1423 	else
1424 		dma_resv_lock_slow(ww->contended->base.resv, &ww->ctx);
1425 
1426 	if (!ret)
1427 		list_add_tail(&ww->contended->obj_link, &ww->obj_list);
1428 
1429 	ww->contended = NULL;
1430 
1431 	return ret;
1432 }
1433 
1434 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1435 #include "selftests/mock_gem_device.c"
1436 #include "selftests/i915_gem.c"
1437 #endif
1438