/* openbsd-src/sys/dev/pci/drm/i915/i915_gem.c (revision c1a45aed656e7d5627c30c92421893a76f370ccb) */
/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drm_vma_manager.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include <dev/pci/agpvar.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_pm.h"

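/*
 * Helpers for the pread/pwrite GGTT slow path: reserve a temporary node
 * low in the mappable aperture so that, when an object cannot be pinned
 * wholesale, we can rebind and copy one page at a time through it.
 */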
static int
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
	int err;

	err = mutex_lock_interruptible(&ggtt->vm.mutex);
	if (err)
		return err;

	memset(node, 0, sizeof(*node));
	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					  size, 0, I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);

	mutex_unlock(&ggtt->vm.mutex);

	return err;
}

static void
remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
	mutex_lock(&ggtt->vm.mutex);
	drm_mm_remove_node(node);
	mutex_unlock(&ggtt->vm.mutex);
}

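/*
 * GET_APERTURE ioctl: report the total GGTT size and how much of it is
 * currently available, i.e. not reserved and not held by pinned vmas.
 */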
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	if (mutex_lock_interruptible(&ggtt->vm.mutex))
		return -EINTR;

	pinned = ggtt->vm.reserved;
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;

	mutex_unlock(&ggtt->vm.mutex);

	args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

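/*
 * Unbind every vma of @obj from its address space. Entries are shuffled
 * onto a private list so obj->vma.lock can be dropped around each
 * unbind, and the I915_GEM_OBJECT_UNBIND_* flags select how aggressive
 * to be about active or locked vmas.
 */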
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
			   unsigned long flags)
{
	struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
	DRM_LIST_HEAD(still_in_list);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	int ret;

	if (list_empty(&obj->vma.list))
		return 0;

	/*
	 * As some machines use ACPI to handle runtime-resume callbacks, and
	 * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
	 * as they are required by the shrinker. Ergo, we wake the device up
	 * first just in case.
	 */
	wakeref = intel_runtime_pm_get(rpm);

try_again:
	ret = 0;
	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		struct i915_address_space *vm = vma->vm;

		list_move_tail(&vma->obj_link, &still_in_list);
		if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
			continue;

		if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
			ret = -EBUSY;
			break;
		}

		ret = -EAGAIN;
		if (!i915_vm_tryopen(vm))
			break;

		/* Prevent vma being freed by i915_vma_parked as we unbind */
		vma = __i915_vma_get(vma);
		spin_unlock(&obj->vma.lock);

		if (vma) {
			ret = -EBUSY;
			if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
			    !i915_vma_is_active(vma)) {
				if (flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK) {
					if (mutex_trylock(&vma->vm->mutex)) {
						ret = __i915_vma_unbind(vma);
						mutex_unlock(&vma->vm->mutex);
					} else {
						ret = -EBUSY;
					}
				} else {
					ret = i915_vma_unbind(vma);
				}
			}

			__i915_vma_put(vma);
		}

		i915_vm_close(vm);
		spin_lock(&obj->vma.lock);
	}
	list_splice_init(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);

	if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
		rcu_barrier(); /* flush the i915_vm_release() */
		goto try_again;
	}

	intel_runtime_pm_put(rpm, wakeref);

	return ret;
}

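/* Per-page copy function for the shmem pread fastpath.
 * Flushes any stale cachelines before the copy to userspace if
 * needs_clflush is set.
 */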
static int
shmem_pread(struct vm_page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap_va(vaddr);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	unsigned int needs_clflush;
	unsigned int idx, offset;
	char __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_unlock;

	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (ret)
		goto err_unpin;

	i915_gem_object_finish_access(obj);
	i915_gem_object_unlock(obj);

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct vm_page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_unpin_pages(obj);
	return ret;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	i915_gem_object_unlock(obj);
	return ret;
}

#ifdef __linux__
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}
#else
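/*
 * OpenBSD has no struct io_mapping; an equivalent transient
 * write-combining window into the aperture is created through the agp
 * bus_space helpers instead.
 */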
static inline bool
gtt_user_read(struct drm_i915_private *dev_priv,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	bus_space_handle_t bsh;
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	agp_map_atomic(dev_priv->agph, base, &bsh);
	vaddr = bus_space_vaddr(dev_priv->bst, bsh);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	agp_unmap_atomic(dev_priv->agph, bsh);
	if (unwritten) {
		agp_map_subregion(dev_priv->agph, base, PAGE_SIZE, &bsh);
		vaddr = bus_space_vaddr(dev_priv->bst, bsh);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
	}
	return unwritten;
}
#endif

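/*
 * Prepare CPU access to @obj through the mappable aperture: either pin
 * the whole (untiled) object and return its vma, or fall back to a
 * single-page scratch @node and return NULL. Runs under a ww context so
 * it can back off and retry on -EDEADLK.
 */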
static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
					     struct drm_mm_node *node,
					     bool write)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct i915_vma *vma;
	struct i915_gem_ww_ctx ww;
	int ret;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	vma = ERR_PTR(-ENODEV);
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_ww;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto err_ww;

	if (!i915_gem_object_is_tiled(obj))
		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
						  PIN_MAPPABLE |
						  PIN_NONBLOCK /* NOWARN */ |
						  PIN_NOEVICT);
	if (vma == ERR_PTR(-EDEADLK)) {
		ret = -EDEADLK;
		goto err_ww;
	} else if (!IS_ERR(vma)) {
		node->start = i915_ggtt_offset(vma);
		node->flags = 0;
	} else {
		ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
		if (ret)
			goto err_ww;
		GEM_BUG_ON(!drm_mm_node_allocated(node));
		vma = NULL;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret) {
		if (drm_mm_node_allocated(node)) {
			ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
			remove_mappable_node(ggtt, node);
		} else {
			i915_vma_unpin(vma);
		}
	}

err_ww:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	return ret ? ERR_PTR(ret) : vma;
}

static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
				 struct drm_mm_node *node,
				 struct i915_vma *vma)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;

	i915_gem_object_unpin_pages(obj);
	if (drm_mm_node_allocated(node)) {
		ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
		remove_mappable_node(ggtt, node);
	} else {
		i915_vma_unpin(vma);
	}
}

static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	void __user *user_data;
	struct i915_vma *vma;
	u64 remain, offset;
	int ret = 0;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	vma = i915_gem_gtt_prepare(obj, &node, false);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_rpm;
	}

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (drm_mm_node_allocated(&node)) {
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
		} else {
			page_base += offset & LINUX_PAGE_MASK;
		}

		if (gtt_user_read(i915, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	/* PREAD is disallowed for all platforms after TGL-LP.  This also
	 * covers all platforms with local memory.
	 */
	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
		return -EOPNOTSUPP;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source.  */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);
	ret = -ENODEV;
	if (obj->ops->pread)
		ret = obj->ops->pread(obj, args);
	if (ret != -ENODEV)
		goto out;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */
#ifdef __linux__
static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}
#else
static inline bool
ggtt_write(struct drm_i915_private *dev_priv,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	bus_space_handle_t bsh;
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	agp_map_atomic(dev_priv->agph, base, &bsh);
	vaddr = bus_space_vaddr(dev_priv->bst, bsh);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	agp_unmap_atomic(dev_priv->agph, bsh);
	if (unwritten) {
		agp_map_subregion(dev_priv->agph, base, PAGE_SIZE, &bsh);
		vaddr = bus_space_vaddr(dev_priv->bst, bsh);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
	}

	return unwritten;
}
#endif

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret = 0;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(rpm);
		if (!wakeref)
			return -EFAULT;
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		wakeref = intel_runtime_pm_get(rpm);
	}

	vma = i915_gem_gtt_prepare(obj, &node, true);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_rpm;
	}

	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (drm_mm_node_allocated(&node)) {
			/* flush the write before we modify the GGTT */
			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & LINUX_PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(i915, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

	i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
	intel_runtime_pm_put(rpm, wakeref);
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush_after is set.
 */
static int
shmem_pwrite(struct vm_page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap_va(vaddr);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	void __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_unlock;

	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (ret)
		goto err_unpin;

	i915_gem_object_finish_access(obj);
	i915_gem_object_unlock(obj);

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire write range.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = curcpu()->ci_cflushsz - 1;

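	/*
	 * Example: with a 64-byte cacheline the mask is 63, so a write
	 * whose offset or length is not a multiple of 64 touches a
	 * partial cacheline and triggers the pre-write clflush in
	 * shmem_pwrite() below.
	 */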
	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct vm_page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

	i915_gem_object_unpin_pages(obj);
	return ret;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	i915_gem_object_unlock(obj);
	return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	/* PWRITE is disallowed for all platforms after TGL-LP.  This also
	 * covers all platforms with local memory.
	 */
	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
		return -EOPNOTSUPP;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	/* Writes not allowed into this read-only object */
	if (i915_gem_object_is_readonly(obj)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (i915_gem_object_has_struct_page(obj))
			ret = i915_gem_shmem_pwrite(obj, args);
	}

err:
	i915_gem_object_put(obj);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}

void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &i915->ggtt.userfault_list, userfault_link)
		__i915_gem_object_release_mmap_gtt(obj);

	/*
	 * The fences will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];

		/*
		 * Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */

		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}

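/*
 * A misplaced vma that is pinned or active cannot be unbound here;
 * detach it from the object's vma tree instead so i915_vma_instance()
 * builds a fresh vma, leaving the old binding to be torn down once it
 * idles.
 */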
static void discard_ggtt_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	spin_lock(&obj->vma.lock);
	if (!RB_EMPTY_NODE(&vma->obj_node)) {
		rb_erase(&vma->obj_node, &obj->vma.tree);
		RB_CLEAR_NODE(&vma->obj_node);
	}
	spin_unlock(&obj->vma.lock);
}

struct i915_vma *
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    const struct i915_ggtt_view *view,
			    u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct i915_vma *vma;
	int ret;

	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
		/*
		 * If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > ggtt->mappable_end)
			return ERR_PTR(-E2BIG);

		/*
		 * If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to do a search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > ggtt->mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

new_vma:
	vma = i915_vma_instance(obj, &ggtt->vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			if (flags & PIN_MAPPABLE &&
			    vma->fence_size > ggtt->mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

		if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
			discard_ggtt_vma(vma);
			goto new_vma;
		}

		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	if (ww)
		ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
	else
		ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);

	if (ret)
		return ERR_PTR(ret);

	if (vma->fence && !i915_gem_object_is_tiled(obj)) {
		mutex_lock(&ggtt->vm.mutex);
		i915_vma_revoke_fence(vma);
		mutex_unlock(&ggtt->vm.mutex);
	}

	ret = i915_vma_wait_for_bind(vma);
	if (ret) {
		i915_vma_unpin(vma);
		return ERR_PTR(ret);
	}

	return vma;
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out;

	if (i915_gem_object_has_pages(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
			i915_gem_object_clear_tiling_quirk(obj);
			i915_gem_object_make_shrinkable(obj);
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
			i915_gem_object_make_unshrinkable(obj);
			i915_gem_object_set_tiling_quirk(obj);
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED) {
		obj->mm.madv = args->madv;
		if (obj->ops->adjust_lru)
			obj->ops->adjust_lru(obj);
	}

	if (i915_gem_object_has_pages(obj)) {
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);
		if (!list_empty(&obj->mm.link)) {
			struct list_head *list;

			if (obj->mm.madv != I915_MADV_WILLNEED)
				list = &i915->mm.purge_list;
			else
				list = &i915->mm.shrink_list;
			list_move_tail(&obj->mm.link, list);
		}
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;

	i915_gem_object_unlock(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

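/*
 * Main one-time GEM setup: GGTT, clock-gating/context workarounds and
 * GT bring-up. On -EIO the GPU is wedged but enough state is kept alive
 * for KMS to keep working.
 */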
int i915_gem_init(struct drm_i915_private *dev_priv)
{
	int ret;

	/* We need to fallback to 4K pages if host doesn't support huge gtt. */
	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
		mkwrite_device_info(dev_priv)->page_sizes =
			I915_GTT_PAGE_SIZE_4K;

	ret = i915_gem_init_userptr(dev_priv);
	if (ret)
		return ret;

	intel_uc_fetch_firmwares(&dev_priv->gt.uc);
	intel_wopcm_init(&dev_priv->wopcm);

	ret = i915_init_ggtt(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_unlock;
	}

	/*
	 * Despite its name, intel_init_clock_gating applies display clock
	 * gating workarounds, GT mmio workarounds and the occasional GT
	 * power context workaround. Worse, sometimes it includes a context
	 * register workaround which we need to apply before we record the
	 * default HW state for all contexts.
	 *
	 * FIXME: break up the workarounds and apply them at the right time!
	 */
	intel_init_clock_gating(dev_priv);

	ret = intel_gt_init(&dev_priv->gt);
	if (ret)
		goto err_unlock;

	return 0;

	/*
	 * Unwinding is complicated by the fact that we want to handle -EIO
	 * to mean disable GPU submission but keep KMS alive. We want to mark
	 * the HW as irreversibly wedged, but keep enough state around that
	 * the driver doesn't explode during runtime.
	 */
err_unlock:
	i915_gem_drain_workqueue(dev_priv);

	if (ret != -EIO)
		intel_uc_cleanup_firmwares(&dev_priv->gt.uc);

	if (ret == -EIO) {
		/*
		 * Allow engines or uC initialisation to fail by marking the GPU
		 * as wedged. But we only want to do this when the GPU is angry,
		 * for all other failures, such as an allocation failure, bail.
		 */
		if (!intel_gt_is_wedged(&dev_priv->gt)) {
			i915_probe_error(dev_priv,
					 "Failed to initialize GPU, declaring it wedged!\n");
			intel_gt_set_wedged(&dev_priv->gt);
		}

		/* Minimal basic recovery for KMS */
		ret = i915_ggtt_enable_hw(dev_priv);
		i915_ggtt_resume(&dev_priv->ggtt);
		intel_init_clock_gating(dev_priv);
	}

	i915_gem_drain_freed_objects(dev_priv);

	return ret;
}

void i915_gem_driver_register(struct drm_i915_private *i915)
{
	i915_gem_driver_register__shrinker(i915);

	intel_engines_driver_register(i915);
}

void i915_gem_driver_unregister(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
}

void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
	intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);

	i915_gem_suspend_late(dev_priv);
	intel_gt_driver_remove(&dev_priv->gt);
	dev_priv->uabi_engines = RB_ROOT;

	/* Flush any outstanding unpin_work. */
	i915_gem_drain_workqueue(dev_priv);

	i915_gem_drain_freed_objects(dev_priv);
}

void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
	intel_gt_driver_release(&dev_priv->gt);

	intel_wa_list_free(&dev_priv->gt_wa_list);

	intel_uc_cleanup_firmwares(&dev_priv->gt.uc);

	i915_gem_drain_freed_objects(dev_priv);

	drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}

static void i915_gem_init__mm(struct drm_i915_private *i915)
{
	mtx_init(&i915->mm.obj_lock, IPL_TTY);

	init_llist_head(&i915->mm.free_list);

	INIT_LIST_HEAD(&i915->mm.purge_list);
	INIT_LIST_HEAD(&i915->mm.shrink_list);

	i915_gem_init__objects(i915);
}

void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
	i915_gem_init__mm(dev_priv);
	i915_gem_init__contexts(dev_priv);

	mtx_init(&dev_priv->fb_tracking.lock, IPL_NONE);
}

void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_drain_freed_objects(dev_priv);
	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
	drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
}

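/*
 * Allocate the per-file GEM state and open a default context for the
 * new file descriptor; on context-open failure the private data is
 * freed again.
 */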
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = i915;
	file_priv->file = file;

	file_priv->bsd_engine = -1;
	file_priv->hang_timestamp = jiffies;

	ret = i915_gem_context_open(i915, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
#endif
1266