1 /*
2  * Copyright © 2008,2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Chris Wilson <chris@chris-wilson.co.uk>
26  *
27  */
28 
29 #include <linux/dma_remapping.h>
30 #include <linux/reservation.h>
31 #include <linux/uaccess.h>
32 #include <asm/cpufeature.h>
33 
34 #include <drm/drmP.h>
35 #include <drm/i915_drm.h>
36 
37 #include "i915_drv.h"
38 #include "i915_gem_dmabuf.h"
39 #include "i915_trace.h"
40 #include "intel_drv.h"
41 #include "intel_frontbuffer.h"
42 
43 #define  __EXEC_OBJECT_HAS_PIN		(1<<31)
44 #define  __EXEC_OBJECT_HAS_FENCE	(1<<30)
45 #define  __EXEC_OBJECT_NEEDS_MAP	(1<<29)
46 #define  __EXEC_OBJECT_NEEDS_BIAS	(1<<28)
47 #define  __EXEC_OBJECT_INTERNAL_FLAGS (0xf<<28) /* all of the above */
48 
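/*
 * Minimum GTT offset for batches flagged __EXEC_OBJECT_NEEDS_BIAS.
 * Keeping the batch above the first 256KiB guards against the negative
 * relocation deltas produced by userspace batch compression
 * underflowing once added to a very low batch address (see
 * eb_get_batch()).
 */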
49 #define BATCH_OFFSET_BIAS (256*1024)
50 
51 struct i915_execbuffer_params {
52 	struct drm_device               *dev;
53 	struct drm_file                 *file;
54 	struct i915_vma			*batch;
55 	u32				dispatch_flags;
56 	u32				args_batch_start_offset;
57 	struct intel_engine_cs          *engine;
58 	struct i915_gem_context         *ctx;
59 	struct drm_i915_gem_request     *request;
60 };
61 
62 struct eb_vmas {
63 	struct list_head vmas;
64 	int and;
65 	union {
66 		struct i915_vma *lut[0];
67 		struct hlist_head buckets[0];
68 	};
69 };
70 
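/*
 * Two lookup strategies: with I915_EXEC_HANDLE_LUT userspace promises
 * that handles are dense array indices, so a flat LUT suffices;
 * otherwise fall back to a hash table whose bucket count starts at
 * half a page of hlist heads and is shrunk towards roughly twice the
 * buffer count.
 */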
71 static struct eb_vmas *
72 eb_create(struct drm_i915_gem_execbuffer2 *args)
73 {
74 	struct eb_vmas *eb = NULL;
75 
76 	if (args->flags & I915_EXEC_HANDLE_LUT) {
77 		unsigned size = args->buffer_count;
78 		size *= sizeof(struct i915_vma *);
79 		size += sizeof(struct eb_vmas);
80 		eb = kmalloc(size, M_DRM, M_NOWAIT);
81 	}
82 
83 	if (eb == NULL) {
84 		unsigned size = args->buffer_count;
85 		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
86 		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
87 		while (count > 2*size)
88 			count >>= 1;
89 		eb = kzalloc(count*sizeof(struct hlist_head) +
90 			     sizeof(struct eb_vmas),
91 			     GFP_TEMPORARY);
92 		if (eb == NULL)
93 			return eb;
94 
95 		eb->and = count - 1;
96 	} else
97 		eb->and = -args->buffer_count;
98 
99 	INIT_LIST_HEAD(&eb->vmas);
100 	return eb;
101 }
102 
103 static void
104 eb_reset(struct eb_vmas *eb)
105 {
106 	if (eb->and >= 0)
107 		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
108 }
109 
110 static struct i915_vma *
111 eb_get_batch(struct eb_vmas *eb)
112 {
113 	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
114 
115 	/*
116 	 * SNA is doing fancy tricks with compressing batch buffers, which leads
117 	 * to negative relocation deltas. Usually that works out ok since the
118 	 * relocate address is still positive, except when the batch is placed
119 	 * very low in the GTT. Ensure this doesn't happen.
120 	 *
121 	 * Note that actual hangs have only been observed on gen7, but for
122 	 * paranoia do it everywhere.
123 	 */
124 	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
125 		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
126 
127 	return vma;
128 }
129 
130 static int
131 eb_lookup_vmas(struct eb_vmas *eb,
132 	       struct drm_i915_gem_exec_object2 *exec,
133 	       const struct drm_i915_gem_execbuffer2 *args,
134 	       struct i915_address_space *vm,
135 	       struct drm_file *file)
136 {
137 	struct drm_i915_gem_object *obj;
138 	struct list_head objects;
139 	int i, ret;
140 
141 	INIT_LIST_HEAD(&objects);
142 	lockmgr(&file->table_lock, LK_EXCLUSIVE);
143 	/* Grab a reference to the object and release the lock so we can look up
144 	 * or create the VMA without using GFP_ATOMIC. */
145 	for (i = 0; i < args->buffer_count; i++) {
146 		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
147 		if (obj == NULL) {
148 			lockmgr(&file->table_lock, LK_RELEASE);
149 			DRM_DEBUG("Invalid object handle %d at index %d\n",
150 				   exec[i].handle, i);
151 			ret = -ENOENT;
152 			goto err;
153 		}
154 
155 		if (!list_empty(&obj->obj_exec_link)) {
156 			lockmgr(&file->table_lock, LK_RELEASE);
157 			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
158 				   obj, exec[i].handle, i);
159 			ret = -EINVAL;
160 			goto err;
161 		}
162 
163 		i915_gem_object_get(obj);
164 		list_add_tail(&obj->obj_exec_link, &objects);
165 	}
166 	lockmgr(&file->table_lock, LK_RELEASE);
167 
168 	i = 0;
169 	while (!list_empty(&objects)) {
170 		struct i915_vma *vma;
171 
172 		obj = list_first_entry(&objects,
173 				       struct drm_i915_gem_object,
174 				       obj_exec_link);
175 
176 		/*
177 		 * NOTE: We can leak any vmas created here when something fails
178 		 * later on. But that's no issue since vma_unbind can deal with
179 		 * vmas which are not actually bound. And since only
180 		 * lookup_or_create exists as an interface to get at the vma
181 		 * from the (obj, vm) we don't run the risk of creating
182 		 * duplicated vmas for the same vm.
183 		 */
184 		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
185 		if (IS_ERR(vma)) {
186 			DRM_DEBUG("Failed to lookup VMA\n");
187 			ret = PTR_ERR(vma);
188 			goto err;
189 		}
190 
191 		/* Transfer ownership from the objects list to the vmas list. */
192 		list_add_tail(&vma->exec_list, &eb->vmas);
193 		list_del_init(&obj->obj_exec_link);
194 
195 		vma->exec_entry = &exec[i];
196 		if (eb->and < 0) {
197 			eb->lut[i] = vma;
198 		} else {
199 			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
200 			vma->exec_handle = handle;
201 			hlist_add_head(&vma->exec_node,
202 				       &eb->buckets[handle & eb->and]);
203 		}
204 		++i;
205 	}
206 
207 	return 0;
208 
209 
210 err:
211 	while (!list_empty(&objects)) {
212 		obj = list_first_entry(&objects,
213 				       struct drm_i915_gem_object,
214 				       obj_exec_link);
215 		list_del_init(&obj->obj_exec_link);
216 		i915_gem_object_put(obj);
217 	}
218 	/*
219 	 * Objects already transferred to the vmas list will be unreferenced by
220 	 * eb_destroy.
221 	 */
222 
223 	return ret;
224 }
225 
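/*
 * eb->and < 0 encodes the direct-LUT case, with -eb->and bounding the
 * valid handle range; otherwise eb->and is the hash-bucket mask.
 */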
226 static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
227 {
228 	if (eb->and < 0) {
229 		if (handle >= -eb->and)
230 			return NULL;
231 		return eb->lut[handle];
232 	} else {
233 		struct hlist_head *head;
234 		struct i915_vma *vma;
235 
236 		head = &eb->buckets[handle & eb->and];
237 		hlist_for_each_entry(vma, head, exec_node) {
238 			if (vma->exec_handle == handle)
239 				return vma;
240 		}
241 		return NULL;
242 	}
243 }
244 
245 static void
246 i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
247 {
248 	struct drm_i915_gem_exec_object2 *entry;
249 	struct drm_i915_gem_object *obj = vma->obj;
250 
251 	if (!drm_mm_node_allocated(&vma->node))
252 		return;
253 
254 	entry = vma->exec_entry;
255 
256 	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
257 		i915_gem_object_unpin_fence(obj);
258 
259 	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
260 		__i915_vma_unpin(vma);
261 
262 	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
263 }
264 
265 static void eb_destroy(struct eb_vmas *eb)
266 {
267 	while (!list_empty(&eb->vmas)) {
268 		struct i915_vma *vma;
269 
270 		vma = list_first_entry(&eb->vmas,
271 				       struct i915_vma,
272 				       exec_list);
273 		list_del_init(&vma->exec_list);
274 		i915_gem_execbuffer_unreserve_vma(vma);
275 		i915_gem_object_put(vma->obj);
276 	}
277 	kfree(eb);
278 }
279 
280 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
281 {
282 	return (HAS_LLC(obj->base.dev) ||
283 		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
284 		obj->cache_level != I915_CACHE_NONE);
285 }
286 
287 /* Used to convert any address to canonical form.
288  * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
289  * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
290  * addresses to be in a canonical form:
291  * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
292  * canonical form [63:48] == [47]."
293  */
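/* Example: 0x0000800000000000 (bit 47 set) sign-extends to
 * 0xffff800000000000, while an address with bit 47 clear passes
 * through unchanged.
 */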
294 #define GEN8_HIGH_ADDRESS_BIT 47
295 static inline uint64_t gen8_canonical_addr(uint64_t address)
296 {
297 	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
298 }
299 
300 static inline uint64_t gen8_noncanonical_addr(uint64_t address)
301 {
302 	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
303 }
304 
305 static inline uint64_t
306 relocation_target(struct drm_i915_gem_relocation_entry *reloc,
307 		  uint64_t target_offset)
308 {
309 	return gen8_canonical_addr((int)reloc->delta + target_offset);
310 }
311 
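/*
 * CPU relocation path: kmap the page backing the relocation and write
 * the new address directly. On gen8+ the target is 64 bits wide, so
 * the upper dword may land on the following page and needs a second
 * kmap.
 */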
312 static int
313 relocate_entry_cpu(struct drm_i915_gem_object *obj,
314 		   struct drm_i915_gem_relocation_entry *reloc,
315 		   uint64_t target_offset)
316 {
317 	struct drm_device *dev = obj->base.dev;
318 	uint32_t page_offset = offset_in_page(reloc->offset);
319 	uint64_t delta = relocation_target(reloc, target_offset);
320 	char *vaddr;
321 	int ret;
322 
323 	ret = i915_gem_object_set_to_cpu_domain(obj, true);
324 	if (ret)
325 		return ret;
326 
327 	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
328 				reloc->offset >> PAGE_SHIFT));
329 	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
330 
331 	if (INTEL_INFO(dev)->gen >= 8) {
332 		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
333 
334 		if (page_offset == 0) {
335 			kunmap_atomic(vaddr);
336 			vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
337 			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
338 		}
339 
340 		*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
341 	}
342 
343 	kunmap_atomic(vaddr);
344 
345 	return 0;
346 }
347 
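/*
 * GTT relocation path: write through an atomic write-combining
 * mapping of the GGTT aperture. As with the CPU path, a gen8+ 64-bit
 * relocation may straddle a page boundary and require remapping for
 * the upper dword.
 */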
348 static int
349 relocate_entry_gtt(struct drm_i915_gem_object *obj,
350 		   struct drm_i915_gem_relocation_entry *reloc,
351 		   uint64_t target_offset)
352 {
353 	struct drm_device *dev = obj->base.dev;
354 	struct drm_i915_private *dev_priv = to_i915(dev);
355 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
356 	uint64_t delta = relocation_target(reloc, target_offset);
357 	uint64_t offset;
358 	void __iomem *reloc_page;
359 	int ret;
360 
361 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
362 	if (ret)
363 		return ret;
364 
365 	ret = i915_gem_object_put_fence(obj);
366 	if (ret)
367 		return ret;
368 
369 	/* Map the page containing the relocation we're going to perform.  */
370 	offset = i915_gem_obj_ggtt_offset(obj);
371 	offset += reloc->offset;
372 	reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
373 					      offset & LINUX_PAGE_MASK);
374 	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
375 
376 	if (INTEL_INFO(dev)->gen >= 8) {
377 		offset += sizeof(uint32_t);
378 
379 		if (offset_in_page(offset) == 0) {
380 			io_mapping_unmap_atomic(reloc_page);
381 			reloc_page =
382 				io_mapping_map_atomic_wc(ggtt->mappable,
383 							 offset);
384 		}
385 
386 		iowrite32(upper_32_bits(delta),
387 			  reloc_page + offset_in_page(offset));
388 	}
389 
390 	io_mapping_unmap_atomic(reloc_page);
391 
392 	return 0;
393 }
394 
395 static void
396 clflush_write32(void *addr, uint32_t value)
397 {
398 	/* This is not a fast path, so KISS. */
399 	drm_clflush_virt_range(addr, sizeof(uint32_t));
400 	*(uint32_t *)addr = value;
401 	drm_clflush_virt_range(addr, sizeof(uint32_t));
402 }
403 
404 static int
405 relocate_entry_clflush(struct drm_i915_gem_object *obj,
406 		       struct drm_i915_gem_relocation_entry *reloc,
407 		       uint64_t target_offset)
408 {
409 	struct drm_device *dev = obj->base.dev;
410 	uint32_t page_offset = offset_in_page(reloc->offset);
411 	uint64_t delta = relocation_target(reloc, target_offset);
412 	char *vaddr;
413 	int ret;
414 
415 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
416 	if (ret)
417 		return ret;
418 
419 	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
420 				reloc->offset >> PAGE_SHIFT));
421 	clflush_write32(vaddr + page_offset, lower_32_bits(delta));
422 
423 	if (INTEL_INFO(dev)->gen >= 8) {
424 		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
425 
426 		if (page_offset == 0) {
427 			kunmap_atomic(vaddr);
428 			vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
429 			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
430 		}
431 
432 		clflush_write32(vaddr + page_offset, upper_32_bits(delta));
433 	}
434 
435 	kunmap_atomic(vaddr);
436 
437 	return 0;
438 }
439 
440 static bool object_is_idle(struct drm_i915_gem_object *obj)
441 {
442 	unsigned long active = i915_gem_object_get_active(obj);
443 	int idx;
444 
445 	for_each_active(active, idx) {
446 		if (!i915_gem_active_is_idle(&obj->last_read[idx],
447 					     &obj->base.dev->struct_mutex))
448 			return false;
449 	}
450 
451 	return true;
452 }
453 
454 static int
455 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
456 				   struct eb_vmas *eb,
457 				   struct drm_i915_gem_relocation_entry *reloc)
458 {
459 	struct drm_device *dev = obj->base.dev;
460 	struct drm_gem_object *target_obj;
461 	struct drm_i915_gem_object *target_i915_obj;
462 	struct i915_vma *target_vma;
463 	uint64_t target_offset;
464 	int ret;
465 
466 	/* we already hold a reference to all valid objects */
467 	target_vma = eb_get_vma(eb, reloc->target_handle);
468 	if (unlikely(target_vma == NULL))
469 		return -ENOENT;
470 	target_i915_obj = target_vma->obj;
471 	target_obj = &target_vma->obj->base;
472 
473 	target_offset = gen8_canonical_addr(target_vma->node.start);
474 
475 	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
476 	 * pipe_control writes because the gpu doesn't properly redirect them
477 	 * through the ppgtt for non-secure batchbuffers. */
478 	if (unlikely(IS_GEN6(dev) &&
479 	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
480 		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
481 				    PIN_GLOBAL);
482 		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
483 			return ret;
484 	}
485 
486 	/* Validate that the target is in a valid r/w GPU domain */
487 	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
488 		DRM_DEBUG("reloc with multiple write domains: "
489 			  "obj %p target %d offset %d "
490 			  "read %08x write %08x",
491 			  obj, reloc->target_handle,
492 			  (int) reloc->offset,
493 			  reloc->read_domains,
494 			  reloc->write_domain);
495 		return -EINVAL;
496 	}
497 	if (unlikely((reloc->write_domain | reloc->read_domains)
498 		     & ~I915_GEM_GPU_DOMAINS)) {
499 		DRM_DEBUG("reloc with read/write non-GPU domains: "
500 			  "obj %p target %d offset %d "
501 			  "read %08x write %08x",
502 			  obj, reloc->target_handle,
503 			  (int) reloc->offset,
504 			  reloc->read_domains,
505 			  reloc->write_domain);
506 		return -EINVAL;
507 	}
508 
509 	target_obj->pending_read_domains |= reloc->read_domains;
510 	target_obj->pending_write_domain |= reloc->write_domain;
511 
512 	/* If the relocation already has the right value in it, no
513 	 * more work needs to be done.
514 	 */
515 	if (target_offset == reloc->presumed_offset)
516 		return 0;
517 
518 	/* Check that the relocation address is valid... */
519 	if (unlikely(reloc->offset >
520 		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
521 		DRM_DEBUG("Relocation beyond object bounds: "
522 			  "obj %p target %d offset %d size %d.\n",
523 			  obj, reloc->target_handle,
524 			  (int) reloc->offset,
525 			  (int) obj->base.size);
526 		return -EINVAL;
527 	}
528 	if (unlikely(reloc->offset & 3)) {
529 		DRM_DEBUG("Relocation not 4-byte aligned: "
530 			  "obj %p target %d offset %d.\n",
531 			  obj, reloc->target_handle,
532 			  (int) reloc->offset);
533 		return -EINVAL;
534 	}
535 
536 	/* We can't wait for rendering with pagefaults disabled */
537 	if (pagefault_disabled() && !object_is_idle(obj))
538 		return -EFAULT;
539 
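	/* Pick the cheapest relocation method the object allows: direct
	 * CPU writes when coherent, writes through the GTT aperture when
	 * mappable, and clflushed CPU writes as a last resort. */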
540 	if (use_cpu_reloc(obj))
541 		ret = relocate_entry_cpu(obj, reloc, target_offset);
542 	else if (obj->map_and_fenceable)
543 		ret = relocate_entry_gtt(obj, reloc, target_offset);
544 	else if (static_cpu_has(X86_FEATURE_CLFLUSH))
545 		ret = relocate_entry_clflush(obj, reloc, target_offset);
546 	else {
547 		WARN_ONCE(1, "Impossible case in relocation handling\n");
548 		ret = -ENODEV;
549 	}
550 
551 	if (ret)
552 		return ret;
553 
554 	/* and update the user's relocation entry */
555 	reloc->presumed_offset = target_offset;
556 
557 	return 0;
558 }
559 
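/*
 * Fast relocation pass: copy the user's relocation entries onto the
 * stack in 512-byte chunks (see N_RELOC) so no pagefault is taken
 * while struct_mutex is held, and write back any presumed_offset that
 * had to change.
 */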
560 static int
561 i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
562 				 struct eb_vmas *eb)
563 {
564 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
565 	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
566 	struct drm_i915_gem_relocation_entry __user *user_relocs;
567 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
568 	int remain, ret;
569 
570 	user_relocs = u64_to_user_ptr(entry->relocs_ptr);
571 
572 	remain = entry->relocation_count;
573 	while (remain) {
574 		struct drm_i915_gem_relocation_entry *r = stack_reloc;
575 		int count = remain;
576 		if (count > ARRAY_SIZE(stack_reloc))
577 			count = ARRAY_SIZE(stack_reloc);
578 		remain -= count;
579 
580 		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
581 			return -EFAULT;
582 
583 		do {
584 			u64 offset = r->presumed_offset;
585 
586 			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
587 			if (ret)
588 				return ret;
589 
590 			if (r->presumed_offset != offset &&
591 			    __put_user(r->presumed_offset, &user_relocs->presumed_offset)) {
592 				return -EFAULT;
593 			}
594 
595 			user_relocs++;
596 			r++;
597 		} while (--count);
598 	}
599 
600 	return 0;
601 #undef N_RELOC
602 }
603 
604 static int
605 i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
606 				      struct eb_vmas *eb,
607 				      struct drm_i915_gem_relocation_entry *relocs)
608 {
609 	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
610 	int i, ret;
611 
612 	for (i = 0; i < entry->relocation_count; i++) {
613 		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
614 		if (ret)
615 			return ret;
616 	}
617 
618 	return 0;
619 }
620 
621 static int
622 i915_gem_execbuffer_relocate(struct eb_vmas *eb)
623 {
624 	struct i915_vma *vma;
625 	int ret = 0;
626 
627 	/* This is the fast path and we cannot handle a pagefault whilst
628 	 * holding the struct mutex lest the user pass in the relocations
629 	 * contained within a mmapped bo, because in such a case the page
630 	 * fault handler would call i915_gem_fault() and we would try to
631 	 * acquire the struct mutex again. Obviously this is bad and so
632 	 * lockdep complains vehemently.
633 	 */
634 	pagefault_disable();
635 	list_for_each_entry(vma, &eb->vmas, exec_list) {
636 		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
637 		if (ret)
638 			break;
639 	}
640 	pagefault_enable();
641 
642 	return ret;
643 }
644 
645 static bool only_mappable_for_reloc(unsigned int flags)
646 {
647 	return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
648 		__EXEC_OBJECT_NEEDS_MAP;
649 }
650 
651 static int
652 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
653 				struct intel_engine_cs *engine,
654 				bool *need_reloc)
655 {
656 	struct drm_i915_gem_object *obj = vma->obj;
657 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
658 	uint64_t flags;
659 	int ret;
660 
661 	flags = PIN_USER;
662 	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
663 		flags |= PIN_GLOBAL;
664 
665 	if (!drm_mm_node_allocated(&vma->node)) {
666 		/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
667 		 * limit address to the first 4GBs for unflagged objects.
668 		 */
669 		if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
670 			flags |= PIN_ZONE_4G;
671 		if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
672 			flags |= PIN_GLOBAL | PIN_MAPPABLE;
673 		if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
674 			flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
675 		if (entry->flags & EXEC_OBJECT_PINNED)
676 			flags |= entry->offset | PIN_OFFSET_FIXED;
677 		if ((flags & PIN_MAPPABLE) == 0)
678 			flags |= PIN_HIGH;
679 	}
680 
681 	ret = i915_vma_pin(vma,
682 			   entry->pad_to_size,
683 			   entry->alignment,
684 			   flags);
685 	if ((ret == -ENOSPC || ret == -E2BIG) &&
686 	    only_mappable_for_reloc(entry->flags))
687 		ret = i915_vma_pin(vma,
688 				   entry->pad_to_size,
689 				   entry->alignment,
690 				   flags & ~PIN_MAPPABLE);
691 	if (ret)
692 		return ret;
693 
694 	entry->flags |= __EXEC_OBJECT_HAS_PIN;
695 
696 	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
697 		ret = i915_gem_object_get_fence(obj);
698 		if (ret)
699 			return ret;
700 
701 		if (i915_gem_object_pin_fence(obj))
702 			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
703 	}
704 
705 	if (entry->offset != vma->node.start) {
706 		entry->offset = vma->node.start;
707 		*need_reloc = true;
708 	}
709 
710 	if (entry->flags & EXEC_OBJECT_WRITE) {
711 		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
712 		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
713 	}
714 
715 	return 0;
716 }
717 
718 static bool
719 need_reloc_mappable(struct i915_vma *vma)
720 {
721 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
722 
723 	if (entry->relocation_count == 0)
724 		return false;
725 
726 	if (!i915_vma_is_ggtt(vma))
727 		return false;
728 
729 	/* See also use_cpu_reloc() */
730 	if (HAS_LLC(vma->obj->base.dev))
731 		return false;
732 
733 	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
734 		return false;
735 
736 	return true;
737 }
738 
739 static bool
740 eb_vma_misplaced(struct i915_vma *vma)
741 {
742 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
743 	struct drm_i915_gem_object *obj = vma->obj;
744 
745 	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
746 		!i915_vma_is_ggtt(vma));
747 
748 	if (entry->alignment &&
749 	    vma->node.start & (entry->alignment - 1))
750 		return true;
751 
752 	if (vma->node.size < entry->pad_to_size)
753 		return true;
754 
755 	if (entry->flags & EXEC_OBJECT_PINNED &&
756 	    vma->node.start != entry->offset)
757 		return true;
758 
759 	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
760 	    vma->node.start < BATCH_OFFSET_BIAS)
761 		return true;
762 
763 	/* avoid costly ping-pong once a batch bo has ended up non-mappable */
764 	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
765 		return !only_mappable_for_reloc(entry->flags);
766 
767 	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
768 	    (vma->node.start + vma->node.size - 1) >> 32)
769 		return true;
770 
771 	return false;
772 }
773 
774 static int
775 i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
776 			    struct list_head *vmas,
777 			    struct i915_gem_context *ctx,
778 			    bool *need_relocs)
779 {
780 	struct drm_i915_gem_object *obj;
781 	struct i915_vma *vma;
782 	struct i915_address_space *vm;
783 	struct list_head ordered_vmas;
784 	struct list_head pinned_vmas;
785 	bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
786 	int retry;
787 
788 	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
789 
790 	INIT_LIST_HEAD(&ordered_vmas);
791 	INIT_LIST_HEAD(&pinned_vmas);
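	/* Order the list so that vmas needing a mappable binding are
	 * bound first, before the aperture fragments; ordinary vmas
	 * follow, and vmas pinned to a fixed offset go last. */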
792 	while (!list_empty(vmas)) {
793 		struct drm_i915_gem_exec_object2 *entry;
794 		bool need_fence, need_mappable;
795 
796 		vma = list_first_entry(vmas, struct i915_vma, exec_list);
797 		obj = vma->obj;
798 		entry = vma->exec_entry;
799 
800 		if (ctx->flags & CONTEXT_NO_ZEROMAP)
801 			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
802 
803 		if (!has_fenced_gpu_access)
804 			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
805 		need_fence =
806 			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
807 			i915_gem_object_is_tiled(obj);
808 		need_mappable = need_fence || need_reloc_mappable(vma);
809 
810 		if (entry->flags & EXEC_OBJECT_PINNED)
811 			list_move_tail(&vma->exec_list, &pinned_vmas);
812 		else if (need_mappable) {
813 			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
814 			list_move(&vma->exec_list, &ordered_vmas);
815 		} else
816 			list_move_tail(&vma->exec_list, &ordered_vmas);
817 
818 		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
819 		obj->base.pending_write_domain = 0;
820 	}
821 	list_splice(&ordered_vmas, vmas);
822 	list_splice(&pinned_vmas, vmas);
823 
824 	/* Attempt to pin all of the buffers into the GTT.
825 	 * This is done in 3 phases:
826 	 *
827 	 * 1a. Unbind all objects that do not match the GTT constraints for
828 	 *     the execbuffer (fenceable, mappable, alignment etc).
829 	 * 1b. Increment pin count for already bound objects.
830 	 * 2.  Bind new objects.
831 	 * 3.  Decrement pin count.
832 	 *
833 	 * This avoids unnecessary unbinding of later objects in order to make
834 	 * room for the earlier objects *unless* we need to defragment.
835 	 */
836 	retry = 0;
837 	do {
838 		int ret = 0;
839 
840 		/* Unbind any ill-fitting objects or pin. */
841 		list_for_each_entry(vma, vmas, exec_list) {
842 			if (!drm_mm_node_allocated(&vma->node))
843 				continue;
844 
845 			if (eb_vma_misplaced(vma))
846 				ret = i915_vma_unbind(vma);
847 			else
848 				ret = i915_gem_execbuffer_reserve_vma(vma,
849 								      engine,
850 								      need_relocs);
851 			if (ret)
852 				goto err;
853 		}
854 
855 		/* Bind fresh objects */
856 		list_for_each_entry(vma, vmas, exec_list) {
857 			if (drm_mm_node_allocated(&vma->node))
858 				continue;
859 
860 			ret = i915_gem_execbuffer_reserve_vma(vma, engine,
861 							      need_relocs);
862 			if (ret)
863 				goto err;
864 		}
865 
866 err:
867 		if (ret != -ENOSPC || retry++)
868 			return ret;
869 
870 		/* Decrement pin count for bound objects */
871 		list_for_each_entry(vma, vmas, exec_list)
872 			i915_gem_execbuffer_unreserve_vma(vma);
873 
874 		ret = i915_gem_evict_vm(vm, true);
875 		if (ret)
876 			return ret;
877 	} while (1);
878 }
879 
880 static int
881 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
882 				  struct drm_i915_gem_execbuffer2 *args,
883 				  struct drm_file *file,
884 				  struct intel_engine_cs *engine,
885 				  struct eb_vmas *eb,
886 				  struct drm_i915_gem_exec_object2 *exec,
887 				  struct i915_gem_context *ctx)
888 {
889 	struct drm_i915_gem_relocation_entry *reloc;
890 	struct i915_address_space *vm;
891 	struct i915_vma *vma;
892 	bool need_relocs;
893 	int *reloc_offset;
894 	int i, total, ret;
895 	unsigned count = args->buffer_count;
896 
897 	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
898 
899 	/* We may process another execbuffer during the unlock... */
900 	while (!list_empty(&eb->vmas)) {
901 		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
902 		list_del_init(&vma->exec_list);
903 		i915_gem_execbuffer_unreserve_vma(vma);
904 		i915_gem_object_put(vma->obj);
905 	}
906 
907 	mutex_unlock(&dev->struct_mutex);
908 
909 	total = 0;
910 	for (i = 0; i < count; i++)
911 		total += exec[i].relocation_count;
912 
913 	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
914 	reloc = drm_malloc_ab(total, sizeof(*reloc));
915 	if (reloc == NULL || reloc_offset == NULL) {
916 		drm_free_large(reloc);
917 		drm_free_large(reloc_offset);
918 		mutex_lock(&dev->struct_mutex);
919 		return -ENOMEM;
920 	}
921 
922 	total = 0;
923 	for (i = 0; i < count; i++) {
924 		struct drm_i915_gem_relocation_entry __user *user_relocs;
925 		u64 invalid_offset = (u64)-1;
926 		int j;
927 
928 		user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);
929 
930 		if (copy_from_user(reloc+total, user_relocs,
931 				   exec[i].relocation_count * sizeof(*reloc))) {
932 			ret = -EFAULT;
933 			mutex_lock(&dev->struct_mutex);
934 			goto err;
935 		}
936 
937 		/* As we do not update the known relocation offsets after
938 		 * relocating (due to the complexities in lock handling),
939 		 * we need to mark them as invalid now so that we force the
940 		 * relocation processing next time. Just in case the target
941 		 * object is evicted and then rebound into its old
942 		 * presumed_offset before the next execbuffer - if that
943 		 * happened we would make the mistake of assuming that the
944 		 * relocations were valid.
945 		 */
946 		for (j = 0; j < exec[i].relocation_count; j++) {
947 			if (__copy_to_user(&user_relocs[j].presumed_offset,
948 					   &invalid_offset,
949 					   sizeof(invalid_offset))) {
950 				ret = -EFAULT;
951 				mutex_lock(&dev->struct_mutex);
952 				goto err;
953 			}
954 		}
955 
956 		reloc_offset[i] = total;
957 		total += exec[i].relocation_count;
958 	}
959 
960 	ret = i915_mutex_lock_interruptible(dev);
961 	if (ret) {
962 		mutex_lock(&dev->struct_mutex);
963 		goto err;
964 	}
965 
966 	/* reacquire the objects */
967 	eb_reset(eb);
968 	ret = eb_lookup_vmas(eb, exec, args, vm, file);
969 	if (ret)
970 		goto err;
971 
972 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
973 	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
974 					  &need_relocs);
975 	if (ret)
976 		goto err;
977 
978 	list_for_each_entry(vma, &eb->vmas, exec_list) {
979 		int offset = vma->exec_entry - exec;
980 		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
981 							    reloc + reloc_offset[offset]);
982 		if (ret)
983 			goto err;
984 	}
985 
986 	/* Leave the user relocations as they are; this is the painfully slow path,
987 	 * and we want to avoid the complication of dropping the lock whilst
988 	 * having buffers reserved in the aperture and so causing spurious
989 	 * ENOSPC for random operations.
990 	 */
991 
992 err:
993 	drm_free_large(reloc);
994 	drm_free_large(reloc_offset);
995 	return ret;
996 }
997 
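/*
 * Mask of all engines other than the one the request is submitted on;
 * objects still active on any of those need to be synced before this
 * engine may touch them.
 */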
998 static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
999 {
1000 	unsigned int mask;
1001 
1002 	mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
1003 	mask <<= I915_BO_ACTIVE_SHIFT;
1004 
1005 	return mask;
1006 }
1007 
1008 static int
1009 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
1010 				struct list_head *vmas)
1011 {
1012 	const unsigned int other_rings = eb_other_engines(req);
1013 	struct i915_vma *vma;
1014 	uint32_t flush_domains = 0;
1015 	bool flush_chipset = false;
1016 	int ret;
1017 
1018 	list_for_each_entry(vma, vmas, exec_list) {
1019 		struct drm_i915_gem_object *obj = vma->obj;
1020 
1021 		if (obj->flags & other_rings) {
1022 			ret = i915_gem_object_sync(obj, req);
1023 			if (ret)
1024 				return ret;
1025 		}
1026 
1027 		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
1028 			flush_chipset |= i915_gem_clflush_object(obj, false);
1029 
1030 		flush_domains |= obj->base.write_domain;
1031 	}
1032 
1033 	if (flush_chipset)
1034 		i915_gem_chipset_flush(req->engine->i915);
1035 
1036 	if (flush_domains & I915_GEM_DOMAIN_GTT)
1037 		wmb();
1038 
1039 	/* Unconditionally invalidate GPU caches and TLBs. */
1040 	return req->engine->emit_flush(req, EMIT_INVALIDATE);
1041 }
1042 
1043 static bool
1044 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
1045 {
1046 	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
1047 		return false;
1048 
1049 	/* Kernel clipping was a DRI1 misfeature */
1050 	if (exec->num_cliprects || exec->cliprects_ptr)
1051 		return false;
1052 
1053 	if (exec->DR4 == 0xffffffff) {
1054 		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1055 		exec->DR4 = 0;
1056 	}
1057 	if (exec->DR1 || exec->DR4)
1058 		return false;
1059 
1060 	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
1061 		return false;
1062 
1063 	return true;
1064 }
1065 
1066 static int
1067 validate_exec_list(struct drm_device *dev,
1068 		   struct drm_i915_gem_exec_object2 *exec,
1069 		   int count)
1070 {
1071 	unsigned relocs_total = 0;
1072 	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
1073 	unsigned invalid_flags;
1074 	int i;
1075 
1076 	/* INTERNAL flags must not overlap with external ones */
1077 	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);
1078 
1079 	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
1080 	if (USES_FULL_PPGTT(dev))
1081 		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
1082 
1083 	for (i = 0; i < count; i++) {
1084 		char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
1085 		int length; /* limited by fault_in_pages_readable() */
1086 
1087 		if (exec[i].flags & invalid_flags)
1088 			return -EINVAL;
1089 
1090 		/* Offset can be used as input (EXEC_OBJECT_PINNED), reject
1091 		 * any non-page-aligned or non-canonical addresses.
1092 		 */
1093 		if (exec[i].flags & EXEC_OBJECT_PINNED) {
1094 			if (exec[i].offset !=
1095 			    gen8_canonical_addr(exec[i].offset & I915_GTT_PAGE_MASK))
1096 				return -EINVAL;
1097 
1098 			/* From the drm_mm perspective the address space is continuous,
1099 			 * so from this point we're always using non-canonical
1100 			 * form internally.
1101 			 */
1102 			exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
1103 		}
1104 
1105 		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
1106 			return -EINVAL;
1107 
1108 		/* pad_to_size was once a reserved field, so sanitize it */
1109 		if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
1110 			if (offset_in_page(exec[i].pad_to_size))
1111 				return -EINVAL;
1112 		} else {
1113 			exec[i].pad_to_size = 0;
1114 		}
1115 
1116 		/* First check for malicious input causing overflow in
1117 		 * the worst case where we need to allocate the entire
1118 		 * relocation tree as a single array.
1119 		 */
1120 		if (exec[i].relocation_count > relocs_max - relocs_total)
1121 			return -EINVAL;
1122 		relocs_total += exec[i].relocation_count;
1123 
1124 		length = exec[i].relocation_count *
1125 			sizeof(struct drm_i915_gem_relocation_entry);
1126 		/*
1127 		 * We must check that the entire relocation array is safe
1128 		 * to read, but since we may need to update the presumed
1129 		 * offsets during execution, check for full write access.
1130 		 */
1131 #if 0
1132 		if (!access_ok(VERIFY_WRITE, ptr, length))
1133 			return -EFAULT;
1134 #endif
1135 
1136 		if (likely(!i915.prefault_disable)) {
1137 			if (fault_in_multipages_readable(ptr, length))
1138 				return -EFAULT;
1139 		}
1140 	}
1141 
1142 	return 0;
1143 }
1144 
1145 static struct i915_gem_context *
1146 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
1147 			  struct intel_engine_cs *engine, const u32 ctx_id)
1148 {
1149 	struct i915_gem_context *ctx = NULL;
1150 	struct i915_ctx_hang_stats *hs;
1151 
1152 	if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
1153 		return ERR_PTR(-EINVAL);
1154 
1155 	ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
1156 	if (IS_ERR(ctx))
1157 		return ctx;
1158 
1159 	hs = &ctx->hang_stats;
1160 	if (hs->banned) {
1161 		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
1162 		return ERR_PTR(-EIO);
1163 	}
1164 
1165 	return ctx;
1166 }
1167 
1168 void i915_vma_move_to_active(struct i915_vma *vma,
1169 			     struct drm_i915_gem_request *req,
1170 			     unsigned int flags)
1171 {
1172 	struct drm_i915_gem_object *obj = vma->obj;
1173 	const unsigned int idx = req->engine->id;
1174 
1175 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1176 
1177 	obj->dirty = 1; /* be paranoid  */
1178 
1179 	/* Add a reference if we're newly entering the active list.
1180 	 * The order in which we add operations to the retirement queue is
1181 	 * vital here: mark_active adds to the start of the callback list,
1182 	 * such that subsequent callbacks are called first. Therefore we
1183 	 * add the active reference first and queue for it to be dropped
1184 	 * *last*.
1185 	 */
1186 	if (!i915_gem_object_is_active(obj))
1187 		i915_gem_object_get(obj);
1188 	i915_gem_object_set_active(obj, idx);
1189 	i915_gem_active_set(&obj->last_read[idx], req);
1190 
1191 	if (flags & EXEC_OBJECT_WRITE) {
1192 		i915_gem_active_set(&obj->last_write, req);
1193 
1194 		intel_fb_obj_invalidate(obj, ORIGIN_CS);
1195 
1196 		/* update for the implicit flush after a batch */
1197 		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1198 	}
1199 
1200 	if (flags & EXEC_OBJECT_NEEDS_FENCE) {
1201 		i915_gem_active_set(&obj->last_fence, req);
1202 		if (flags & __EXEC_OBJECT_HAS_FENCE) {
1203 			struct drm_i915_private *dev_priv = req->i915;
1204 
1205 			list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
1206 				       &dev_priv->mm.fence_list);
1207 		}
1208 	}
1209 
1210 	i915_vma_set_active(vma, idx);
1211 	i915_gem_active_set(&vma->last_read[idx], req);
1212 	list_move_tail(&vma->vm_link, &vma->vm->active_list);
1213 }
1214 
1215 static void eb_export_fence(struct drm_i915_gem_object *obj,
1216 			    struct drm_i915_gem_request *req,
1217 			    unsigned int flags)
1218 {
1219 	struct reservation_object *resv;
1220 
1221 	resv = i915_gem_object_get_dmabuf_resv(obj);
1222 	if (!resv)
1223 		return;
1224 
1225 	/* Ignore errors from failing to allocate the new fence, we can't
1226 	 * handle an error right now. Worst case should be missed
1227 	 * synchronisation leading to rendering corruption.
1228 	 */
1229 	ww_mutex_lock(&resv->lock, NULL);
1230 	if (flags & EXEC_OBJECT_WRITE)
1231 		reservation_object_add_excl_fence(resv, &req->fence);
1232 	else if (reservation_object_reserve_shared(resv) == 0)
1233 		reservation_object_add_shared_fence(resv, &req->fence);
1234 	ww_mutex_unlock(&resv->lock);
1235 }
1236 
1237 static void
1238 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
1239 				   struct drm_i915_gem_request *req)
1240 {
1241 	struct i915_vma *vma;
1242 
1243 	list_for_each_entry(vma, vmas, exec_list) {
1244 		struct drm_i915_gem_object *obj = vma->obj;
1245 		u32 old_read = obj->base.read_domains;
1246 		u32 old_write = obj->base.write_domain;
1247 
1248 		obj->base.write_domain = obj->base.pending_write_domain;
1249 		if (obj->base.write_domain)
1250 			vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
1251 		else
1252 			obj->base.pending_read_domains |= obj->base.read_domains;
1253 		obj->base.read_domains = obj->base.pending_read_domains;
1254 
1255 		i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
1256 		eb_export_fence(obj, req, vma->exec_entry->flags);
1257 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
1258 	}
1259 }
1260 
1261 static int
1262 i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
1263 {
1264 	struct intel_ring *ring = req->ring;
1265 	int ret, i;
1266 
1267 	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
1268 		DRM_DEBUG("sol reset is gen7/rcs only\n");
1269 		return -EINVAL;
1270 	}
1271 
1272 	ret = intel_ring_begin(req, 4 * 3);
1273 	if (ret)
1274 		return ret;
1275 
1276 	for (i = 0; i < 4; i++) {
1277 		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1278 		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
1279 		intel_ring_emit(ring, 0);
1280 	}
1281 
1282 	intel_ring_advance(ring);
1283 
1284 	return 0;
1285 }
1286 
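/*
 * Run the command parser over the user batch, copying the vetted
 * contents into a shadow buffer taken from the engine's batch pool;
 * on success the shadow vma replaces the user batch in the execbuf.
 */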
1287 static struct i915_vma*
1288 i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
1289 			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
1290 			  struct drm_i915_gem_object *batch_obj,
1291 			  struct eb_vmas *eb,
1292 			  u32 batch_start_offset,
1293 			  u32 batch_len,
1294 			  bool is_master)
1295 {
1296 	struct drm_i915_gem_object *shadow_batch_obj;
1297 	struct i915_vma *vma;
1298 	int ret;
1299 
1300 	shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
1301 						   PAGE_ALIGN(batch_len));
1302 	if (IS_ERR(shadow_batch_obj))
1303 		return ERR_CAST(shadow_batch_obj);
1304 
1305 	ret = intel_engine_cmd_parser(engine,
1306 				      batch_obj,
1307 				      shadow_batch_obj,
1308 				      batch_start_offset,
1309 				      batch_len,
1310 				      is_master);
1311 	if (ret)
1312 		goto err;
1313 
1314 	ret = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
1315 	if (ret)
1316 		goto err;
1317 
1318 	i915_gem_object_unpin_pages(shadow_batch_obj);
1319 
1320 	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
1321 
1322 	vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
1323 	vma->exec_entry = shadow_exec_entry;
1324 	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
1325 	i915_gem_object_get(shadow_batch_obj);
1326 	list_add_tail(&vma->exec_list, &eb->vmas);
1327 
1328 	return vma;
1329 
1330 err:
1331 	i915_gem_object_unpin_pages(shadow_batch_obj);
1332 	if (ret == -EACCES) /* unhandled chained batch */
1333 		return NULL;
1334 	else
1335 		return ERR_PTR(ret);
1336 }
1337 
1338 static int
1339 execbuf_submit(struct i915_execbuffer_params *params,
1340 	       struct drm_i915_gem_execbuffer2 *args,
1341 	       struct list_head *vmas)
1342 {
1343 	struct drm_i915_private *dev_priv = params->request->i915;
1344 	u64 exec_start, exec_len;
1345 	int instp_mode;
1346 	u32 instp_mask;
1347 	int ret;
1348 
1349 	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
1350 	if (ret)
1351 		return ret;
1352 
1353 	ret = i915_switch_context(params->request);
1354 	if (ret)
1355 		return ret;
1356 
1357 	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1358 	instp_mask = I915_EXEC_CONSTANTS_MASK;
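	/* Validate the requested constants-addressing mode: non-default
	 * modes are RCS-only, unsupported before gen4, and REL_SURFACE
	 * is rejected on gen6+ where the INSTPM bit changed meaning. */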
1359 	switch (instp_mode) {
1360 	case I915_EXEC_CONSTANTS_REL_GENERAL:
1361 	case I915_EXEC_CONSTANTS_ABSOLUTE:
1362 	case I915_EXEC_CONSTANTS_REL_SURFACE:
1363 		if (instp_mode != 0 && params->engine->id != RCS) {
1364 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1365 			return -EINVAL;
1366 		}
1367 
1368 		if (instp_mode != dev_priv->relative_constants_mode) {
1369 			if (INTEL_INFO(dev_priv)->gen < 4) {
1370 				DRM_DEBUG("no rel constants on pre-gen4\n");
1371 				return -EINVAL;
1372 			}
1373 
1374 			if (INTEL_INFO(dev_priv)->gen > 5 &&
1375 			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1376 				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1377 				return -EINVAL;
1378 			}
1379 
1380 			/* The HW changed the meaning on this bit on gen6 */
1381 			if (INTEL_INFO(dev_priv)->gen >= 6)
1382 				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1383 		}
1384 		break;
1385 	default:
1386 		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
1387 		return -EINVAL;
1388 	}
1389 
1390 	if (params->engine->id == RCS &&
1391 	    instp_mode != dev_priv->relative_constants_mode) {
1392 		struct intel_ring *ring = params->request->ring;
1393 
1394 		ret = intel_ring_begin(params->request, 4);
1395 		if (ret)
1396 			return ret;
1397 
1398 		intel_ring_emit(ring, MI_NOOP);
1399 		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1400 		intel_ring_emit_reg(ring, INSTPM);
1401 		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
1402 		intel_ring_advance(ring);
1403 
1404 		dev_priv->relative_constants_mode = instp_mode;
1405 	}
1406 
1407 	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1408 		ret = i915_reset_gen7_sol_offsets(params->request);
1409 		if (ret)
1410 			return ret;
1411 	}
1412 
1413 	exec_len   = args->batch_len;
1414 	exec_start = params->batch->node.start +
1415 		     params->args_batch_start_offset;
1416 
1417 	if (exec_len == 0)
1418 		exec_len = params->batch->size;
1419 
1420 	ret = params->engine->emit_bb_start(params->request,
1421 					    exec_start, exec_len,
1422 					    params->dispatch_flags);
1423 	if (ret)
1424 		return ret;
1425 
1426 	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
1427 
1428 	i915_gem_execbuffer_move_to_active(vmas, params->request);
1429 
1430 	return 0;
1431 }
1432 
1433 /**
1434  * Find one BSD ring to dispatch the corresponding BSD command.
1435  * The engine index is returned.
1436  */
1437 static unsigned int
1438 gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
1439 			 struct drm_file *file)
1440 {
1441 	struct drm_i915_file_private *file_priv = file->driver_priv;
1442 
1443 	/* Check whether the file_priv has already selected one ring. */
1444 	if ((int)file_priv->bsd_engine < 0) {
1445 		/* If not, use the ping-pong mechanism to select one. */
1446 		mutex_lock(&dev_priv->drm.struct_mutex);
1447 		file_priv->bsd_engine = dev_priv->mm.bsd_engine_dispatch_index;
1448 		dev_priv->mm.bsd_engine_dispatch_index ^= 1;
1449 		mutex_unlock(&dev_priv->drm.struct_mutex);
1450 	}
1451 
1452 	return file_priv->bsd_engine;
1453 }
1454 
1455 #define I915_USER_RINGS (4)
1456 
1457 static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
1458 	[I915_EXEC_DEFAULT]	= RCS,
1459 	[I915_EXEC_RENDER]	= RCS,
1460 	[I915_EXEC_BLT]		= BCS,
1461 	[I915_EXEC_BSD]		= VCS,
1462 	[I915_EXEC_VEBOX]	= VECS
1463 };
1464 
1465 static struct intel_engine_cs *
1466 eb_select_engine(struct drm_i915_private *dev_priv,
1467 		 struct drm_file *file,
1468 		 struct drm_i915_gem_execbuffer2 *args)
1469 {
1470 	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
1471 	struct intel_engine_cs *engine;
1472 
1473 	if (user_ring_id > I915_USER_RINGS) {
1474 		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
1475 		return NULL;
1476 	}
1477 
1478 	if ((user_ring_id != I915_EXEC_BSD) &&
1479 	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
1480 		DRM_DEBUG("execbuf with non bsd ring but with invalid "
1481 			  "bsd dispatch flags: %d\n", (int)(args->flags));
1482 		return NULL;
1483 	}
1484 
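	/* With two BSD rings, the I915_EXEC_BSD_MASK bits either select
	 * a specific ring (RING1/RING2) or, at BSD_DEFAULT, defer to a
	 * per-file ping-pong that balances load across both. */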
1485 	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
1486 		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
1487 
1488 		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
1489 			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
1490 		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
1491 			   bsd_idx <= I915_EXEC_BSD_RING2) {
1492 			bsd_idx >>= I915_EXEC_BSD_SHIFT;
1493 			bsd_idx--;
1494 		} else {
1495 			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
1496 				  bsd_idx);
1497 			return NULL;
1498 		}
1499 
1500 		engine = &dev_priv->engine[_VCS(bsd_idx)];
1501 	} else {
1502 		engine = &dev_priv->engine[user_ring_map[user_ring_id]];
1503 	}
1504 
1505 	if (!intel_engine_initialized(engine)) {
1506 		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
1507 		return NULL;
1508 	}
1509 
1510 	return engine;
1511 }
1512 
1513 static int
1514 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1515 		       struct drm_file *file,
1516 		       struct drm_i915_gem_execbuffer2 *args,
1517 		       struct drm_i915_gem_exec_object2 *exec)
1518 {
1519 	struct drm_i915_private *dev_priv = to_i915(dev);
1520 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
1521 	struct eb_vmas *eb;
1522 	struct drm_i915_gem_exec_object2 shadow_exec_entry;
1523 	struct intel_engine_cs *engine;
1524 	struct i915_gem_context *ctx;
1525 	struct i915_address_space *vm;
1526 	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
1527 	struct i915_execbuffer_params *params = &params_master;
1528 	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1529 	u32 dispatch_flags;
1530 	int ret;
1531 	bool need_relocs;
1532 
1533 	if (!i915_gem_check_execbuffer(args))
1534 		return -EINVAL;
1535 
1536 	ret = validate_exec_list(dev, exec, args->buffer_count);
1537 	if (ret)
1538 		return ret;
1539 
1540 	dispatch_flags = 0;
1541 	if (args->flags & I915_EXEC_SECURE) {
1542 #if 0
1543 		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
1544 		    return -EPERM;
1545 #endif
1546 
1547 		dispatch_flags |= I915_DISPATCH_SECURE;
1548 	}
1549 	if (args->flags & I915_EXEC_IS_PINNED)
1550 		dispatch_flags |= I915_DISPATCH_PINNED;
1551 
1552 	engine = eb_select_engine(dev_priv, file, args);
1553 	if (!engine)
1554 		return -EINVAL;
1555 
1556 	if (args->buffer_count < 1) {
1557 		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1558 		return -EINVAL;
1559 	}
1560 
1561 	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
1562 		if (!HAS_RESOURCE_STREAMER(dev)) {
1563 			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
1564 			return -EINVAL;
1565 		}
1566 		if (engine->id != RCS) {
1567 			DRM_DEBUG("RS is not available on %s\n",
1568 				 engine->name);
1569 			return -EINVAL;
1570 		}
1571 
1572 		dispatch_flags |= I915_DISPATCH_RS;
1573 	}
1574 
1575 	/* Take a local wakeref for preparing to dispatch the execbuf as
1576 	 * we expect to access the hardware fairly frequently in the
1577 	 * process. Upon first dispatch, we acquire another prolonged
1578 	 * wakeref that we hold until the GPU has been idle for at least
1579 	 * 100ms.
1580 	 */
1581 	intel_runtime_pm_get(dev_priv);
1582 
1583 	ret = i915_mutex_lock_interruptible(dev);
1584 	if (ret)
1585 		goto pre_mutex_err;
1586 
1587 	ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
1588 	if (IS_ERR(ctx)) {
1589 		mutex_unlock(&dev->struct_mutex);
1590 		ret = PTR_ERR(ctx);
1591 		goto pre_mutex_err;
1592 	}
1593 
1594 	i915_gem_context_get(ctx);
1595 
1596 	if (ctx->ppgtt)
1597 		vm = &ctx->ppgtt->base;
1598 	else
1599 		vm = &ggtt->base;
1600 
1601 	memset(&params_master, 0x00, sizeof(params_master));
1602 
1603 	eb = eb_create(args);
1604 	if (eb == NULL) {
1605 		i915_gem_context_put(ctx);
1606 		mutex_unlock(&dev->struct_mutex);
1607 		ret = -ENOMEM;
1608 		goto pre_mutex_err;
1609 	}
1610 
1611 	/* Look up object handles */
1612 	ret = eb_lookup_vmas(eb, exec, args, vm, file);
1613 	if (ret)
1614 		goto err;
1615 
1616 	/* take note of the batch buffer before we might reorder the lists */
1617 	params->batch = eb_get_batch(eb);
1618 
1619 	/* Move the objects en-masse into the GTT, evicting if necessary. */
1620 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1621 	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
1622 					  &need_relocs);
1623 	if (ret)
1624 		goto err;
1625 
1626 	/* The objects are in their final locations, apply the relocations. */
1627 	if (need_relocs)
1628 		ret = i915_gem_execbuffer_relocate(eb);
1629 	if (ret) {
1630 		if (ret == -EFAULT) {
1631 			ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
1632 								engine,
1633 								eb, exec, ctx);
1634 			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1635 		}
1636 		if (ret)
1637 			goto err;
1638 	}
1639 
1640 	/* Set the pending read domains for the batch buffer to COMMAND */
1641 	if (params->batch->obj->base.pending_write_domain) {
1642 		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
1643 		ret = -EINVAL;
1644 		goto err;
1645 	}
1646 
1647 	params->args_batch_start_offset = args->batch_start_offset;
1648 	if (intel_engine_needs_cmd_parser(engine) && args->batch_len) {
1649 		struct i915_vma *vma;
1650 
1651 		vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
1652 						params->batch->obj,
1653 						eb,
1654 						args->batch_start_offset,
1655 						args->batch_len,
1656 						drm_is_current_master(file));
1657 		if (IS_ERR(vma)) {
1658 			ret = PTR_ERR(vma);
1659 			goto err;
1660 		}
1661 
1662 		if (vma) {
1663 			/*
1664 			 * Batch parsed and accepted:
1665 			 *
1666 			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
1667 			 * bit from MI_BATCH_BUFFER_START commands issued in
1668 			 * the dispatch_execbuffer implementations. We
1669 			 * specifically don't want that set on batches the
1670 			 * command parser has accepted.
1671 			 */
1672 			dispatch_flags |= I915_DISPATCH_SECURE;
1673 			params->args_batch_start_offset = 0;
1674 			params->batch = vma;
1675 		}
1676 	}
1677 
1678 	params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
1679 
1680 	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1681 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
1682 	 * hsw should have this fixed, but bdw mucks it up again. */
1683 	if (dispatch_flags & I915_DISPATCH_SECURE) {
1684 		struct drm_i915_gem_object *obj = params->batch->obj;
1685 
1686 		/*
1687 		 * At first glance it looks freaky that we pin the batch here
1688 		 * outside of the reservation loop. But:
1689 		 * - The batch is already pinned into the relevant ppgtt, so we
1690 		 *   already have the backing storage fully allocated.
1691 		 * - No other BO uses the global gtt (well contexts, but meh),
1692 		 *   so we don't really have issues with multiple objects not
1693 		 *   fitting due to fragmentation.
1694 		 * So this is actually safe.
1695 		 */
1696 		ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
1697 		if (ret)
1698 			goto err;
1699 
1700 		params->batch = i915_gem_obj_to_ggtt(obj);
1701 	}
1702 
1703 	/* Allocate a request for this batch buffer nice and early. */
1704 	params->request = i915_gem_request_alloc(engine, ctx);
1705 	if (IS_ERR(params->request)) {
1706 		ret = PTR_ERR(params->request);
1707 		goto err_batch_unpin;
1708 	}
1709 
1710 	ret = i915_gem_request_add_to_client(params->request, file);
1711 	if (ret)
1712 		goto err_request;
1713 
1714 	/*
1715 	 * Save assorted stuff away to pass through to *_submission().
1716 	 * NB: This data should be 'persistent' and not local as it will be
1717 	 * kept around beyond the duration of the IOCTL once the GPU
1718 	 * scheduler arrives.
1719 	 */
1720 	params->dev                     = dev;
1721 	params->file                    = file;
1722 	params->engine                    = engine;
1723 	params->dispatch_flags          = dispatch_flags;
1724 	params->ctx                     = ctx;
1725 
1726 	ret = execbuf_submit(params, args, &eb->vmas);
1727 err_request:
1728 	__i915_add_request(params->request, params->batch->obj, ret == 0);
1729 
1730 err_batch_unpin:
1731 	/*
1732 	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
1733 	 * batch vma for correctness. For less ugly and less fragility this
1734 	 * needs to be adjusted to also track the ggtt batch vma properly as
1735 	 * active.
1736 	 */
1737 	if (dispatch_flags & I915_DISPATCH_SECURE)
1738 		i915_vma_unpin(params->batch);
1739 err:
1740 	/* the request owns the ref now */
1741 	i915_gem_context_put(ctx);
1742 	eb_destroy(eb);
1743 
1744 	mutex_unlock(&dev->struct_mutex);
1745 
1746 pre_mutex_err:
1747 	/* intel_gpu_busy should also get a ref, so it will be freed when the
1748 	 * device is really idle. */
1749 	intel_runtime_pm_put(dev_priv);
1750 	return ret;
1751 }
1752 
1753 /*
1754  * Legacy execbuffer just creates an exec2 list from the original exec object
1755  * list array and passes it to the real function.
1756  */
1757 int
1758 i915_gem_execbuffer(struct drm_device *dev, void *data,
1759 		    struct drm_file *file)
1760 {
1761 	struct drm_i915_gem_execbuffer *args = data;
1762 	struct drm_i915_gem_execbuffer2 exec2;
1763 	struct drm_i915_gem_exec_object *exec_list = NULL;
1764 	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1765 	int ret, i;
1766 
1767 	if (args->buffer_count < 1) {
1768 		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1769 		return -EINVAL;
1770 	}
1771 
1772 	/* Copy in the exec list from userland */
1773 	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1774 	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1775 	if (exec_list == NULL || exec2_list == NULL) {
1776 		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1777 			  args->buffer_count);
1778 		drm_free_large(exec_list);
1779 		drm_free_large(exec2_list);
1780 		return -ENOMEM;
1781 	}
1782 	ret = copy_from_user(exec_list,
1783 			     u64_to_user_ptr(args->buffers_ptr),
1784 			     sizeof(*exec_list) * args->buffer_count);
1785 	if (ret != 0) {
1786 		DRM_DEBUG("copy %d exec entries failed %d\n",
1787 			  args->buffer_count, ret);
1788 		drm_free_large(exec_list);
1789 		drm_free_large(exec2_list);
1790 		return -EFAULT;
1791 	}
1792 
1793 	for (i = 0; i < args->buffer_count; i++) {
1794 		exec2_list[i].handle = exec_list[i].handle;
1795 		exec2_list[i].relocation_count = exec_list[i].relocation_count;
1796 		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
1797 		exec2_list[i].alignment = exec_list[i].alignment;
1798 		exec2_list[i].offset = exec_list[i].offset;
1799 		if (INTEL_INFO(dev)->gen < 4)
1800 			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
1801 		else
1802 			exec2_list[i].flags = 0;
1803 	}
1804 
1805 	exec2.buffers_ptr = args->buffers_ptr;
1806 	exec2.buffer_count = args->buffer_count;
1807 	exec2.batch_start_offset = args->batch_start_offset;
1808 	exec2.batch_len = args->batch_len;
1809 	exec2.DR1 = args->DR1;
1810 	exec2.DR4 = args->DR4;
1811 	exec2.num_cliprects = args->num_cliprects;
1812 	exec2.cliprects_ptr = args->cliprects_ptr;
1813 	exec2.flags = I915_EXEC_RENDER;
1814 	i915_execbuffer2_set_context_id(exec2, 0);
1815 
1816 	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1817 	if (!ret) {
1818 		struct drm_i915_gem_exec_object __user *user_exec_list =
1819 			u64_to_user_ptr(args->buffers_ptr);
1820 
1821 		/* Copy the new buffer offsets back to the user's exec list. */
1822 		for (i = 0; i < args->buffer_count; i++) {
1823 			exec2_list[i].offset =
1824 				gen8_canonical_addr(exec2_list[i].offset);
1825 			ret = __copy_to_user(&user_exec_list[i].offset,
1826 					     &exec2_list[i].offset,
1827 					     sizeof(user_exec_list[i].offset));
1828 			if (ret) {
1829 				ret = -EFAULT;
1830 				DRM_DEBUG("failed to copy %d exec entries "
1831 					  "back to user (%d)\n",
1832 					  args->buffer_count, ret);
1833 				break;
1834 			}
1835 		}
1836 	}
1837 
1838 	drm_free_large(exec_list);
1839 	drm_free_large(exec2_list);
1840 	return ret;
1841 }
1842 
1843 int
1844 i915_gem_execbuffer2(struct drm_device *dev, void *data,
1845 		     struct drm_file *file)
1846 {
1847 	struct drm_i915_gem_execbuffer2 *args = data;
1848 	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1849 	int ret;
1850 
1851 	if (args->buffer_count < 1 ||
1852 	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1853 		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1854 		return -EINVAL;
1855 	}
1856 
1857 	if (args->rsvd2 != 0) {
1858 		DRM_DEBUG("dirty rsvd2 field\n");
1859 		return -EINVAL;
1860 	}
1861 
1862 	exec2_list = drm_malloc_gfp(args->buffer_count,
1863 				    sizeof(*exec2_list),
1864 				    GFP_TEMPORARY);
1865 	if (exec2_list == NULL) {
1866 		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1867 			  args->buffer_count);
1868 		return -ENOMEM;
1869 	}
1870 	ret = copy_from_user(exec2_list,
1871 			     u64_to_user_ptr(args->buffers_ptr),
1872 			     sizeof(*exec2_list) * args->buffer_count);
1873 	if (ret != 0) {
1874 		DRM_DEBUG("copy %d exec entries failed %d\n",
1875 			  args->buffer_count, ret);
1876 		drm_free_large(exec2_list);
1877 		return -EFAULT;
1878 	}
1879 
1880 	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1881 	if (!ret) {
1882 		/* Copy the new buffer offsets back to the user's exec list. */
1883 		struct drm_i915_gem_exec_object2 __user *user_exec_list =
1884 				   u64_to_user_ptr(args->buffers_ptr);
1885 		int i;
1886 
1887 		for (i = 0; i < args->buffer_count; i++) {
1888 			exec2_list[i].offset =
1889 				gen8_canonical_addr(exec2_list[i].offset);
1890 			ret = __copy_to_user(&user_exec_list[i].offset,
1891 					     &exec2_list[i].offset,
1892 					     sizeof(user_exec_list[i].offset));
1893 			if (ret) {
1894 				ret = -EFAULT;
1895 				DRM_DEBUG("failed to copy %d exec entries "
1896 					  "back to user\n",
1897 					  args->buffer_count);
1898 				break;
1899 			}
1900 		}
1901 	}
1902 
1903 	drm_free_large(exec2_list);
1904 	return ret;
1905 }
1906