xref: /netbsd-src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_execbuf.c (revision cef8759bd76c1b621f8eab8faa6f208faabc2e15)
1 /*	$NetBSD: vmwgfx_execbuf.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $	*/
2 
3 /**************************************************************************
4  *
5  * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the
10  * "Software"), to deal in the Software without restriction, including
11  * without limitation the rights to use, copy, modify, merge, publish,
12  * distribute, sub license, and/or sell copies of the Software, and to
13  * permit persons to whom the Software is furnished to do so, subject to
14  * the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the
17  * next paragraph) shall be included in all copies or substantial portions
18  * of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
23  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
24  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
25  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
26  * USE OR OTHER DEALINGS IN THE SOFTWARE.
27  *
28  **************************************************************************/
29 
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: vmwgfx_execbuf.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $");
32 
33 #include "vmwgfx_drv.h"
34 #include "vmwgfx_reg.h"
35 #include <drm/ttm/ttm_bo_api.h>
36 #include <drm/ttm/ttm_placement.h>
37 #include "vmwgfx_so.h"
38 #include "vmwgfx_binding.h"
39 
40 #define VMW_RES_HT_ORDER 12
41 
42 /**
43  * struct vmw_resource_relocation - Relocation info for resources
44  *
45  * @head: List head for the software context's relocation list.
46  * @res: Non-ref-counted pointer to the resource.
47  * @offset: Offset into the command buffer, in 4-byte units, of the
48  * id that needs fixup.
49  */
50 struct vmw_resource_relocation {
51 	struct list_head head;
52 	const struct vmw_resource *res;
53 	unsigned long offset;
54 };
55 
56 /**
57  * struct vmw_resource_val_node - Validation info for resources
58  *
59  * @head: List head for the software context's resource list.
60  * @hash: Hash entry for quick resource to val_node lookup.
61  * @res: Ref-counted pointer to the resource.
62  * @new_backup: Refcounted pointer to the new backup buffer.
63  * @staged_bindings: If @res is a context, tracks bindings set up during
64  * the command batch. Otherwise NULL.
65  * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
66  * @first_usage: Set to true the first time the resource is referenced in
67  * the command stream.
68  * @switching_backup: Set if the command stream provides a new backup
69  * buffer for the resource, which is switched to when the resource is
70  * unreserved.
71  * @no_buffer_needed: Set if @switching_backup is true on the first buffer
72  * reference, in which case resource reservation does not need to
73  * allocate a backup buffer for the resource.
74  */
75 struct vmw_resource_val_node {
76 	struct list_head head;
77 	struct drm_hash_item hash;
78 	struct vmw_resource *res;
79 	struct vmw_dma_buffer *new_backup;
80 	struct vmw_ctx_binding_state *staged_bindings;
81 	unsigned long new_backup_offset;
82 	u32 first_usage : 1;
83 	u32 switching_backup : 1;
84 	u32 no_buffer_needed : 1;
85 };
86 
87 /**
88  * struct vmw_cmd_entry - Describe a command for the verifier
89  *
90  * @user_allow: Whether allowed from the execbuf ioctl.
91  * @gb_disable: Whether disabled if guest-backed objects are available.
92  * @gb_enable: Whether enabled iff guest-backed objects are available.
93  */
94 struct vmw_cmd_entry {
95 	int (*func) (struct vmw_private *, struct vmw_sw_context *,
96 		     SVGA3dCmdHeader *);
97 	bool user_allow;
98 	bool gb_disable;
99 	bool gb_enable;
100 };
101 
102 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
103 	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
104 				       (_gb_disable), (_gb_enable)}
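
/*
 * Illustrative sketch (not compiled): VMW_CMD_DEF expands to a designated
 * initializer indexed by the command id relative to SVGA_3D_CMD_BASE, so
 * the verifier's dispatch table can be populated sparsely. The entries
 * and flag values below are assumptions chosen for illustration only.
 */
#if 0
static const struct vmw_cmd_entry example_cmd_entries[] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
};
#endif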
105 
106 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
107 					struct vmw_sw_context *sw_context,
108 					struct vmw_resource *ctx);
109 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
110 				 struct vmw_sw_context *sw_context,
111 				 SVGAMobId *id,
112 				 struct vmw_dma_buffer **vmw_bo_p);
113 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
114 				   struct vmw_dma_buffer *vbo,
115 				   bool validate_as_mob,
116 				   uint32_t *p_val_node);
117 
118 
119 /**
120  * vmw_resources_unreserve - Unreserve resources previously reserved for
121  * command submission.
122  *
123  * @sw_context: Pointer to the software context.
124  * @backoff: Whether command submission failed.
125  */
126 static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
127 				    bool backoff)
128 {
129 	struct vmw_resource_val_node *val;
130 	struct list_head *list = &sw_context->resource_list;
131 
132 	if (sw_context->dx_query_mob && !backoff)
133 		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
134 					  sw_context->dx_query_mob);
135 
136 	list_for_each_entry(val, list, head) {
137 		struct vmw_resource *res = val->res;
138 		bool switch_backup =
139 			(backoff) ? false : val->switching_backup;
140 
141 		/*
142 		 * Transfer staged context bindings to the
143 		 * persistent context binding tracker.
144 		 */
145 		if (unlikely(val->staged_bindings)) {
146 			if (!backoff) {
147 				vmw_binding_state_commit
148 					(vmw_context_binding_state(val->res),
149 					 val->staged_bindings);
150 			}
151 
152 			if (val->staged_bindings != sw_context->staged_bindings)
153 				vmw_binding_state_free(val->staged_bindings);
154 			else
155 				sw_context->staged_bindings_inuse = false;
156 			val->staged_bindings = NULL;
157 		}
158 		vmw_resource_unreserve(res, switch_backup, val->new_backup,
159 				       val->new_backup_offset);
160 		vmw_dmabuf_unreference(&val->new_backup);
161 	}
162 }
163 
164 /**
165  * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
166  * added to the validate list.
167  *
168  * @dev_priv: Pointer to the device private.
169  * @sw_context: The validation context.
170  * @node: The validation node holding this context.
171  */
172 static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
173 				   struct vmw_sw_context *sw_context,
174 				   struct vmw_resource_val_node *node)
175 {
176 	int ret;
177 
178 	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
179 	if (unlikely(ret != 0))
180 		goto out_err;
181 
182 	if (!sw_context->staged_bindings) {
183 		sw_context->staged_bindings =
184 			vmw_binding_state_alloc(dev_priv);
185 		if (IS_ERR(sw_context->staged_bindings)) {
186 			DRM_ERROR("Failed to allocate context binding "
187 				  "information.\n");
188 			ret = PTR_ERR(sw_context->staged_bindings);
189 			sw_context->staged_bindings = NULL;
190 			goto out_err;
191 		}
192 	}
193 
194 	if (sw_context->staged_bindings_inuse) {
195 		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
196 		if (IS_ERR(node->staged_bindings)) {
197 			DRM_ERROR("Failed to allocate context binding "
198 				  "information.\n");
199 			ret = PTR_ERR(node->staged_bindings);
200 			node->staged_bindings = NULL;
201 			goto out_err;
202 		}
203 	} else {
204 		node->staged_bindings = sw_context->staged_bindings;
205 		sw_context->staged_bindings_inuse = true;
206 	}
207 
208 	return 0;
209 out_err:
210 	return ret;
211 }
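
/*
 * Note on the above: the first context validated in a batch borrows the
 * software context's preallocated staged-bindings tracker; any further
 * contexts in the same batch get their own allocation, which
 * vmw_resources_unreserve() releases again when the batch is unreserved.
 */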
212 
213 /**
214  * vmw_resource_val_add - Add a resource to the software context's
215  * resource list if it's not already on it.
216  *
217  * @sw_context: Pointer to the software context.
218  * @res: Pointer to the resource.
219  * @p_node: If non-NULL on entry, on successful return points to a valid
220  * pointer to a struct vmw_resource_val_node.
221  */
222 static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
223 				struct vmw_resource *res,
224 				struct vmw_resource_val_node **p_node)
225 {
226 	struct vmw_private *dev_priv = res->dev_priv;
227 	struct vmw_resource_val_node *node;
228 	struct drm_hash_item *hash;
229 	int ret;
230 
231 	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
232 				    &hash) == 0)) {
233 		node = container_of(hash, struct vmw_resource_val_node, hash);
234 		node->first_usage = false;
235 		if (unlikely(p_node != NULL))
236 			*p_node = node;
237 		return 0;
238 	}
239 
240 	node = kzalloc(sizeof(*node), GFP_KERNEL);
241 	if (unlikely(node == NULL)) {
242 		DRM_ERROR("Failed to allocate a resource validation "
243 			  "entry.\n");
244 		return -ENOMEM;
245 	}
246 
247 	node->hash.key = (unsigned long) res;
248 	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
249 	if (unlikely(ret != 0)) {
250 		DRM_ERROR("Failed to initialize a resource validation "
251 			  "entry.\n");
252 		kfree(node);
253 		return ret;
254 	}
255 	node->res = vmw_resource_reference(res);
256 	node->first_usage = true;
257 	if (unlikely(p_node != NULL))
258 		*p_node = node;
259 
260 	if (!dev_priv->has_mob) {
261 		list_add_tail(&node->head, &sw_context->resource_list);
262 		return 0;
263 	}
264 
265 	switch (vmw_res_type(res)) {
266 	case vmw_res_context:
267 	case vmw_res_dx_context:
268 		list_add(&node->head, &sw_context->ctx_resource_list);
269 		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
270 		break;
271 	case vmw_res_cotable:
272 		list_add_tail(&node->head, &sw_context->ctx_resource_list);
273 		break;
274 	default:
275 		list_add_tail(&node->head, &sw_context->resource_list);
276 		break;
277 	}
278 
279 	return ret;
280 }
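
/*
 * Illustrative sketch (not compiled): a typical caller adds a resource
 * and keys one-time, per-batch setup off of node->first_usage. The
 * function name is an assumption for illustration.
 */
#if 0
static int example_add_resource(struct vmw_sw_context *sw_context,
				struct vmw_resource *res)
{
	struct vmw_resource_val_node *node;
	int ret;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (ret != 0)
		return ret;

	if (node->first_usage) {
		/* First reference in this batch: one-time setup goes here. */
	}
	return 0;
}
#endif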
281 
282 /**
283  * vmw_view_res_val_add - Add a view, and the surface it points to,
284  * to the validation list
285  *
286  * @sw_context: The software context holding the validation list.
287  * @view: Pointer to the view resource.
288  *
289  * Returns 0 on success, negative error code otherwise.
290  */
291 static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
292 				struct vmw_resource *view)
293 {
294 	int ret;
295 
296 	/*
297 	 * First add the resource the view is pointing to, otherwise
298 	 * it may be swapped out when the view is validated.
299 	 */
300 	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
301 	if (ret)
302 		return ret;
303 
304 	return vmw_resource_val_add(sw_context, view, NULL);
305 }
306 
307 /**
308  * vmw_view_id_val_add - Look up a view and add it, and the surface it
309  * points to, to the validation list.
310  *
311  * @sw_context: The software context holding the validation list.
312  * @view_type: The view type to look up.
313  * @id: view id of the view.
314  *
315  * The view is represented by a view id and the DX context it's created on,
316  * or scheduled for creation on. If there is no DX context set, the function
317  * will return -EINVAL. Otherwise returns 0 on success or a negative error code.
318  */
319 static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
320 			       enum vmw_view_type view_type, u32 id)
321 {
322 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
323 	struct vmw_resource *view;
324 	int ret;
325 
326 	if (!ctx_node) {
327 		DRM_ERROR("DX Context not set.\n");
328 		return -EINVAL;
329 	}
330 
331 	view = vmw_view_lookup(sw_context->man, view_type, id);
332 	if (IS_ERR(view))
333 		return PTR_ERR(view);
334 
335 	ret = vmw_view_res_val_add(sw_context, view);
336 	vmw_resource_unreference(&view);
337 
338 	return ret;
339 }
340 
341 /**
342  * vmw_resource_context_res_add - Put resources previously bound to a context on
343  * the validation list
344  *
345  * @dev_priv: Pointer to a device private structure
346  * @sw_context: Pointer to a software context used for this command submission
347  * @ctx: Pointer to the context resource
348  *
349  * This function puts all resources that were previously bound to @ctx on
350  * the resource validation list. This is part of the context state re-emission.
351  */
352 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
353 					struct vmw_sw_context *sw_context,
354 					struct vmw_resource *ctx)
355 {
356 	struct list_head *binding_list;
357 	struct vmw_ctx_bindinfo *entry;
358 	int ret = 0;
359 	struct vmw_resource *res;
360 	u32 i;
361 
362 	/* Add all cotables to the validation list. */
363 	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
364 		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
365 			res = vmw_context_cotable(ctx, i);
366 			if (IS_ERR(res))
367 				continue;
368 
369 			ret = vmw_resource_val_add(sw_context, res, NULL);
370 			vmw_resource_unreference(&res);
371 			if (unlikely(ret != 0))
372 				return ret;
373 		}
374 	}
375 
376 
377 	/* Add all resources bound to the context to the validation list */
378 	mutex_lock(&dev_priv->binding_mutex);
379 	binding_list = vmw_context_binding_list(ctx);
380 
381 	list_for_each_entry(entry, binding_list, ctx_list) {
382 		/* entry->res is not refcounted */
383 		res = vmw_resource_reference_unless_doomed(entry->res);
384 		if (unlikely(res == NULL))
385 			continue;
386 
387 		if (vmw_res_type(entry->res) == vmw_res_view)
388 			ret = vmw_view_res_val_add(sw_context, entry->res);
389 		else
390 			ret = vmw_resource_val_add(sw_context, entry->res,
391 						   NULL);
392 		vmw_resource_unreference(&res);
393 		if (unlikely(ret != 0))
394 			break;
395 	}
396 
397 	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
398 		struct vmw_dma_buffer *dx_query_mob;
399 
400 		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
401 		if (dx_query_mob)
402 			ret = vmw_bo_to_validate_list(sw_context,
403 						      dx_query_mob,
404 						      true, NULL);
405 	}
406 
407 	mutex_unlock(&dev_priv->binding_mutex);
408 	return ret;
409 }
410 
411 /**
412  * vmw_resource_relocation_add - Add a relocation to the relocation list
413  *
414  * @list: Pointer to head of relocation list.
415  * @res: The resource.
416  * @offset: Offset into the command buffer currently being parsed where the
417  * id that needs fixup is located. Granularity is 4 bytes.
418  */
419 static int vmw_resource_relocation_add(struct list_head *list,
420 				       const struct vmw_resource *res,
421 				       unsigned long offset)
422 {
423 	struct vmw_resource_relocation *rel;
424 
425 	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
426 	if (unlikely(rel == NULL)) {
427 		DRM_ERROR("Failed to allocate a resource relocation.\n");
428 		return -ENOMEM;
429 	}
430 
431 	rel->res = res;
432 	rel->offset = offset;
433 	list_add_tail(&rel->head, list);
434 
435 	return 0;
436 }
437 
438 /**
439  * vmw_resource_relocations_free - Free all relocations on a list
440  *
441  * @list: Pointer to the head of the relocation list.
442  */
443 static void vmw_resource_relocations_free(struct list_head *list)
444 {
445 	struct vmw_resource_relocation *rel, *n;
446 
447 	list_for_each_entry_safe(rel, n, list, head) {
448 		list_del(&rel->head);
449 		kfree(rel);
450 	}
451 }
452 
453 /**
454  * vmw_resource_relocations_apply - Apply all relocations on a list
455  *
456  * @cb: Pointer to the start of the command buffer being patched. This need
457  * not be the same buffer as the one being parsed when the relocation
458  * list was built, but the contents must be the same modulo the
459  * resource ids.
460  * @list: Pointer to the head of the relocation list.
461  */
462 static void vmw_resource_relocations_apply(uint32_t *cb,
463 					   struct list_head *list)
464 {
465 	struct vmw_resource_relocation *rel;
466 
467 	list_for_each_entry(rel, list, head) {
468 		if (likely(rel->res != NULL))
469 			cb[rel->offset] = rel->res->id;
470 		else
471 			cb[rel->offset] = SVGA_3D_CMD_NOP;
472 	}
473 }
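
/*
 * Illustrative sketch (not compiled): the relocation lifecycle tying the
 * three helpers above together. While parsing, the 4-byte-granularity
 * offset of each resource id is recorded relative to the buffer start;
 * after validation the ids are patched in place and the list is freed.
 * The names buf_start, id_loc and cb are assumptions for illustration.
 */
#if 0
static int example_relocation_lifecycle(const struct vmw_resource *res,
					uint32_t *buf_start,
					uint32_t *id_loc,
					uint32_t *cb)
{
	struct list_head relocations;
	int ret;

	INIT_LIST_HEAD(&relocations);

	/* Record where the id lives, in 4-byte units from buf_start. */
	ret = vmw_resource_relocation_add(&relocations, res,
					  id_loc - buf_start);
	if (ret != 0)
		return ret;

	/* After validation, patch the id in the buffer being submitted... */
	vmw_resource_relocations_apply(cb, &relocations);
	/* ...and release the bookkeeping. */
	vmw_resource_relocations_free(&relocations);
	return 0;
}
#endif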
474 
475 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
476 			   struct vmw_sw_context *sw_context,
477 			   SVGA3dCmdHeader *header)
478 {
479 	return -EINVAL;
480 }
481 
482 static int vmw_cmd_ok(struct vmw_private *dev_priv,
483 		      struct vmw_sw_context *sw_context,
484 		      SVGA3dCmdHeader *header)
485 {
486 	return 0;
487 }
488 
489 /**
490  * vmw_bo_to_validate_list - add a bo to a validate list
491  *
492  * @sw_context: The software context used for this command submission batch.
493  * @vbo: The buffer object to add.
494  * @validate_as_mob: Validate this buffer as a MOB.
495  * @p_val_node: If non-NULL, will be updated with the validate node number
496  * on return.
497  *
498  * Returns -EINVAL if the limit of number of buffer objects per command
499  * submission is reached.
500  */
501 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
502 				   struct vmw_dma_buffer *vbo,
503 				   bool validate_as_mob,
504 				   uint32_t *p_val_node)
505 {
506 	uint32_t val_node;
507 	struct vmw_validate_buffer *vval_buf;
508 	struct ttm_validate_buffer *val_buf;
509 	struct drm_hash_item *hash;
510 	int ret;
511 
512 	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
513 				    &hash) == 0)) {
514 		vval_buf = container_of(hash, struct vmw_validate_buffer,
515 					hash);
516 		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
517 			DRM_ERROR("Inconsistent buffer usage.\n");
518 			return -EINVAL;
519 		}
520 		val_buf = &vval_buf->base;
521 		val_node = vval_buf - sw_context->val_bufs;
522 	} else {
523 		val_node = sw_context->cur_val_buf;
524 		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
525 			DRM_ERROR("Max number of DMA buffers per submission "
526 				  "exceeded.\n");
527 			return -EINVAL;
528 		}
529 		vval_buf = &sw_context->val_bufs[val_node];
530 		vval_buf->hash.key = (unsigned long) vbo;
531 		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
532 		if (unlikely(ret != 0)) {
533 			DRM_ERROR("Failed to initialize a buffer validation "
534 				  "entry.\n");
535 			return ret;
536 		}
537 		++sw_context->cur_val_buf;
538 		val_buf = &vval_buf->base;
539 		val_buf->bo = ttm_bo_reference(&vbo->base);
540 		val_buf->shared = false;
541 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
542 		vval_buf->validate_as_mob = validate_as_mob;
543 	}
544 
545 	if (p_val_node)
546 		*p_val_node = val_node;
547 
548 	return 0;
549 }
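
/*
 * Illustrative sketch (not compiled): adding a buffer object to the
 * validation list and recording its validate node index for a later
 * relocation, as the MOB and guest-pointer translators below do. The
 * wrapper name is an assumption for illustration.
 */
#if 0
static int example_add_bo(struct vmw_private *dev_priv,
			  struct vmw_sw_context *sw_context,
			  struct vmw_dma_buffer *vbo,
			  uint32_t *index_out)
{
	/* Validate as a MOB only when the device supports MOBs. */
	return vmw_bo_to_validate_list(sw_context, vbo,
				       dev_priv->has_mob, index_out);
}
#endif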
550 
551 /**
552  * vmw_resources_reserve - Reserve all resources on the sw_context's
553  * resource list.
554  *
555  * @sw_context: Pointer to the software context.
556  *
557  * Note that since VMware's command submission is currently protected by
558  * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
559  * since only a single thread at once will attempt this.
560  */
561 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
562 {
563 	struct vmw_resource_val_node *val;
564 	int ret = 0;
565 
566 	list_for_each_entry(val, &sw_context->resource_list, head) {
567 		struct vmw_resource *res = val->res;
568 
569 		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
570 		if (unlikely(ret != 0))
571 			return ret;
572 
573 		if (res->backup) {
574 			struct vmw_dma_buffer *vbo = res->backup;
575 
576 			ret = vmw_bo_to_validate_list
577 				(sw_context, vbo,
578 				 vmw_resource_needs_backup(res), NULL);
579 
580 			if (unlikely(ret != 0))
581 				return ret;
582 		}
583 	}
584 
585 	if (sw_context->dx_query_mob) {
586 		struct vmw_dma_buffer *expected_dx_query_mob;
587 
588 		expected_dx_query_mob =
589 			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
590 		if (expected_dx_query_mob &&
591 		    expected_dx_query_mob != sw_context->dx_query_mob) {
592 			ret = -EINVAL;
593 		}
594 	}
595 
596 	return ret;
597 }
598 
599 /**
600  * vmw_resources_validate - Validate all resources on the sw_context's
601  * resource list.
602  *
603  * @sw_context: Pointer to the software context.
604  *
605  * Before this function is called, all resource backup buffers must have
606  * been validated.
607  */
608 static int vmw_resources_validate(struct vmw_sw_context *sw_context)
609 {
610 	struct vmw_resource_val_node *val;
611 	int ret;
612 
613 	list_for_each_entry(val, &sw_context->resource_list, head) {
614 		struct vmw_resource *res = val->res;
615 		struct vmw_dma_buffer *backup = res->backup;
616 
617 		ret = vmw_resource_validate(res);
618 		if (unlikely(ret != 0)) {
619 			if (ret != -ERESTARTSYS)
620 				DRM_ERROR("Failed to validate resource.\n");
621 			return ret;
622 		}
623 
624 		/* Check if the resource switched backup buffer */
625 		if (backup && res->backup && (backup != res->backup)) {
626 			struct vmw_dma_buffer *vbo = res->backup;
627 
628 			ret = vmw_bo_to_validate_list
629 				(sw_context, vbo,
630 				 vmw_resource_needs_backup(res), NULL);
631 			if (ret) {
632 				ttm_bo_unreserve(&vbo->base);
633 				return ret;
634 			}
635 		}
636 	}
637 	return 0;
638 }
639 
640 /**
641  * vmw_cmd_res_reloc_add - Add a resource to a software context's
642  * relocation- and validation lists.
643  *
644  * @dev_priv: Pointer to a struct vmw_private identifying the device.
645  * @sw_context: Pointer to the software context.
646  * @id_loc: Pointer to where the id that needs translation is located.
647  * @res: Valid pointer to a struct vmw_resource.
648  * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
649  * used for this resource is returned here.
650  */
651 static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
652 				 struct vmw_sw_context *sw_context,
653 				 uint32_t *id_loc,
654 				 struct vmw_resource *res,
655 				 struct vmw_resource_val_node **p_val)
656 {
657 	int ret;
658 	struct vmw_resource_val_node *node;
659 
660 	if (p_val) *p_val = NULL;
661 	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
662 					  res,
663 					  id_loc - sw_context->buf_start);
664 	if (unlikely(ret != 0))
665 		return ret;
666 
667 	ret = vmw_resource_val_add(sw_context, res, &node);
668 	if (unlikely(ret != 0))
669 		return ret;
670 
671 	if (p_val)
672 		*p_val = node;
673 
674 	return 0;
675 }
676 
677 
678 /**
679  * vmw_cmd_res_check - Check that a resource is present and if so, put it
680  * on the resource validate list unless it's already there.
681  *
682  * @dev_priv: Pointer to a device private structure.
683  * @sw_context: Pointer to the software context.
684  * @res_type: Resource type.
685  * @converter: User-space visible type specific information.
686  * @id_loc: Pointer to the location in the command buffer currently being
687  * parsed from where the user-space resource id handle is located.
688  * @p_val: Pointer to pointer to the resource validation node. Populated
689  * on exit.
690  */
691 static int
692 vmw_cmd_res_check(struct vmw_private *dev_priv,
693 		  struct vmw_sw_context *sw_context,
694 		  enum vmw_res_type res_type,
695 		  const struct vmw_user_resource_conv *converter,
696 		  uint32_t *id_loc,
697 		  struct vmw_resource_val_node **p_val)
698 {
699 	struct vmw_res_cache_entry *rcache =
700 		&sw_context->res_cache[res_type];
701 	struct vmw_resource *res;
702 	struct vmw_resource_val_node *node;
703 	int ret;
704 
705 	if (*id_loc == SVGA3D_INVALID_ID) {
706 		if (p_val)
707 			*p_val = NULL;
708 		if (res_type == vmw_res_context) {
709 			DRM_ERROR("Invalid context id.\n");
710 			return -EINVAL;
711 		}
712 		return 0;
713 	}
714 
715 	/*
716 	 * Fastpath in case of repeated commands referencing the same
717 	 * resource
718 	 */
719 
720 	if (likely(rcache->valid && *id_loc == rcache->handle)) {
721 		const struct vmw_resource *res = rcache->res;
722 
723 		rcache->node->first_usage = false;
724 		if (p_val)
725 			*p_val = rcache->node;
726 
727 		return vmw_resource_relocation_add
728 			(&sw_context->res_relocations, res,
729 			 id_loc - sw_context->buf_start);
730 	}
731 
732 	ret = vmw_user_resource_lookup_handle(dev_priv,
733 					      sw_context->fp->tfile,
734 					      *id_loc,
735 					      converter,
736 					      &res);
737 	if (unlikely(ret != 0)) {
738 		DRM_ERROR("Could not find or use resource 0x%08x.\n",
739 			  (unsigned) *id_loc);
740 		dump_stack();
741 		return ret;
742 	}
743 
744 	rcache->valid = true;
745 	rcache->res = res;
746 	rcache->handle = *id_loc;
747 
748 	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
749 				    res, &node);
750 	if (unlikely(ret != 0))
751 		goto out_no_reloc;
752 
753 	rcache->node = node;
754 	if (p_val)
755 		*p_val = node;
756 	vmw_resource_unreference(&res);
757 	return 0;
758 
759 out_no_reloc:
760 	BUG_ON(sw_context->error_resource != NULL);
761 	sw_context->error_resource = res;
762 
763 	return ret;
764 }
765 
766 /**
767  * vmw_rebind_all_dx_query - Rebind DX queries associated with the context
768  *
769  * @ctx_res: context the query belongs to
770  *
771  * This function assumes binding_mutex is held.
772  */
773 static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
774 {
775 	struct vmw_private *dev_priv = ctx_res->dev_priv;
776 	struct vmw_dma_buffer *dx_query_mob;
777 	struct {
778 		SVGA3dCmdHeader header;
779 		SVGA3dCmdDXBindAllQuery body;
780 	} *cmd;
781 
782 
783 	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
784 
785 	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
786 		return 0;
787 
788 	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
789 
790 	if (cmd == NULL) {
791 		DRM_ERROR("Failed to rebind queries.\n");
792 		return -ENOMEM;
793 	}
794 
795 	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
796 	cmd->header.size = sizeof(cmd->body);
797 	cmd->body.cid = ctx_res->id;
798 	cmd->body.mobid = dx_query_mob->base.mem.start;
799 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
800 
801 	vmw_context_bind_dx_query(ctx_res, dx_query_mob);
802 
803 	return 0;
804 }
805 
806 /**
807  * vmw_rebind_contexts - Rebind all resources previously bound to
808  * referenced contexts.
809  *
810  * @sw_context: Pointer to the software context.
811  *
812  * Rebind context binding points that have been scrubbed because of eviction.
813  */
814 static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
815 {
816 	struct vmw_resource_val_node *val;
817 	int ret;
818 
819 	list_for_each_entry(val, &sw_context->resource_list, head) {
820 		if (unlikely(!val->staged_bindings))
821 			break;
822 
823 		ret = vmw_binding_rebind_all
824 			(vmw_context_binding_state(val->res));
825 		if (unlikely(ret != 0)) {
826 			if (ret != -ERESTARTSYS)
827 				DRM_ERROR("Failed to rebind context.\n");
828 			return ret;
829 		}
830 
831 		ret = vmw_rebind_all_dx_query(val->res);
832 		if (ret != 0)
833 			return ret;
834 	}
835 
836 	return 0;
837 }
838 
839 /**
840  * vmw_view_bindings_add - Add an array of view bindings to a context
841  * binding state tracker.
842  *
843  * @sw_context: The execbuf state used for this command.
844  * @view_type: View type for the bindings.
845  * @binding_type: Binding type for the bindings.
846  * @shader_slot: The shader slot to use for the bindings.
847  * @view_ids: Array of view ids to be bound.
848  * @num_views: Number of view ids in @view_ids.
849  * @first_slot: The binding slot to be used for the first view id in @view_ids.
850  */
851 static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
852 				 enum vmw_view_type view_type,
853 				 enum vmw_ctx_binding_type binding_type,
854 				 uint32 shader_slot,
855 				 uint32 view_ids[], u32 num_views,
856 				 u32 first_slot)
857 {
858 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
859 	struct vmw_cmdbuf_res_manager *man;
860 	u32 i;
861 	int ret;
862 
863 	if (!ctx_node) {
864 		DRM_ERROR("DX Context not set.\n");
865 		return -EINVAL;
866 	}
867 
868 	man = sw_context->man;
869 	for (i = 0; i < num_views; ++i) {
870 		struct vmw_ctx_bindinfo_view binding;
871 		struct vmw_resource *view = NULL;
872 
873 		if (view_ids[i] != SVGA3D_INVALID_ID) {
874 			view = vmw_view_lookup(man, view_type, view_ids[i]);
875 			if (IS_ERR(view)) {
876 				DRM_ERROR("View not found.\n");
877 				return PTR_ERR(view);
878 			}
879 
880 			ret = vmw_view_res_val_add(sw_context, view);
881 			if (ret) {
882 				DRM_ERROR("Could not add view to "
883 					  "validation list.\n");
884 				vmw_resource_unreference(&view);
885 				return ret;
886 			}
887 		}
888 		binding.bi.ctx = ctx_node->res;
889 		binding.bi.res = view;
890 		binding.bi.bt = binding_type;
891 		binding.shader_slot = shader_slot;
892 		binding.slot = first_slot + i;
893 		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
894 				shader_slot, binding.slot);
895 		if (view)
896 			vmw_resource_unreference(&view);
897 	}
898 
899 	return 0;
900 }
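
/*
 * Illustrative sketch (not compiled): how a checker for a DX
 * set-shader-resources style command might hand the view id array that
 * trails its fixed body to vmw_view_bindings_add. The command layout,
 * the struct SVGA3dCmdDXSetShaderResources fields and the function name
 * are assumptions for illustration.
 */
#if 0
static int example_set_shader_res_check(struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header,
					uint32 shader_slot)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShaderResources body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_views = (header->size - sizeof(cmd->body)) / sizeof(uint32);

	/* The view ids immediately follow the fixed command body. */
	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr, shader_slot,
				     (uint32 *)&cmd[1], num_views,
				     cmd->body.startView);
}
#endif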
901 
902 /**
903  * vmw_cmd_cid_check - Check a command header for valid context information.
904  *
905  * @dev_priv: Pointer to a device private structure.
906  * @sw_context: Pointer to the software context.
907  * @header: A command header with an embedded user-space context handle.
908  *
909  * Convenience function: Call vmw_cmd_res_check with the user-space context
910  * handle embedded in @header.
911  */
912 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
913 			     struct vmw_sw_context *sw_context,
914 			     SVGA3dCmdHeader *header)
915 {
916 	struct vmw_cid_cmd {
917 		SVGA3dCmdHeader header;
918 		uint32_t cid;
919 	} *cmd;
920 
921 	cmd = container_of(header, struct vmw_cid_cmd, header);
922 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
923 				 user_context_converter, &cmd->cid, NULL);
924 }
925 
926 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
927 					   struct vmw_sw_context *sw_context,
928 					   SVGA3dCmdHeader *header)
929 {
930 	struct vmw_sid_cmd {
931 		SVGA3dCmdHeader header;
932 		SVGA3dCmdSetRenderTarget body;
933 	} *cmd;
934 	struct vmw_resource_val_node *ctx_node;
935 	struct vmw_resource_val_node *res_node;
936 	int ret;
937 
938 	cmd = container_of(header, struct vmw_sid_cmd, header);
939 
940 	if (cmd->body.type >= SVGA3D_RT_MAX) {
941 		DRM_ERROR("Illegal render target type %u.\n",
942 			  (unsigned) cmd->body.type);
943 		return -EINVAL;
944 	}
945 
946 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
947 				user_context_converter, &cmd->body.cid,
948 				&ctx_node);
949 	if (unlikely(ret != 0))
950 		return ret;
951 
952 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
953 				user_surface_converter,
954 				&cmd->body.target.sid, &res_node);
955 	if (unlikely(ret != 0))
956 		return ret;
957 
958 	if (dev_priv->has_mob) {
959 		struct vmw_ctx_bindinfo_view binding;
960 
961 		binding.bi.ctx = ctx_node->res;
962 		binding.bi.res = res_node ? res_node->res : NULL;
963 		binding.bi.bt = vmw_ctx_binding_rt;
964 		binding.slot = cmd->body.type;
965 		vmw_binding_add(ctx_node->staged_bindings,
966 				&binding.bi, 0, binding.slot);
967 	}
968 
969 	return 0;
970 }
971 
972 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
973 				      struct vmw_sw_context *sw_context,
974 				      SVGA3dCmdHeader *header)
975 {
976 	struct vmw_sid_cmd {
977 		SVGA3dCmdHeader header;
978 		SVGA3dCmdSurfaceCopy body;
979 	} *cmd;
980 	int ret;
981 
982 	cmd = container_of(header, struct vmw_sid_cmd, header);
983 
984 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
985 				user_surface_converter,
986 				&cmd->body.src.sid, NULL);
987 	if (ret)
988 		return ret;
989 
990 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
991 				 user_surface_converter,
992 				 &cmd->body.dest.sid, NULL);
993 }
994 
995 static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
996 				      struct vmw_sw_context *sw_context,
997 				      SVGA3dCmdHeader *header)
998 {
999 	struct {
1000 		SVGA3dCmdHeader header;
1001 		SVGA3dCmdDXBufferCopy body;
1002 	} *cmd;
1003 	int ret;
1004 
1005 	cmd = container_of(header, typeof(*cmd), header);
1006 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1007 				user_surface_converter,
1008 				&cmd->body.src, NULL);
1009 	if (ret != 0)
1010 		return ret;
1011 
1012 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1013 				 user_surface_converter,
1014 				 &cmd->body.dest, NULL);
1015 }
1016 
1017 static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
1018 				   struct vmw_sw_context *sw_context,
1019 				   SVGA3dCmdHeader *header)
1020 {
1021 	struct {
1022 		SVGA3dCmdHeader header;
1023 		SVGA3dCmdDXPredCopyRegion body;
1024 	} *cmd;
1025 	int ret;
1026 
1027 	cmd = container_of(header, typeof(*cmd), header);
1028 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1029 				user_surface_converter,
1030 				&cmd->body.srcSid, NULL);
1031 	if (ret != 0)
1032 		return ret;
1033 
1034 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1035 				 user_surface_converter,
1036 				 &cmd->body.dstSid, NULL);
1037 }
1038 
1039 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
1040 				     struct vmw_sw_context *sw_context,
1041 				     SVGA3dCmdHeader *header)
1042 {
1043 	struct vmw_sid_cmd {
1044 		SVGA3dCmdHeader header;
1045 		SVGA3dCmdSurfaceStretchBlt body;
1046 	} *cmd;
1047 	int ret;
1048 
1049 	cmd = container_of(header, struct vmw_sid_cmd, header);
1050 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1051 				user_surface_converter,
1052 				&cmd->body.src.sid, NULL);
1053 	if (unlikely(ret != 0))
1054 		return ret;
1055 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1056 				 user_surface_converter,
1057 				 &cmd->body.dest.sid, NULL);
1058 }
1059 
1060 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
1061 					 struct vmw_sw_context *sw_context,
1062 					 SVGA3dCmdHeader *header)
1063 {
1064 	struct vmw_sid_cmd {
1065 		SVGA3dCmdHeader header;
1066 		SVGA3dCmdBlitSurfaceToScreen body;
1067 	} *cmd;
1068 
1069 	cmd = container_of(header, struct vmw_sid_cmd, header);
1070 
1071 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1072 				 user_surface_converter,
1073 				 &cmd->body.srcImage.sid, NULL);
1074 }
1075 
1076 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
1077 				 struct vmw_sw_context *sw_context,
1078 				 SVGA3dCmdHeader *header)
1079 {
1080 	struct vmw_sid_cmd {
1081 		SVGA3dCmdHeader header;
1082 		SVGA3dCmdPresent body;
1083 	} *cmd;
1084 
1085 
1086 	cmd = container_of(header, struct vmw_sid_cmd, header);
1087 
1088 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1089 				 user_surface_converter, &cmd->body.sid,
1090 				 NULL);
1091 }
1092 
1093 /**
1094  * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
1095  *
1096  * @dev_priv: The device private structure.
1097  * @new_query_bo: The new buffer holding query results.
1098  * @sw_context: The software context used for this command submission.
1099  *
1100  * This function checks whether @new_query_bo is suitable for holding
1101  * query results, and whether another buffer is currently pinned for query
1102  * results. If so, the function prepares the state of @sw_context for
1103  * switching pinned buffers after successful submission of the current
1104  * command batch.
1105  */
1106 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
1107 				       struct vmw_dma_buffer *new_query_bo,
1108 				       struct vmw_sw_context *sw_context)
1109 {
1110 	struct vmw_res_cache_entry *ctx_entry =
1111 		&sw_context->res_cache[vmw_res_context];
1112 	int ret;
1113 
1114 	BUG_ON(!ctx_entry->valid);
1115 	sw_context->last_query_ctx = ctx_entry->res;
1116 
1117 	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
1118 
1119 		if (unlikely(new_query_bo->base.num_pages > 4)) {
1120 			DRM_ERROR("Query buffer too large.\n");
1121 			return -EINVAL;
1122 		}
1123 
1124 		if (unlikely(sw_context->cur_query_bo != NULL)) {
1125 			sw_context->needs_post_query_barrier = true;
1126 			ret = vmw_bo_to_validate_list(sw_context,
1127 						      sw_context->cur_query_bo,
1128 						      dev_priv->has_mob, NULL);
1129 			if (unlikely(ret != 0))
1130 				return ret;
1131 		}
1132 		sw_context->cur_query_bo = new_query_bo;
1133 
1134 		ret = vmw_bo_to_validate_list(sw_context,
1135 					      dev_priv->dummy_query_bo,
1136 					      dev_priv->has_mob, NULL);
1137 		if (unlikely(ret != 0))
1138 			return ret;
1139 
1140 	}
1141 
1142 	return 0;
1143 }
1144 
1145 
1146 /**
1147  * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1148  *
1149  * @dev_priv: The device private structure.
1150  * @sw_context: The software context used for this command submission batch.
1151  *
1152  * This function will check if we're switching query buffers, and will then
1153  * issue a dummy occlusion query wait used as a query barrier. When the fence
1154  * object following that query wait has signaled, we are sure that all
1155  * preceding queries have finished, and the old query buffer can be unpinned.
1156  * However, since both the new query buffer and the old one are fenced with
1157  * that fence, we can do an asynchronous unpin now, and be sure that the
1158  * old query buffer won't be moved until the fence has signaled.
1159  *
1160  * As mentioned above, both the new and the old query buffers need to be fenced
1161  * using a sequence emitted *after* calling this function.
1162  */
1163 static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
1164 				     struct vmw_sw_context *sw_context)
1165 {
1166 	/*
1167 	 * The validate list should still hold references to all
1168 	 * contexts here.
1169 	 */
1170 
1171 	if (sw_context->needs_post_query_barrier) {
1172 		struct vmw_res_cache_entry *ctx_entry =
1173 			&sw_context->res_cache[vmw_res_context];
1174 		struct vmw_resource *ctx;
1175 		int ret;
1176 
1177 		BUG_ON(!ctx_entry->valid);
1178 		ctx = ctx_entry->res;
1179 
1180 		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
1181 
1182 		if (unlikely(ret != 0))
1183 			DRM_ERROR("Out of fifo space for dummy query.\n");
1184 	}
1185 
1186 	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
1187 		if (dev_priv->pinned_bo) {
1188 			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
1189 			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
1190 		}
1191 
1192 		if (!sw_context->needs_post_query_barrier) {
1193 			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
1194 
1195 			/*
1196 			 * We also pin the dummy_query_bo buffer so that we
1197 			 * don't need to validate it when emitting
1198 			 * dummy queries in context destroy paths.
1199 			 */
1200 
1201 			if (!dev_priv->dummy_query_bo_pinned) {
1202 				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
1203 						    true);
1204 				dev_priv->dummy_query_bo_pinned = true;
1205 			}
1206 
1207 			BUG_ON(sw_context->last_query_ctx == NULL);
1208 			dev_priv->query_cid = sw_context->last_query_ctx->id;
1209 			dev_priv->query_cid_valid = true;
1210 			dev_priv->pinned_bo =
1211 				vmw_dmabuf_reference(sw_context->cur_query_bo);
1212 		}
1213 	}
1214 }
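
/*
 * Illustrative sketch (not compiled): the prepare/commit pairing for the
 * pinned query buffer. The switch is prepared while the batch is parsed,
 * and committed only after the batch has been submitted, just before the
 * fence covering both query buffers is emitted.
 */
#if 0
	ret = vmw_query_bo_switch_prepare(dev_priv, new_query_bo, sw_context);
	if (ret != 0)
		return ret;
	/* ...submit the verified command batch to the device... */
	vmw_query_bo_switch_commit(dev_priv, sw_context);
	/* ...emit the fence that both query buffers are fenced with... */
#endif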
1215 
1216 /**
1217  * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
1218  * handle to a MOB id.
1219  *
1220  * @dev_priv: Pointer to a device private structure.
1221  * @sw_context: The software context used for this command batch validation.
1222  * @id: Pointer to the user-space handle to be translated.
1223  * @vmw_bo_p: Points to a location that, on successful return, will carry
1224  * a reference-counted pointer to the DMA buffer identified by the
1225  * user-space handle in @id.
1226  *
1227  * This function saves information needed to translate a user-space buffer
1228  * handle to a MOB id. The translation does not take place immediately, but
1229  * during a call to vmw_apply_relocations(). This function builds a relocation
1230  * list and a list of buffers to validate. The former needs to be freed using
1231  * either vmw_apply_relocations() or vmw_free_relocations(). The latter
1232  * needs to be freed using vmw_clear_validations.
1233  */
1234 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
1235 				 struct vmw_sw_context *sw_context,
1236 				 SVGAMobId *id,
1237 				 struct vmw_dma_buffer **vmw_bo_p)
1238 {
1239 	struct vmw_dma_buffer *vmw_bo = NULL;
1240 	uint32_t handle = *id;
1241 	struct vmw_relocation *reloc;
1242 	int ret;
1243 
1244 	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
1245 				     NULL);
1246 	if (unlikely(ret != 0)) {
1247 		DRM_ERROR("Could not find or use MOB buffer.\n");
1248 		ret = -EINVAL;
1249 		goto out_no_reloc;
1250 	}
1251 
1252 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
1253 		DRM_ERROR("Max number of relocations per submission"
1254 			  " exceeded\n");
1255 		ret = -EINVAL;
1256 		goto out_no_reloc;
1257 	}
1258 
1259 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
1260 	reloc->mob_loc = id;
1261 	reloc->location = NULL;
1262 
1263 	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
1264 	if (unlikely(ret != 0))
1265 		goto out_no_reloc;
1266 
1267 	*vmw_bo_p = vmw_bo;
1268 	return 0;
1269 
1270 out_no_reloc:
1271 	vmw_dmabuf_unreference(&vmw_bo);
1272 	*vmw_bo_p = NULL;
1273 	return ret;
1274 }
1275 
1276 /**
1277  * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
1278  * handle to a valid SVGAGuestPtr
1279  *
1280  * @dev_priv: Pointer to a device private structure.
1281  * @sw_context: The software context used for this command batch validation.
1282  * @ptr: Pointer to the user-space handle to be translated.
1283  * @vmw_bo_p: Points to a location that, on successful return, will carry
1284  * a reference-counted pointer to the DMA buffer identified by the
1285  * user-space handle in @ptr.
1286  *
1287  * This function saves information needed to translate a user-space buffer
1288  * handle to a valid SVGAGuestPtr. The translation does not take place
1289  * immediately, but during a call to vmw_apply_relocations().
1290  * This function builds a relocation list and a list of buffers to validate.
1291  * The former needs to be freed using either vmw_apply_relocations() or
1292  * vmw_free_relocations(). The latter needs to be freed using
1293  * vmw_clear_validations.
1294  */
1295 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1296 				   struct vmw_sw_context *sw_context,
1297 				   SVGAGuestPtr *ptr,
1298 				   struct vmw_dma_buffer **vmw_bo_p)
1299 {
1300 	struct vmw_dma_buffer *vmw_bo = NULL;
1301 	uint32_t handle = ptr->gmrId;
1302 	struct vmw_relocation *reloc;
1303 	int ret;
1304 
1305 	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
1306 				     NULL);
1307 	if (unlikely(ret != 0)) {
1308 		DRM_ERROR("Could not find or use GMR region.\n");
1309 		ret = -EINVAL;
1310 		goto out_no_reloc;
1311 	}
1312 
1313 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
1314 		DRM_ERROR("Max number of relocations per submission"
1315 			  " exceeded\n");
1316 		ret = -EINVAL;
1317 		goto out_no_reloc;
1318 	}
1319 
1320 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
1321 	reloc->location = ptr;
1322 
1323 	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
1324 	if (unlikely(ret != 0))
1325 		goto out_no_reloc;
1326 
1327 	*vmw_bo_p = vmw_bo;
1328 	return 0;
1329 
1330 out_no_reloc:
1331 	vmw_dmabuf_unreference(&vmw_bo);
1332 	*vmw_bo_p = NULL;
1333 	return ret;
1334 }
1335 
1336 
1337 
1338 /**
1339  * vmw_cmd_dx_define_query - Validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
1340  *
1341  * @dev_priv: Pointer to a device private struct.
1342  * @sw_context: The software context used for this command submission.
1343  * @header: Pointer to the command header in the command stream.
1344  *
1345  * This function adds the new query into the query COTABLE.
1346  */
1347 static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1348 				   struct vmw_sw_context *sw_context,
1349 				   SVGA3dCmdHeader *header)
1350 {
1351 	struct vmw_dx_define_query_cmd {
1352 		SVGA3dCmdHeader header;
1353 		SVGA3dCmdDXDefineQuery q;
1354 	} *cmd;
1355 
1356 	int    ret;
1357 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
1358 	struct vmw_resource *cotable_res;
1359 
1360 
1361 	if (ctx_node == NULL) {
1362 		DRM_ERROR("DX Context not set for query.\n");
1363 		return -EINVAL;
1364 	}
1365 
1366 	cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
1367 
1368 	if (cmd->q.type <  SVGA3D_QUERYTYPE_MIN ||
1369 	    cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
1370 		return -EINVAL;
1371 
1372 	cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
1373 	ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
1374 	vmw_resource_unreference(&cotable_res);
1375 
1376 	return ret;
1377 }
1378 
1379 
1380 
1381 /**
1382  * vmw_cmd_dx_bind_query - Validate a SVGA_3D_CMD_DX_BIND_QUERY command.
1383  *
1384  * @dev_priv: Pointer to a device private struct.
1385  * @sw_context: The software context used for this command submission.
1386  * @header: Pointer to the command header in the command stream.
1387  *
1388  * The query bind operation will eventually associate the query ID
1389  * with its backing MOB.  In this function, we take the user mode
1390  * MOB ID and use vmw_translate_mob_ptr() to translate it to its
1391  * kernel mode equivalent.
1392  */
1393 static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1394 				 struct vmw_sw_context *sw_context,
1395 				 SVGA3dCmdHeader *header)
1396 {
1397 	struct vmw_dx_bind_query_cmd {
1398 		SVGA3dCmdHeader header;
1399 		SVGA3dCmdDXBindQuery q;
1400 	} *cmd;
1401 
1402 	struct vmw_dma_buffer *vmw_bo;
1403 	int    ret;
1404 
1405 
1406 	cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
1407 
	/* A DX query can only be bound on a DX context. */
	if (unlikely(sw_context->dx_ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

1408 	/*
1409 	 * Look up the buffer pointed to by q.mobid, put it on the relocation
1410 	 * list so its kernel mode MOB ID can be filled in later
1411 	 */
1412 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
1413 				    &vmw_bo);
1414 
1415 	if (ret != 0)
1416 		return ret;
1417 
1418 	sw_context->dx_query_mob = vmw_bo;
1419 	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
1420 
1421 	vmw_dmabuf_unreference(&vmw_bo);
1422 
1423 	return ret;
1424 }
1425 
1426 
1427 
1428 /**
1429  * vmw_cmd_begin_gb_query - Validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
1430  *
1431  * @dev_priv: Pointer to a device private struct.
1432  * @sw_context: The software context used for this command submission.
1433  * @header: Pointer to the command header in the command stream.
1434  */
1435 static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1436 				  struct vmw_sw_context *sw_context,
1437 				  SVGA3dCmdHeader *header)
1438 {
1439 	struct vmw_begin_gb_query_cmd {
1440 		SVGA3dCmdHeader header;
1441 		SVGA3dCmdBeginGBQuery q;
1442 	} *cmd;
1443 
1444 	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
1445 			   header);
1446 
1447 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1448 				 user_context_converter, &cmd->q.cid,
1449 				 NULL);
1450 }
1451 
1452 /**
1453  * vmw_cmd_begin_query - Validate a SVGA_3D_CMD_BEGIN_QUERY command.
1454  *
1455  * @dev_priv: Pointer to a device private struct.
1456  * @sw_context: The software context used for this command submission.
1457  * @header: Pointer to the command header in the command stream.
1458  */
1459 static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1460 			       struct vmw_sw_context *sw_context,
1461 			       SVGA3dCmdHeader *header)
1462 {
1463 	struct vmw_begin_query_cmd {
1464 		SVGA3dCmdHeader header;
1465 		SVGA3dCmdBeginQuery q;
1466 	} *cmd;
1467 
1468 	cmd = container_of(header, struct vmw_begin_query_cmd,
1469 			   header);
1470 
1471 	if (unlikely(dev_priv->has_mob)) {
1472 		struct {
1473 			SVGA3dCmdHeader header;
1474 			SVGA3dCmdBeginGBQuery q;
1475 		} gb_cmd;
1476 
1477 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1478 
1479 		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1480 		gb_cmd.header.size = cmd->header.size;
1481 		gb_cmd.q.cid = cmd->q.cid;
1482 		gb_cmd.q.type = cmd->q.type;
1483 
1484 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1485 		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1486 	}
1487 
1488 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1489 				 user_context_converter, &cmd->q.cid,
1490 				 NULL);
1491 }
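
/*
 * Note on the above: on guest-backed devices the legacy query command is
 * rewritten in place into its guest-backed equivalent (the BUG_ON asserts
 * that the two layouts have the same size) and then re-dispatched to the
 * guest-backed checker. The same pattern is used for the end-query and
 * wait-query commands below.
 */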
1492 
1493 /**
1494  * vmw_cmd_end_gb_query - Validate a SVGA_3D_CMD_END_GB_QUERY command.
1495  *
1496  * @dev_priv: Pointer to a device private struct.
1497  * @sw_context: The software context used for this command submission.
1498  * @header: Pointer to the command header in the command stream.
1499  */
1500 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1501 				struct vmw_sw_context *sw_context,
1502 				SVGA3dCmdHeader *header)
1503 {
1504 	struct vmw_dma_buffer *vmw_bo;
1505 	struct vmw_query_cmd {
1506 		SVGA3dCmdHeader header;
1507 		SVGA3dCmdEndGBQuery q;
1508 	} *cmd;
1509 	int ret;
1510 
1511 	cmd = container_of(header, struct vmw_query_cmd, header);
1512 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1513 	if (unlikely(ret != 0))
1514 		return ret;
1515 
1516 	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1517 				    &cmd->q.mobid,
1518 				    &vmw_bo);
1519 	if (unlikely(ret != 0))
1520 		return ret;
1521 
1522 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1523 
1524 	vmw_dmabuf_unreference(&vmw_bo);
1525 	return ret;
1526 }
1527 
1528 /**
1529  * vmw_cmd_end_query - Validate a SVGA_3D_CMD_END_QUERY command.
1530  *
1531  * @dev_priv: Pointer to a device private struct.
1532  * @sw_context: The software context used for this command submission.
1533  * @header: Pointer to the command header in the command stream.
1534  */
1535 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1536 			     struct vmw_sw_context *sw_context,
1537 			     SVGA3dCmdHeader *header)
1538 {
1539 	struct vmw_dma_buffer *vmw_bo;
1540 	struct vmw_query_cmd {
1541 		SVGA3dCmdHeader header;
1542 		SVGA3dCmdEndQuery q;
1543 	} *cmd;
1544 	int ret;
1545 
1546 	cmd = container_of(header, struct vmw_query_cmd, header);
1547 	if (dev_priv->has_mob) {
1548 		struct {
1549 			SVGA3dCmdHeader header;
1550 			SVGA3dCmdEndGBQuery q;
1551 		} gb_cmd;
1552 
1553 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1554 
1555 		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1556 		gb_cmd.header.size = cmd->header.size;
1557 		gb_cmd.q.cid = cmd->q.cid;
1558 		gb_cmd.q.type = cmd->q.type;
1559 		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1560 		gb_cmd.q.offset = cmd->q.guestResult.offset;
1561 
1562 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1563 		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1564 	}
1565 
1566 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1567 	if (unlikely(ret != 0))
1568 		return ret;
1569 
1570 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1571 				      &cmd->q.guestResult,
1572 				      &vmw_bo);
1573 	if (unlikely(ret != 0))
1574 		return ret;
1575 
1576 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1577 
1578 	vmw_dmabuf_unreference(&vmw_bo);
1579 	return ret;
1580 }
1581 
1582 /**
1583  * vmw_cmd_wait_gb_query - Validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
1584  *
1585  * @dev_priv: Pointer to a device private struct.
1586  * @sw_context: The software context used for this command submission.
1587  * @header: Pointer to the command header in the command stream.
1588  */
1589 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1590 				 struct vmw_sw_context *sw_context,
1591 				 SVGA3dCmdHeader *header)
1592 {
1593 	struct vmw_dma_buffer *vmw_bo;
1594 	struct vmw_query_cmd {
1595 		SVGA3dCmdHeader header;
1596 		SVGA3dCmdWaitForGBQuery q;
1597 	} *cmd;
1598 	int ret;
1599 
1600 	cmd = container_of(header, struct vmw_query_cmd, header);
1601 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1602 	if (unlikely(ret != 0))
1603 		return ret;
1604 
1605 	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1606 				    &cmd->q.mobid,
1607 				    &vmw_bo);
1608 	if (unlikely(ret != 0))
1609 		return ret;
1610 
1611 	vmw_dmabuf_unreference(&vmw_bo);
1612 	return 0;
1613 }
1614 
1615 /**
1616  * vmw_cmd_wait_query - Validate a SVGA_3D_CMD_WAIT_QUERY command.
1617  *
1618  * @dev_priv: Pointer to a device private struct.
1619  * @sw_context: The software context used for this command submission.
1620  * @header: Pointer to the command header in the command stream.
1621  */
1622 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1623 			      struct vmw_sw_context *sw_context,
1624 			      SVGA3dCmdHeader *header)
1625 {
1626 	struct vmw_dma_buffer *vmw_bo;
1627 	struct vmw_query_cmd {
1628 		SVGA3dCmdHeader header;
1629 		SVGA3dCmdWaitForQuery q;
1630 	} *cmd;
1631 	int ret;
1632 
1633 	cmd = container_of(header, struct vmw_query_cmd, header);
1634 	if (dev_priv->has_mob) {
1635 		struct {
1636 			SVGA3dCmdHeader header;
1637 			SVGA3dCmdWaitForGBQuery q;
1638 		} gb_cmd;
1639 
1640 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1641 
1642 		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1643 		gb_cmd.header.size = cmd->header.size;
1644 		gb_cmd.q.cid = cmd->q.cid;
1645 		gb_cmd.q.type = cmd->q.type;
1646 		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1647 		gb_cmd.q.offset = cmd->q.guestResult.offset;
1648 
1649 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1650 		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1651 	}
1652 
1653 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1654 	if (unlikely(ret != 0))
1655 		return ret;
1656 
1657 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1658 				      &cmd->q.guestResult,
1659 				      &vmw_bo);
1660 	if (unlikely(ret != 0))
1661 		return ret;
1662 
1663 	vmw_dmabuf_unreference(&vmw_bo);
1664 	return 0;
1665 }
1666 
1667 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1668 		       struct vmw_sw_context *sw_context,
1669 		       SVGA3dCmdHeader *header)
1670 {
1671 	struct vmw_dma_buffer *vmw_bo = NULL;
1672 	struct vmw_surface *srf = NULL;
1673 	struct vmw_dma_cmd {
1674 		SVGA3dCmdHeader header;
1675 		SVGA3dCmdSurfaceDMA dma;
1676 	} *cmd;
1677 	int ret;
1678 	SVGA3dCmdSurfaceDMASuffix *suffix;
1679 	uint32_t bo_size;
1680 
1681 	cmd = container_of(header, struct vmw_dma_cmd, header);
1682 	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1683 					       header->size - sizeof(*suffix));
1684 
1685 	/* Make sure the device and the verifier stay in sync. */
1686 	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1687 		DRM_ERROR("Invalid DMA suffix size.\n");
1688 		return -EINVAL;
1689 	}
1690 
1691 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1692 				      &cmd->dma.guest.ptr,
1693 				      &vmw_bo);
1694 	if (unlikely(ret != 0))
1695 		return ret;
1696 
1697 	/* Make sure DMA doesn't cross BO boundaries. */
1698 	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1699 	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1700 		DRM_ERROR("Invalid DMA offset.\n");
1701 		return -EINVAL;
1702 	}
1703 
1704 	bo_size -= cmd->dma.guest.ptr.offset;
1705 	if (unlikely(suffix->maximumOffset > bo_size))
1706 		suffix->maximumOffset = bo_size;
1707 
1708 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1709 				user_surface_converter, &cmd->dma.host.sid,
1710 				NULL);
1711 	if (unlikely(ret != 0)) {
1712 		if (unlikely(ret != -ERESTARTSYS))
1713 			DRM_ERROR("Could not find surface for DMA.\n");
1714 		goto out_no_surface;
1715 	}
1716 
1717 	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1718 
1719 	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1720 			     header);
1721 
1722 out_no_surface:
1723 	vmw_dmabuf_unreference(&vmw_bo);
1724 	return ret;
1725 }
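
/*
 * Note on the layout walked by vmw_cmd_dma() above: the DMA suffix sits
 * at the very end of the variable-length command, after the copy boxes,
 * which is why it is located via the header size rather than at a fixed
 * offset:
 *
 *   SVGA3dCmdHeader | SVGA3dCmdSurfaceDMA | copy boxes... |
 *   SVGA3dCmdSurfaceDMASuffix
 */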
1726 
1727 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1728 			struct vmw_sw_context *sw_context,
1729 			SVGA3dCmdHeader *header)
1730 {
1731 	struct vmw_draw_cmd {
1732 		SVGA3dCmdHeader header;
1733 		SVGA3dCmdDrawPrimitives body;
1734 	} *cmd;
1735 	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1736 		(unsigned long)header + sizeof(*cmd));
1737 	SVGA3dPrimitiveRange *range;
1738 	uint32_t i;
1739 	uint32_t maxnum;
1740 	int ret;
1741 
1742 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1743 	if (unlikely(ret != 0))
1744 		return ret;
1745 
1746 	cmd = container_of(header, struct vmw_draw_cmd, header);
1747 	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1748 
1749 	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1750 		DRM_ERROR("Illegal number of vertex declarations.\n");
1751 		return -EINVAL;
1752 	}
1753 
1754 	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1755 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1756 					user_surface_converter,
1757 					&decl->array.surfaceId, NULL);
1758 		if (unlikely(ret != 0))
1759 			return ret;
1760 	}
1761 
1762 	maxnum = (header->size - sizeof(cmd->body) -
1763 		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1764 	if (unlikely(cmd->body.numRanges > maxnum)) {
1765 		DRM_ERROR("Illegal number of index ranges.\n");
1766 		return -EINVAL;
1767 	}
1768 
1769 	range = (SVGA3dPrimitiveRange *) decl;
1770 	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1771 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1772 					user_surface_converter,
1773 					&range->indexArray.surfaceId, NULL);
1774 		if (unlikely(ret != 0))
1775 			return ret;
1776 	}
1777 	return 0;
1778 }
1779 
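/*
 * A DRAW_PRIMITIVES command carries two variable-length arrays after the
 * fixed body, which is what the two maxnum computations in vmw_cmd_draw()
 * bound (a sketch, per the SVGA3D protocol headers):
 *
 *	SVGA3dCmdHeader header;
 *	SVGA3dCmdDrawPrimitives body;           - numVertexDecls, numRanges
 *	SVGA3dVertexDecl decl[numVertexDecls];  - each names a surface id
 *	SVGA3dPrimitiveRange range[numRanges];  - each names an index surface
 *
 * Every surface id in both arrays is pushed through vmw_cmd_res_check()
 * so that no unvalidated handle reaches the device.
 */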
1780 
1781 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1782 			     struct vmw_sw_context *sw_context,
1783 			     SVGA3dCmdHeader *header)
1784 {
1785 	struct vmw_tex_state_cmd {
1786 		SVGA3dCmdHeader header;
1787 		SVGA3dCmdSetTextureState state;
1788 	} *cmd;
1789 
1790 	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1791 	  ((unsigned long) header + header->size + sizeof(*header));
1792 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1793 		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1794 	struct vmw_resource_val_node *ctx_node;
1795 	struct vmw_resource_val_node *res_node;
1796 	int ret;
1797 
1798 	cmd = container_of(header, struct vmw_tex_state_cmd,
1799 			   header);
1800 
1801 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1802 				user_context_converter, &cmd->state.cid,
1803 				&ctx_node);
1804 	if (unlikely(ret != 0))
1805 		return ret;
1806 
1807 	for (; cur_state < last_state; ++cur_state) {
1808 		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1809 			continue;
1810 
1811 		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1812 			DRM_ERROR("Illegal texture/sampler unit %u.\n",
1813 				  (unsigned) cur_state->stage);
1814 			return -EINVAL;
1815 		}
1816 
1817 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1818 					user_surface_converter,
1819 					&cur_state->value, &res_node);
1820 		if (unlikely(ret != 0))
1821 			return ret;
1822 
1823 		if (dev_priv->has_mob) {
1824 			struct vmw_ctx_bindinfo_tex binding;
1825 
1826 			binding.bi.ctx = ctx_node->res;
1827 			binding.bi.res = res_node ? res_node->res : NULL;
1828 			binding.bi.bt = vmw_ctx_binding_tex;
1829 			binding.texture_stage = cur_state->stage;
1830 			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
1831 					0, binding.texture_stage);
1832 		}
1833 	}
1834 
1835 	return 0;
1836 }
1837 
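/*
 * SETTEXTURESTATE is likewise variable-length: after the cid, the body is
 * a run of SVGA3dTextureState { stage, name, value } triples up to the
 * end of the command (a sketch). Only SVGA3D_TS_BIND_TEXTURE entries
 * carry a surface id in 'value', so vmw_cmd_tex_state() skips all other
 * state names and validates just the texture bindings.
 */
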
1838 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1839 				      struct vmw_sw_context *sw_context,
1840 				      void *buf)
1841 {
1842 	struct vmw_dma_buffer *vmw_bo;
1843 	int ret;
1844 
1845 	struct {
1846 		uint32_t header;
1847 		SVGAFifoCmdDefineGMRFB body;
1848 	} *cmd = buf;
1849 
1850 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1851 				      &cmd->body.ptr,
1852 				      &vmw_bo);
1853 	if (unlikely(ret != 0))
1854 		return ret;
1855 
1856 	vmw_dmabuf_unreference(&vmw_bo);
1857 
1858 	return ret;
1859 }
1860 
1861 
1862 /**
1863  * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1864  * switching
1865  *
1866  * @dev_priv: Pointer to a device private struct.
1867  * @sw_context: The software context being used for this batch.
1868  * @val_node: The validation node representing the resource.
1869  * @buf_id: Pointer to the user-space backup buffer handle in the command
1870  * stream.
1871  * @backup_offset: Offset of backup into MOB.
1872  *
1873  * This function prepares for registering a switch of backup buffers
1874  * in the resource metadata just prior to unreserving: the new buffer
1875  * and offset are recorded on @val_node and applied at unreserve time.
1876  */
1877 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1878 				     struct vmw_sw_context *sw_context,
1879 				     struct vmw_resource_val_node *val_node,
1880 				     uint32_t *buf_id,
1881 				     unsigned long backup_offset)
1882 {
1883 	struct vmw_dma_buffer *dma_buf;
1884 	int ret;
1885 
1886 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1887 	if (ret)
1888 		return ret;
1889 
1890 	val_node->switching_backup = true;
1891 	if (val_node->first_usage)
1892 		val_node->no_buffer_needed = true;
1893 
1894 	vmw_dmabuf_unreference(&val_node->new_backup);
1895 	val_node->new_backup = dma_buf;
1896 	val_node->new_backup_offset = backup_offset;
1897 
1898 	return 0;
1899 }
1900 
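/*
 * Typical use (a sketch, mirroring the SVGA_3D_CMD_BIND_GB_SHADER handler
 * further down): a bind command supplies the resource handle, the new mob
 * id and a start offset, so its handler reduces to
 *
 *	vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
 *			      user_shader_converter, &cmd->body.shid,
 *			      &cmd->body.mobid, cmd->body.offsetInBytes);
 *
 * The buffer swap itself is deferred until the resource is unreserved.
 */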
1901 
1902 /**
1903  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1904  *
1905  * @dev_priv: Pointer to a device private struct.
1906  * @sw_context: The software context being used for this batch.
1907  * @res_type: The resource type.
1908  * @converter: Information about user-space binding for this resource type.
1909  * @res_id: Pointer to the user-space resource handle in the command stream.
1910  * @buf_id: Pointer to the user-space backup buffer handle in the command
1911  * stream.
1912  * @backup_offset: Offset of backup into MOB.
1913  *
1914  * This function prepares for registering a switch of backup buffers
1915  * in the resource metadata just prior to unreserving. It's basically a wrapper
1916  * around vmw_cmd_res_switch_backup with a different interface.
1917  */
1918 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1919 				 struct vmw_sw_context *sw_context,
1920 				 enum vmw_res_type res_type,
1921 				 const struct vmw_user_resource_conv
1922 				 *converter,
1923 				 uint32_t *res_id,
1924 				 uint32_t *buf_id,
1925 				 unsigned long backup_offset)
1926 {
1927 	struct vmw_resource_val_node *val_node;
1928 	int ret;
1929 
1930 	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1931 				converter, res_id, &val_node);
1932 	if (ret)
1933 		return ret;
1934 
1935 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
1936 					 buf_id, backup_offset);
1937 }
1938 
1939 /**
1940  * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1941  * command
1942  *
1943  * @dev_priv: Pointer to a device private struct.
1944  * @sw_context: The software context being used for this batch.
1945  * @header: Pointer to the command header in the command stream.
1946  */
1947 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1948 				   struct vmw_sw_context *sw_context,
1949 				   SVGA3dCmdHeader *header)
1950 {
1951 	struct vmw_bind_gb_surface_cmd {
1952 		SVGA3dCmdHeader header;
1953 		SVGA3dCmdBindGBSurface body;
1954 	} *cmd;
1955 
1956 	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1957 
1958 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1959 				     user_surface_converter,
1960 				     &cmd->body.sid, &cmd->body.mobid,
1961 				     0);
1962 }
1963 
1964 /**
1965  * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1966  * command
1967  *
1968  * @dev_priv: Pointer to a device private struct.
1969  * @sw_context: The software context being used for this batch.
1970  * @header: Pointer to the command header in the command stream.
1971  */
1972 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1973 				   struct vmw_sw_context *sw_context,
1974 				   SVGA3dCmdHeader *header)
1975 {
1976 	struct vmw_gb_surface_cmd {
1977 		SVGA3dCmdHeader header;
1978 		SVGA3dCmdUpdateGBImage body;
1979 	} *cmd;
1980 
1981 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1982 
1983 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1984 				 user_surface_converter,
1985 				 &cmd->body.image.sid, NULL);
1986 }
1987 
1988 /**
1989  * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1990  * command
1991  *
1992  * @dev_priv: Pointer to a device private struct.
1993  * @sw_context: The software context being used for this batch.
1994  * @header: Pointer to the command header in the command stream.
1995  */
1996 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1997 				     struct vmw_sw_context *sw_context,
1998 				     SVGA3dCmdHeader *header)
1999 {
2000 	struct vmw_gb_surface_cmd {
2001 		SVGA3dCmdHeader header;
2002 		SVGA3dCmdUpdateGBSurface body;
2003 	} *cmd;
2004 
2005 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2006 
2007 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2008 				 user_surface_converter,
2009 				 &cmd->body.sid, NULL);
2010 }
2011 
2012 /**
2013  * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
2014  * command
2015  *
2016  * @dev_priv: Pointer to a device private struct.
2017  * @sw_context: The software context being used for this batch.
2018  * @header: Pointer to the command header in the command stream.
2019  */
2020 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
2021 				     struct vmw_sw_context *sw_context,
2022 				     SVGA3dCmdHeader *header)
2023 {
2024 	struct vmw_gb_surface_cmd {
2025 		SVGA3dCmdHeader header;
2026 		SVGA3dCmdReadbackGBImage body;
2027 	} *cmd;
2028 
2029 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2030 
2031 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2032 				 user_surface_converter,
2033 				 &cmd->body.image.sid, NULL);
2034 }
2035 
2036 /**
2037  * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
2038  * command
2039  *
2040  * @dev_priv: Pointer to a device private struct.
2041  * @sw_context: The software context being used for this batch.
2042  * @header: Pointer to the command header in the command stream.
2043  */
2044 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
2045 				       struct vmw_sw_context *sw_context,
2046 				       SVGA3dCmdHeader *header)
2047 {
2048 	struct vmw_gb_surface_cmd {
2049 		SVGA3dCmdHeader header;
2050 		SVGA3dCmdReadbackGBSurface body;
2051 	} *cmd;
2052 
2053 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2054 
2055 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2056 				 user_surface_converter,
2057 				 &cmd->body.sid, NULL);
2058 }
2059 
2060 /**
2061  * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
2062  * command
2063  *
2064  * @dev_priv: Pointer to a device private struct.
2065  * @sw_context: The software context being used for this batch.
2066  * @header: Pointer to the command header in the command stream.
2067  */
2068 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
2069 				       struct vmw_sw_context *sw_context,
2070 				       SVGA3dCmdHeader *header)
2071 {
2072 	struct vmw_gb_surface_cmd {
2073 		SVGA3dCmdHeader header;
2074 		SVGA3dCmdInvalidateGBImage body;
2075 	} *cmd;
2076 
2077 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2078 
2079 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2080 				 user_surface_converter,
2081 				 &cmd->body.image.sid, NULL);
2082 }
2083 
2084 /**
2085  * vmw_cmd_invalidate_gb_surface - Validate an
2086  * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
2087  *
2088  * @dev_priv: Pointer to a device private struct.
2089  * @sw_context: The software context being used for this batch.
2090  * @header: Pointer to the command header in the command stream.
2091  */
2092 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
2093 					 struct vmw_sw_context *sw_context,
2094 					 SVGA3dCmdHeader *header)
2095 {
2096 	struct vmw_gb_surface_cmd {
2097 		SVGA3dCmdHeader header;
2098 		SVGA3dCmdInvalidateGBSurface body;
2099 	} *cmd;
2100 
2101 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2102 
2103 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2104 				 user_surface_converter,
2105 				 &cmd->body.sid, NULL);
2106 }
2107 
2108 
2109 /**
2110  * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
2111  * command
2112  *
2113  * @dev_priv: Pointer to a device private struct.
2114  * @sw_context: The software context being used for this batch.
2115  * @header: Pointer to the command header in the command stream.
2116  */
2117 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
2118 				 struct vmw_sw_context *sw_context,
2119 				 SVGA3dCmdHeader *header)
2120 {
2121 	struct vmw_shader_define_cmd {
2122 		SVGA3dCmdHeader header;
2123 		SVGA3dCmdDefineShader body;
2124 	} *cmd;
2125 	int ret;
2126 	size_t size;
2127 	struct vmw_resource_val_node *val;
2128 
2129 	cmd = container_of(header, struct vmw_shader_define_cmd,
2130 			   header);
2131 
2132 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2133 				user_context_converter, &cmd->body.cid,
2134 				&val);
2135 	if (unlikely(ret != 0))
2136 		return ret;
2137 
2138 	if (unlikely(!dev_priv->has_mob))
2139 		return 0;
2140 
2141 	size = cmd->header.size - sizeof(cmd->body);
2142 	ret = vmw_compat_shader_add(dev_priv,
2143 				    vmw_context_res_man(val->res),
2144 				    cmd->body.shid, cmd + 1,
2145 				    cmd->body.type, size,
2146 				    &sw_context->staged_cmd_res);
2147 	if (unlikely(ret != 0))
2148 		return ret;
2149 
2150 	return vmw_resource_relocation_add(&sw_context->res_relocations,
2151 					   NULL, &cmd->header.id -
2152 					   sw_context->buf_start);
2155 }
2156 
2157 /**
2158  * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
2159  * command
2160  *
2161  * @dev_priv: Pointer to a device private struct.
2162  * @sw_context: The software context being used for this batch.
2163  * @header: Pointer to the command header in the command stream.
2164  */
2165 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
2166 				  struct vmw_sw_context *sw_context,
2167 				  SVGA3dCmdHeader *header)
2168 {
2169 	struct vmw_shader_destroy_cmd {
2170 		SVGA3dCmdHeader header;
2171 		SVGA3dCmdDestroyShader body;
2172 	} *cmd;
2173 	int ret;
2174 	struct vmw_resource_val_node *val;
2175 
2176 	cmd = container_of(header, struct vmw_shader_destroy_cmd,
2177 			   header);
2178 
2179 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2180 				user_context_converter, &cmd->body.cid,
2181 				&val);
2182 	if (unlikely(ret != 0))
2183 		return ret;
2184 
2185 	if (unlikely(!dev_priv->has_mob))
2186 		return 0;
2187 
2188 	ret = vmw_shader_remove(vmw_context_res_man(val->res),
2189 				cmd->body.shid,
2190 				cmd->body.type,
2191 				&sw_context->staged_cmd_res);
2192 	if (unlikely(ret != 0))
2193 		return ret;
2194 
2195 	return vmw_resource_relocation_add(&sw_context->res_relocations,
2196 					   NULL, &cmd->header.id -
2197 					   sw_context->buf_start);
2200 }
2201 
2202 /**
2203  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
2204  * command
2205  *
2206  * @dev_priv: Pointer to a device private struct.
2207  * @sw_context: The software context being used for this batch.
2208  * @header: Pointer to the command header in the command stream.
2209  */
2210 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
2211 			      struct vmw_sw_context *sw_context,
2212 			      SVGA3dCmdHeader *header)
2213 {
2214 	struct vmw_set_shader_cmd {
2215 		SVGA3dCmdHeader header;
2216 		SVGA3dCmdSetShader body;
2217 	} *cmd;
2218 	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
2219 	struct vmw_ctx_bindinfo_shader binding;
2220 	struct vmw_resource *res = NULL;
2221 	int ret;
2222 
2223 	cmd = container_of(header, struct vmw_set_shader_cmd,
2224 			   header);
2225 
2226 	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2227 		DRM_ERROR("Illegal shader type %u.\n",
2228 			  (unsigned) cmd->body.type);
2229 		return -EINVAL;
2230 	}
2231 
2232 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2233 				user_context_converter, &cmd->body.cid,
2234 				&ctx_node);
2235 	if (unlikely(ret != 0))
2236 		return ret;
2237 
2238 	if (!dev_priv->has_mob)
2239 		return 0;
2240 
2241 	if (cmd->body.shid != SVGA3D_INVALID_ID) {
2242 		res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2243 					cmd->body.shid,
2244 					cmd->body.type);
2245 
2246 		if (!IS_ERR(res)) {
2247 			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
2248 						    &cmd->body.shid, res,
2249 						    &res_node);
2250 			vmw_resource_unreference(&res);
2251 			if (unlikely(ret != 0))
2252 				return ret;
2253 		}
2254 	}
2255 
2256 	if (!res_node) {
2257 		ret = vmw_cmd_res_check(dev_priv, sw_context,
2258 					vmw_res_shader,
2259 					user_shader_converter,
2260 					&cmd->body.shid, &res_node);
2261 		if (unlikely(ret != 0))
2262 			return ret;
2263 	}
2264 
2265 	binding.bi.ctx = ctx_node->res;
2266 	binding.bi.res = res_node ? res_node->res : NULL;
2267 	binding.bi.bt = vmw_ctx_binding_shader;
2268 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2269 	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2270 			binding.shader_slot, 0);
2271 	return 0;
2272 }
2273 
2274 /**
2275  * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
2276  * command
2277  *
2278  * @dev_priv: Pointer to a device private struct.
2279  * @sw_context: The software context being used for this batch.
2280  * @header: Pointer to the command header in the command stream.
2281  */
2282 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2283 				    struct vmw_sw_context *sw_context,
2284 				    SVGA3dCmdHeader *header)
2285 {
2286 	struct vmw_set_shader_const_cmd {
2287 		SVGA3dCmdHeader header;
2288 		SVGA3dCmdSetShaderConst body;
2289 	} *cmd;
2290 	int ret;
2291 
2292 	cmd = container_of(header, struct vmw_set_shader_const_cmd,
2293 			   header);
2294 
2295 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2296 				user_context_converter, &cmd->body.cid,
2297 				NULL);
2298 	if (unlikely(ret != 0))
2299 		return ret;
2300 
2301 	if (dev_priv->has_mob)
2302 		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2303 
2304 	return 0;
2305 }
2306 
2307 /**
2308  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
2309  * command
2310  *
2311  * @dev_priv: Pointer to a device private struct.
2312  * @sw_context: The software context being used for this batch.
2313  * @header: Pointer to the command header in the command stream.
2314  */
2315 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2316 				  struct vmw_sw_context *sw_context,
2317 				  SVGA3dCmdHeader *header)
2318 {
2319 	struct vmw_bind_gb_shader_cmd {
2320 		SVGA3dCmdHeader header;
2321 		SVGA3dCmdBindGBShader body;
2322 	} *cmd;
2323 
2324 	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
2325 			   header);
2326 
2327 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2328 				     user_shader_converter,
2329 				     &cmd->body.shid, &cmd->body.mobid,
2330 				     cmd->body.offsetInBytes);
2331 }
2332 
2333 /**
2334  * vmw_cmd_dx_set_single_constant_buffer - Validate an
2335  * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2336  *
2337  * @dev_priv: Pointer to a device private struct.
2338  * @sw_context: The software context being used for this batch.
2339  * @header: Pointer to the command header in the command stream.
2340  */
2341 static int
2342 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2343 				      struct vmw_sw_context *sw_context,
2344 				      SVGA3dCmdHeader *header)
2345 {
2346 	struct {
2347 		SVGA3dCmdHeader header;
2348 		SVGA3dCmdDXSetSingleConstantBuffer body;
2349 	} *cmd;
2350 	struct vmw_resource_val_node *res_node = NULL;
2351 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2352 	struct vmw_ctx_bindinfo_cb binding;
2353 	int ret;
2354 
2355 	if (unlikely(ctx_node == NULL)) {
2356 		DRM_ERROR("DX Context not set.\n");
2357 		return -EINVAL;
2358 	}
2359 
2360 	cmd = container_of(header, typeof(*cmd), header);
2361 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2362 				user_surface_converter,
2363 				&cmd->body.sid, &res_node);
2364 	if (unlikely(ret != 0))
2365 		return ret;
2366 
2367 	binding.bi.ctx = ctx_node->res;
2368 	binding.bi.res = res_node ? res_node->res : NULL;
2369 	binding.bi.bt = vmw_ctx_binding_cb;
2370 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2371 	binding.offset = cmd->body.offsetInBytes;
2372 	binding.size = cmd->body.sizeInBytes;
2373 	binding.slot = cmd->body.slot;
2374 
2375 	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2376 	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2377 		DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2378 			  (unsigned) cmd->body.type,
2379 			  (unsigned) binding.slot);
2380 		return -EINVAL;
2381 	}
2382 
2383 	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2384 			binding.shader_slot, binding.slot);
2385 
2386 	return 0;
2387 }
2388 
2389 /**
2390  * vmw_cmd_dx_set_shader_res - Validate an
2391  * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2392  *
2393  * @dev_priv: Pointer to a device private struct.
2394  * @sw_context: The software context being used for this batch.
2395  * @header: Pointer to the command header in the command stream.
2396  */
2397 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2398 				     struct vmw_sw_context *sw_context,
2399 				     SVGA3dCmdHeader *header)
2400 {
2401 	struct {
2402 		SVGA3dCmdHeader header;
2403 		SVGA3dCmdDXSetShaderResources body;
2404 	} *cmd = container_of(header, typeof(*cmd), header);
2405 	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2406 		sizeof(SVGA3dShaderResourceViewId);
2407 
2408 	if ((u64) cmd->body.startView + (u64) num_sr_view >
2409 	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
2410 	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2411 		DRM_ERROR("Invalid shader binding.\n");
2412 		return -EINVAL;
2413 	}
2414 
2415 	return vmw_view_bindings_add(sw_context, vmw_view_sr,
2416 				     vmw_ctx_binding_sr,
2417 				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2418 				     (void *) &cmd[1], num_sr_view,
2419 				     cmd->body.startView);
2420 }
2421 
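/*
 * The u64 casts in the check above matter: startView and num_sr_view are
 * both 32-bit and user-controlled, so for example startView = 0xffffff00
 * with num_sr_view = 0x200 would wrap to a small sum in 32-bit arithmetic
 * and slip past the SVGA3D_DX_MAX_SRVIEWS limit. Widening both operands
 * before the addition makes the comparison exact.
 */
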
2422 /**
2423  * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2424  * command
2425  *
2426  * @dev_priv: Pointer to a device private struct.
2427  * @sw_context: The software context being used for this batch.
2428  * @header: Pointer to the command header in the command stream.
2429  */
2430 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2431 				 struct vmw_sw_context *sw_context,
2432 				 SVGA3dCmdHeader *header)
2433 {
2434 	struct {
2435 		SVGA3dCmdHeader header;
2436 		SVGA3dCmdDXSetShader body;
2437 	} *cmd;
2438 	struct vmw_resource *res = NULL;
2439 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2440 	struct vmw_ctx_bindinfo_shader binding;
2441 	int ret = 0;
2442 
2443 	if (unlikely(ctx_node == NULL)) {
2444 		DRM_ERROR("DX Context not set.\n");
2445 		return -EINVAL;
2446 	}
2447 
2448 	cmd = container_of(header, typeof(*cmd), header);
2449 
2450 	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2451 		DRM_ERROR("Illegal shader type %u.\n",
2452 			  (unsigned) cmd->body.type);
2453 		return -EINVAL;
2454 	}
2455 
2456 	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2457 		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2458 		if (IS_ERR(res)) {
2459 			DRM_ERROR("Could not find shader for binding.\n");
2460 			return PTR_ERR(res);
2461 		}
2462 
2463 		ret = vmw_resource_val_add(sw_context, res, NULL);
2464 		if (ret)
2465 			goto out_unref;
2466 	}
2467 
2468 	binding.bi.ctx = ctx_node->res;
2469 	binding.bi.res = res;
2470 	binding.bi.bt = vmw_ctx_binding_dx_shader;
2471 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2472 
2473 	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2474 			binding.shader_slot, 0);
2475 out_unref:
2476 	if (res)
2477 		vmw_resource_unreference(&res);
2478 
2479 	return ret;
2480 }
2481 
2482 /**
2483  * vmw_cmd_dx_set_vertex_buffers - Validate an
2484  * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2485  *
2486  * @dev_priv: Pointer to a device private struct.
2487  * @sw_context: The software context being used for this batch.
2488  * @header: Pointer to the command header in the command stream.
2489  */
2490 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2491 					 struct vmw_sw_context *sw_context,
2492 					 SVGA3dCmdHeader *header)
2493 {
2494 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2495 	struct vmw_ctx_bindinfo_vb binding;
2496 	struct vmw_resource_val_node *res_node;
2497 	struct {
2498 		SVGA3dCmdHeader header;
2499 		SVGA3dCmdDXSetVertexBuffers body;
2500 		SVGA3dVertexBuffer buf[];
2501 	} *cmd;
2502 	int i, ret, num;
2503 
2504 	if (unlikely(ctx_node == NULL)) {
2505 		DRM_ERROR("DX Context not set.\n");
2506 		return -EINVAL;
2507 	}
2508 
2509 	cmd = container_of(header, typeof(*cmd), header);
2510 	num = (cmd->header.size - sizeof(cmd->body)) /
2511 		sizeof(SVGA3dVertexBuffer);
2512 	if ((u64)num + (u64)cmd->body.startBuffer >
2513 	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2514 		DRM_ERROR("Invalid number of vertex buffers.\n");
2515 		return -EINVAL;
2516 	}
2517 
2518 	for (i = 0; i < num; i++) {
2519 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2520 					user_surface_converter,
2521 					&cmd->buf[i].sid, &res_node);
2522 		if (unlikely(ret != 0))
2523 			return ret;
2524 
2525 		binding.bi.ctx = ctx_node->res;
2526 		binding.bi.bt = vmw_ctx_binding_vb;
2527 		binding.bi.res = ((res_node) ? res_node->res : NULL);
2528 		binding.offset = cmd->buf[i].offset;
2529 		binding.stride = cmd->buf[i].stride;
2530 		binding.slot = i + cmd->body.startBuffer;
2531 
2532 		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2533 				0, binding.slot);
2534 	}
2535 
2536 	return 0;
2537 }
2538 
2539 /**
2540  * vmw_cmd_dx_set_index_buffer - Validate an
2541  * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2542  *
2543  * @dev_priv: Pointer to a device private struct.
2544  * @sw_context: The software context being used for this batch.
2545  * @header: Pointer to the command header in the command stream.
2546  */
2547 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2548 				       struct vmw_sw_context *sw_context,
2549 				       SVGA3dCmdHeader *header)
2550 {
2551 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2552 	struct vmw_ctx_bindinfo_ib binding;
2553 	struct vmw_resource_val_node *res_node;
2554 	struct {
2555 		SVGA3dCmdHeader header;
2556 		SVGA3dCmdDXSetIndexBuffer body;
2557 	} *cmd;
2558 	int ret;
2559 
2560 	if (unlikely(ctx_node == NULL)) {
2561 		DRM_ERROR("DX Context not set.\n");
2562 		return -EINVAL;
2563 	}
2564 
2565 	cmd = container_of(header, typeof(*cmd), header);
2566 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2567 				user_surface_converter,
2568 				&cmd->body.sid, &res_node);
2569 	if (unlikely(ret != 0))
2570 		return ret;
2571 
2572 	binding.bi.ctx = ctx_node->res;
2573 	binding.bi.res = ((res_node) ? res_node->res : NULL);
2574 	binding.bi.bt = vmw_ctx_binding_ib;
2575 	binding.offset = cmd->body.offset;
2576 	binding.format = cmd->body.format;
2577 
2578 	vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
2579 
2580 	return 0;
2581 }
2582 
2583 /**
2584  * vmw_cmd_dx_set_rendertargets - Validate an
2585  * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2586  *
2587  * @dev_priv: Pointer to a device private struct.
2588  * @sw_context: The software context being used for this batch.
2589  * @header: Pointer to the command header in the command stream.
2590  */
2591 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2592 					struct vmw_sw_context *sw_context,
2593 					SVGA3dCmdHeader *header)
2594 {
2595 	struct {
2596 		SVGA3dCmdHeader header;
2597 		SVGA3dCmdDXSetRenderTargets body;
2598 	} *cmd = container_of(header, typeof(*cmd), header);
2599 	int ret;
2600 	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2601 		sizeof(SVGA3dRenderTargetViewId);
2602 
2603 	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2604 		DRM_ERROR("Invalid DX Rendertarget binding.\n");
2605 		return -EINVAL;
2606 	}
2607 
2608 	ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2609 				    vmw_ctx_binding_ds, 0,
2610 				    &cmd->body.depthStencilViewId, 1, 0);
2611 	if (ret)
2612 		return ret;
2613 
2614 	return vmw_view_bindings_add(sw_context, vmw_view_rt,
2615 				     vmw_ctx_binding_dx_rt, 0,
2616 				     (void *)&cmd[1], num_rt_view, 0);
2617 }
2618 
2619 /**
2620  * vmw_cmd_dx_clear_rendertarget_view - Validate an
2621  * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2622  *
2623  * @dev_priv: Pointer to a device private struct.
2624  * @sw_context: The software context being used for this batch.
2625  * @header: Pointer to the command header in the command stream.
2626  */
2627 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2628 					      struct vmw_sw_context *sw_context,
2629 					      SVGA3dCmdHeader *header)
2630 {
2631 	struct {
2632 		SVGA3dCmdHeader header;
2633 		SVGA3dCmdDXClearRenderTargetView body;
2634 	} *cmd = container_of(header, typeof(*cmd), header);
2635 
2636 	return vmw_view_id_val_add(sw_context, vmw_view_rt,
2637 				   cmd->body.renderTargetViewId);
2638 }
2639 
2640 /**
2641  * vmw_cmd_dx_clear_depthstencil_view - Validate an
2642  * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2643  *
2644  * @dev_priv: Pointer to a device private struct.
2645  * @sw_context: The software context being used for this batch.
2646  * @header: Pointer to the command header in the command stream.
2647  */
2648 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2649 					      struct vmw_sw_context *sw_context,
2650 					      SVGA3dCmdHeader *header)
2651 {
2652 	struct {
2653 		SVGA3dCmdHeader header;
2654 		SVGA3dCmdDXClearDepthStencilView body;
2655 	} *cmd = container_of(header, typeof(*cmd), header);
2656 
2657 	return vmw_view_id_val_add(sw_context, vmw_view_ds,
2658 				   cmd->body.depthStencilViewId);
2659 }
2660 
2661 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2662 				  struct vmw_sw_context *sw_context,
2663 				  SVGA3dCmdHeader *header)
2664 {
2665 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2666 	struct vmw_resource_val_node *srf_node;
2667 	struct vmw_resource *res;
2668 	enum vmw_view_type view_type;
2669 	int ret;
2670 	/*
2671 	 * This is based on the fact that all affected define commands have
2672 	 * the same initial command body layout.
2673 	 */
2674 	struct {
2675 		SVGA3dCmdHeader header;
2676 		uint32 defined_id;
2677 		uint32 sid;
2678 	} *cmd;
2679 
2680 	if (unlikely(ctx_node == NULL)) {
2681 		DRM_ERROR("DX Context not set.\n");
2682 		return -EINVAL;
2683 	}
2684 
2685 	view_type = vmw_view_cmd_to_type(header->id);
2686 	if (view_type == vmw_view_max)
2687 		return -EINVAL;
2688 	cmd = container_of(header, typeof(*cmd), header);
2689 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2690 				user_surface_converter,
2691 				&cmd->sid, &srf_node);
2692 	if (unlikely(ret != 0))
2693 		return ret;
2694 
2695 	res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
2696 	ret = vmw_cotable_notify(res, cmd->defined_id);
2697 	vmw_resource_unreference(&res);
2698 	if (unlikely(ret != 0))
2699 		return ret;
2700 
2701 	return vmw_view_add(sw_context->man,
2702 			    ctx_node->res,
2703 			    srf_node->res,
2704 			    view_type,
2705 			    cmd->defined_id,
2706 			    header,
2707 			    header->size + sizeof(*header),
2708 			    &sw_context->staged_cmd_res);
2709 }
2710 
2711 /**
2712  * vmw_cmd_dx_set_so_targets - Validate an
2713  * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2714  *
2715  * @dev_priv: Pointer to a device private struct.
2716  * @sw_context: The software context being used for this batch.
2717  * @header: Pointer to the command header in the command stream.
2718  */
2719 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2720 				     struct vmw_sw_context *sw_context,
2721 				     SVGA3dCmdHeader *header)
2722 {
2723 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2724 	struct vmw_ctx_bindinfo_so binding;
2725 	struct vmw_resource_val_node *res_node;
2726 	struct {
2727 		SVGA3dCmdHeader header;
2728 		SVGA3dCmdDXSetSOTargets body;
2729 		SVGA3dSoTarget targets[];
2730 	} *cmd;
2731 	int i, ret, num;
2732 
2733 	if (unlikely(ctx_node == NULL)) {
2734 		DRM_ERROR("DX Context not set.\n");
2735 		return -EINVAL;
2736 	}
2737 
2738 	cmd = container_of(header, typeof(*cmd), header);
2739 	num = (cmd->header.size - sizeof(cmd->body)) /
2740 		sizeof(SVGA3dSoTarget);
2741 
2742 	if (num > SVGA3D_DX_MAX_SOTARGETS) {
2743 		DRM_ERROR("Invalid DX SO binding.\n");
2744 		return -EINVAL;
2745 	}
2746 
2747 	for (i = 0; i < num; i++) {
2748 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2749 					user_surface_converter,
2750 					&cmd->targets[i].sid, &res_node);
2751 		if (unlikely(ret != 0))
2752 			return ret;
2753 
2754 		binding.bi.ctx = ctx_node->res;
2755 		binding.bi.res = ((res_node) ? res_node->res : NULL);
2756 		binding.bi.bt = vmw_ctx_binding_so;
2757 		binding.offset = cmd->targets[i].offset;
2758 		binding.size = cmd->targets[i].sizeInBytes;
2759 		binding.slot = i;
2760 
2761 		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2762 				0, binding.slot);
2763 	}
2764 
2765 	return 0;
2766 }
2767 
2768 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2769 				struct vmw_sw_context *sw_context,
2770 				SVGA3dCmdHeader *header)
2771 {
2772 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2773 	struct vmw_resource *res;
2774 	/*
2775 	 * This is based on the fact that all affected define commands have
2776 	 * the same initial command body layout.
2777 	 */
2778 	struct {
2779 		SVGA3dCmdHeader header;
2780 		uint32 defined_id;
2781 	} *cmd;
2782 	enum vmw_so_type so_type;
2783 	int ret;
2784 
2785 	if (unlikely(ctx_node == NULL)) {
2786 		DRM_ERROR("DX Context not set.\n");
2787 		return -EINVAL;
2788 	}
2789 
2790 	so_type = vmw_so_cmd_to_type(header->id);
2791 	res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
2792 	cmd = container_of(header, typeof(*cmd), header);
2793 	ret = vmw_cotable_notify(res, cmd->defined_id);
2794 	vmw_resource_unreference(&res);
2795 
2796 	return ret;
2797 }
2798 
2799 /**
2800  * vmw_cmd_dx_check_subresource - Validate an
2801  * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2802  *
2803  * @dev_priv: Pointer to a device private struct.
2804  * @sw_context: The software context being used for this batch.
2805  * @header: Pointer to the command header in the command stream.
2806  */
2807 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2808 					struct vmw_sw_context *sw_context,
2809 					SVGA3dCmdHeader *header)
2810 {
2811 	struct {
2812 		SVGA3dCmdHeader header;
2813 		union {
2814 			SVGA3dCmdDXReadbackSubResource r_body;
2815 			SVGA3dCmdDXInvalidateSubResource i_body;
2816 			SVGA3dCmdDXUpdateSubResource u_body;
2817 			SVGA3dSurfaceId sid;
2818 		};
2819 	} *cmd;
2820 
2821 	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2822 		     offsetof(typeof(*cmd), sid));
2823 	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2824 		     offsetof(typeof(*cmd), sid));
2825 	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2826 		     offsetof(typeof(*cmd), sid));
2827 
2828 	cmd = container_of(header, typeof(*cmd), header);
2829 
2830 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2831 				 user_surface_converter,
2832 				 &cmd->sid, NULL);
2833 }
2834 
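/*
 * The union and BUILD_BUG_ON()s above are a compile-time proof that the
 * readback, invalidate and update subresource bodies all place the
 * surface id at the same offset, which is what allows one handler to
 * validate cmd->sid for all three command ids.
 */
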
2835 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2836 				struct vmw_sw_context *sw_context,
2837 				SVGA3dCmdHeader *header)
2838 {
2839 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2840 
2841 	if (unlikely(ctx_node == NULL)) {
2842 		DRM_ERROR("DX Context not set.\n");
2843 		return -EINVAL;
2844 	}
2845 
2846 	return 0;
2847 }
2848 
2849 /**
2850  * vmw_cmd_dx_view_remove - Validate a view remove command and
2851  * schedule the view resource for removal.
2852  *
2853  * @dev_priv: Pointer to a device private struct.
2854  * @sw_context: The software context being used for this batch.
2855  * @header: Pointer to the command header in the command stream.
2856  *
2857  * Check that the view exists, and if it was not created using this
2858  * command batch, make sure it's validated (present in the device) so that
2859  * the remove command will not confuse the device.
2860  */
2861 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2862 				  struct vmw_sw_context *sw_context,
2863 				  SVGA3dCmdHeader *header)
2864 {
2865 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2866 	struct {
2867 		SVGA3dCmdHeader header;
2868 		union vmw_view_destroy body;
2869 	} *cmd = container_of(header, typeof(*cmd), header);
2870 	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2871 	struct vmw_resource *view;
2872 	int ret;
2873 
2874 	if (!ctx_node) {
2875 		DRM_ERROR("DX Context not set.\n");
2876 		return -EINVAL;
2877 	}
2878 
2879 	ret = vmw_view_remove(sw_context->man,
2880 			      cmd->body.view_id, view_type,
2881 			      &sw_context->staged_cmd_res,
2882 			      &view);
2883 	if (ret || !view)
2884 		return ret;
2885 
2886 	/*
2887 	 * Add view to the validate list iff it was not created using this
2888 	 * command batch.
2889 	 */
2890 	return vmw_view_res_val_add(sw_context, view);
2891 }
2892 
2893 /**
2894  * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2895  * command
2896  *
2897  * @dev_priv: Pointer to a device private struct.
2898  * @sw_context: The software context being used for this batch.
2899  * @header: Pointer to the command header in the command stream.
2900  */
2901 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2902 				    struct vmw_sw_context *sw_context,
2903 				    SVGA3dCmdHeader *header)
2904 {
2905 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2906 	struct vmw_resource *res;
2907 	struct {
2908 		SVGA3dCmdHeader header;
2909 		SVGA3dCmdDXDefineShader body;
2910 	} *cmd = container_of(header, typeof(*cmd), header);
2911 	int ret;
2912 
2913 	if (!ctx_node) {
2914 		DRM_ERROR("DX Context not set.\n");
2915 		return -EINVAL;
2916 	}
2917 
2918 	res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
2919 	ret = vmw_cotable_notify(res, cmd->body.shaderId);
2920 	vmw_resource_unreference(&res);
2921 	if (ret)
2922 		return ret;
2923 
2924 	return vmw_dx_shader_add(sw_context->man, ctx_node->res,
2925 				 cmd->body.shaderId, cmd->body.type,
2926 				 &sw_context->staged_cmd_res);
2927 }
2928 
2929 /**
2930  * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2931  * command
2932  *
2933  * @dev_priv: Pointer to a device private struct.
2934  * @sw_context: The software context being used for this batch.
2935  * @header: Pointer to the command header in the command stream.
2936  */
2937 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2938 				     struct vmw_sw_context *sw_context,
2939 				     SVGA3dCmdHeader *header)
2940 {
2941 	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2942 	struct {
2943 		SVGA3dCmdHeader header;
2944 		SVGA3dCmdDXDestroyShader body;
2945 	} *cmd = container_of(header, typeof(*cmd), header);
2946 	int ret;
2947 
2948 	if (!ctx_node) {
2949 		DRM_ERROR("DX Context not set.\n");
2950 		return -EINVAL;
2951 	}
2952 
2953 	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2954 				&sw_context->staged_cmd_res);
2955 	if (ret)
2956 		DRM_ERROR("Could not find shader to remove.\n");
2957 
2958 	return ret;
2959 }
2960 
2961 /**
2962  * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
2963  * command
2964  *
2965  * @dev_priv: Pointer to a device private struct.
2966  * @sw_context: The software context being used for this batch.
2967  * @header: Pointer to the command header in the command stream.
2968  */
2969 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2970 				  struct vmw_sw_context *sw_context,
2971 				  SVGA3dCmdHeader *header)
2972 {
2973 	struct vmw_resource_val_node *ctx_node;
2974 	struct vmw_resource_val_node *res_node;
2975 	struct vmw_resource *res;
2976 	struct {
2977 		SVGA3dCmdHeader header;
2978 		SVGA3dCmdDXBindShader body;
2979 	} *cmd = container_of(header, typeof(*cmd), header);
2980 	int ret;
2981 
2982 	if (cmd->body.cid != SVGA3D_INVALID_ID) {
2983 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2984 					user_context_converter,
2985 					&cmd->body.cid, &ctx_node);
2986 		if (ret)
2987 			return ret;
2988 	} else {
2989 		ctx_node = sw_context->dx_ctx_node;
2990 		if (!ctx_node) {
2991 			DRM_ERROR("DX Context not set.\n");
2992 			return -EINVAL;
2993 		}
2994 	}
2995 
2996 	res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2997 				cmd->body.shid, 0);
2998 	if (IS_ERR(res)) {
2999 		DRM_ERROR("Could not find shader to bind.\n");
3000 		return PTR_ERR(res);
3001 	}
3002 
3003 	ret = vmw_resource_val_add(sw_context, res, &res_node);
3004 	if (ret) {
3005 		DRM_ERROR("Error creating resource validation node.\n");
3006 		goto out_unref;
3007 	}
3008 
3009 
3010 	ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
3011 					&cmd->body.mobid,
3012 					cmd->body.offsetInBytes);
3013 out_unref:
3014 	vmw_resource_unreference(&res);
3015 
3016 	return ret;
3017 }
3018 
3019 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3020 				struct vmw_sw_context *sw_context,
3021 				void *buf, uint32_t *size)
3022 {
3023 	uint32_t size_remaining = *size;
3024 	uint32_t cmd_id;
3025 
3026 	cmd_id = ((uint32_t *)buf)[0];
3027 	switch (cmd_id) {
3028 	case SVGA_CMD_UPDATE:
3029 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3030 		break;
3031 	case SVGA_CMD_DEFINE_GMRFB:
3032 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3033 		break;
3034 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3035 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3036 		break;
3037 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3038 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3039 		break;
3040 	default:
3041 		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
3042 		return -EINVAL;
3043 	}
3044 
3045 	if (*size > size_remaining) {
3046 		DRM_ERROR("Invalid SVGA command (size mismatch):"
3047 			  " %u.\n", cmd_id);
3048 		return -EINVAL;
3049 	}
3050 
3051 	if (unlikely(!sw_context->kernel)) {
3052 		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
3053 		return -EPERM;
3054 	}
3055 
3056 	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3057 		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3058 
3059 	return 0;
3060 }
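
/*
 * Unlike the 3D commands below, the legacy 2D FIFO commands carry no
 * size field: each is a bare uint32 id followed by a fixed-size body,
 * for example (a sketch)
 *
 *	uint32 id;                - SVGA_CMD_UPDATE
 *	SVGAFifoCmdUpdate body;   - x, y, width, height
 *
 * so vmw_cmd_check_not_3d() must derive *size from the id itself before
 * it can even tell where the next command starts.
 */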
3061 
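/*
 * Dispatch table for the 3D command verifiers. The VMW_CMD_DEF()
 * arguments are, in order: the command id, the validation function and
 * three flags consumed by vmw_cmd_check() below - user_allow (the
 * command may come from an unprivileged client), gb_disable (the command
 * is invalid on a device with guest-backed objects) and gb_enable (the
 * command requires guest-backed-object support).
 */
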
3062 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3063 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3064 		    false, false, false),
3065 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3066 		    false, false, false),
3067 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3068 		    true, false, false),
3069 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3070 		    true, false, false),
3071 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3072 		    true, false, false),
3073 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3074 		    false, false, false),
3075 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3076 		    false, false, false),
3077 	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3078 		    true, false, false),
3079 	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3080 		    true, false, false),
3081 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3082 		    true, false, false),
3083 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3084 		    &vmw_cmd_set_render_target_check, true, false, false),
3085 	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3086 		    true, false, false),
3087 	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3088 		    true, false, false),
3089 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3090 		    true, false, false),
3091 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3092 		    true, false, false),
3093 	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3094 		    true, false, false),
3095 	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3096 		    true, false, false),
3097 	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3098 		    true, false, false),
3099 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3100 		    false, false, false),
3101 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3102 		    true, false, false),
3103 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3104 		    true, false, false),
3105 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3106 		    true, false, false),
3107 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3108 		    true, false, false),
3109 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3110 		    true, false, false),
3111 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3112 		    true, false, false),
3113 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3114 		    true, false, false),
3115 	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3116 		    true, false, false),
3117 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3118 		    true, false, false),
3119 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3120 		    true, false, false),
3121 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3122 		    &vmw_cmd_blt_surf_screen_check, false, false, false),
3123 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3124 		    false, false, false),
3125 	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3126 		    false, false, false),
3127 	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3128 		    false, false, false),
3129 	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3130 		    false, false, false),
3131 	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3132 		    false, false, false),
3133 	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
3134 		    false, false, false),
3135 	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
3136 		    false, false, false),
3137 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
3138 		    false, false, false),
3139 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
3140 		    false, false, false),
3141 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
3142 		    false, false, false),
3143 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
3144 		    false, false, false),
3145 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
3146 		    false, false, false),
3147 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
3148 		    false, false, false),
3149 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3150 		    false, false, true),
3151 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3152 		    false, false, true),
3153 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3154 		    false, false, true),
3155 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3156 		    false, false, true),
3157 	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3158 		    false, false, true),
3159 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3160 		    false, false, true),
3161 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3162 		    false, false, true),
3163 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3164 		    false, false, true),
3165 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3166 		    true, false, true),
3167 	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3168 		    false, false, true),
3169 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3170 		    true, false, true),
3171 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3172 		    &vmw_cmd_update_gb_surface, true, false, true),
3173 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3174 		    &vmw_cmd_readback_gb_image, true, false, true),
3175 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3176 		    &vmw_cmd_readback_gb_surface, true, false, true),
3177 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3178 		    &vmw_cmd_invalidate_gb_image, true, false, true),
3179 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3180 		    &vmw_cmd_invalidate_gb_surface, true, false, true),
3181 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3182 		    false, false, true),
3183 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3184 		    false, false, true),
3185 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3186 		    false, false, true),
3187 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3188 		    false, false, true),
3189 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3190 		    false, false, true),
3191 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3192 		    false, false, true),
3193 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3194 		    true, false, true),
3195 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3196 		    false, false, true),
3197 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3198 		    false, false, false),
3199 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3200 		    true, false, true),
3201 	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3202 		    true, false, true),
3203 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3204 		    true, false, true),
3205 	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3206 		    true, false, true),
3207 	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3208 		    false, false, true),
3209 	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3210 		    false, false, true),
3211 	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3212 		    false, false, true),
3213 	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3214 		    false, false, true),
3215 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3216 		    false, false, true),
3217 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3218 		    false, false, true),
3219 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3220 		    false, false, true),
3221 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3222 		    false, false, true),
3223 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3224 		    false, false, true),
3225 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3226 		    false, false, true),
3227 	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3228 		    true, false, true),
3229 	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3230 		    false, false, true),
3231 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3232 		    false, false, true),
3233 	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3234 		    false, false, true),
3235 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3236 		    false, false, true),
3237 
3238 	/*
3239 	 * DX commands
3240 	 */
3241 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3242 		    false, false, true),
3243 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3244 		    false, false, true),
3245 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3246 		    false, false, true),
3247 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3248 		    false, false, true),
3249 	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3250 		    false, false, true),
3251 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3252 		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3253 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3254 		    &vmw_cmd_dx_set_shader_res, true, false, true),
3255 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3256 		    true, false, true),
3257 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3258 		    true, false, true),
3259 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3260 		    true, false, true),
3261 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3262 		    true, false, true),
3263 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3264 		    true, false, true),
3265 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3266 		    &vmw_cmd_dx_cid_check, true, false, true),
3267 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3268 		    true, false, true),
3269 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3270 		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3271 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3272 		    &vmw_cmd_dx_set_index_buffer, true, false, true),
3273 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3274 		    &vmw_cmd_dx_set_rendertargets, true, false, true),
3275 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3276 		    true, false, true),
3277 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3278 		    &vmw_cmd_dx_cid_check, true, false, true),
3279 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3280 		    &vmw_cmd_dx_cid_check, true, false, true),
3281 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3282 		    true, false, true),
3283 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3284 		    true, false, true),
3285 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3286 		    true, false, true),
3287 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3288 		    &vmw_cmd_dx_cid_check, true, false, true),
3289 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3290 		    true, false, true),
3291 	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3292 		    true, false, true),
3293 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3294 		    true, false, true),
3295 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3296 		    true, false, true),
3297 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3298 		    true, false, true),
3299 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3300 		    true, false, true),
3301 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3302 		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3303 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3304 		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3305 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3306 		    true, false, true),
3307 	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
3308 		    true, false, true),
3309 	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3310 		    &vmw_cmd_dx_check_subresource, true, false, true),
3311 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3312 		    &vmw_cmd_dx_check_subresource, true, false, true),
3313 	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3314 		    &vmw_cmd_dx_check_subresource, true, false, true),
3315 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3316 		    &vmw_cmd_dx_view_define, true, false, true),
3317 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3318 		    &vmw_cmd_dx_view_remove, true, false, true),
3319 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3320 		    &vmw_cmd_dx_view_define, true, false, true),
3321 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3322 		    &vmw_cmd_dx_view_remove, true, false, true),
3323 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3324 		    &vmw_cmd_dx_view_define, true, false, true),
3325 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3326 		    &vmw_cmd_dx_view_remove, true, false, true),
3327 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3328 		    &vmw_cmd_dx_so_define, true, false, true),
3329 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3330 		    &vmw_cmd_dx_cid_check, true, false, true),
3331 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3332 		    &vmw_cmd_dx_so_define, true, false, true),
3333 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3334 		    &vmw_cmd_dx_cid_check, true, false, true),
3335 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3336 		    &vmw_cmd_dx_so_define, true, false, true),
3337 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3338 		    &vmw_cmd_dx_cid_check, true, false, true),
3339 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3340 		    &vmw_cmd_dx_so_define, true, false, true),
3341 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3342 		    &vmw_cmd_dx_cid_check, true, false, true),
3343 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3344 		    &vmw_cmd_dx_so_define, true, false, true),
3345 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3346 		    &vmw_cmd_dx_cid_check, true, false, true),
3347 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3348 		    &vmw_cmd_dx_define_shader, true, false, true),
3349 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3350 		    &vmw_cmd_dx_destroy_shader, true, false, true),
3351 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3352 		    &vmw_cmd_dx_bind_shader, true, false, true),
3353 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3354 		    &vmw_cmd_dx_so_define, true, false, true),
3355 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3356 		    &vmw_cmd_dx_cid_check, true, false, true),
3357 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3358 		    true, false, true),
3359 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3360 		    &vmw_cmd_dx_set_so_targets, true, false, true),
3361 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3362 		    &vmw_cmd_dx_cid_check, true, false, true),
3363 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3364 		    &vmw_cmd_dx_cid_check, true, false, true),
3365 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3366 		    &vmw_cmd_buffer_copy_check, true, false, true),
3367 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3368 		    &vmw_cmd_pred_copy_check, true, false, true),
3369 };
3370 
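/*
 * A minimal sketch of how the table above is consumed. VMW_CMD_DEF
 * (defined earlier in this file) expands to a designated initializer
 * keyed by (cmd - SVGA_3D_CMD_BASE), so lookup is a direct index rather
 * than a search:
 *
 *	const struct vmw_cmd_entry *entry =
 *		&vmw_cmd_entries[cmd_id - SVGA_3D_CMD_BASE];
 *
 * Command ids without an entry leave a zero-initialized slot whose func
 * pointer is NULL, which vmw_cmd_check() below rejects as invalid.
 */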
3371 static int vmw_cmd_check(struct vmw_private *dev_priv,
3372 			 struct vmw_sw_context *sw_context,
3373 			 void *buf, uint32_t *size)
3374 {
3375 	uint32_t cmd_id;
3376 	uint32_t size_remaining = *size;
3377 	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3378 	int ret;
3379 	const struct vmw_cmd_entry *entry;
3380 	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3381 
3382 	cmd_id = ((uint32_t *)buf)[0];
3383 	/* Handle any non-3D commands. */
3384 	if (unlikely(cmd_id < SVGA_CMD_MAX))
3385 		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3386 
3387 
3388 	cmd_id = header->id;
3389 	*size = header->size + sizeof(SVGA3dCmdHeader);
3390 
3391 	cmd_id -= SVGA_3D_CMD_BASE;
3392 	if (unlikely(*size > size_remaining))
3393 		goto out_invalid;
3394 
3395 	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3396 		goto out_invalid;
3397 
3398 	entry = &vmw_cmd_entries[cmd_id];
3399 	if (unlikely(!entry->func))
3400 		goto out_invalid;
3401 
3402 	if (unlikely(!entry->user_allow && !sw_context->kernel))
3403 		goto out_privileged;
3404 
3405 	if (unlikely(entry->gb_disable && gb))
3406 		goto out_old;
3407 
3408 	if (unlikely(entry->gb_enable && !gb))
3409 		goto out_new;
3410 
3411 	ret = entry->func(dev_priv, sw_context, header);
3412 	if (unlikely(ret != 0))
3413 		goto out_invalid;
3414 
3415 	return 0;
3416 out_invalid:
3417 	DRM_ERROR("Invalid SVGA3D command: %d\n",
3418 		  cmd_id + SVGA_3D_CMD_BASE);
3419 	return -EINVAL;
3420 out_privileged:
3421 	DRM_ERROR("Privileged SVGA3D command: %d\n",
3422 		  cmd_id + SVGA_3D_CMD_BASE);
3423 	return -EPERM;
3424 out_old:
3425 	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
3426 		  cmd_id + SVGA_3D_CMD_BASE);
3427 	return -EINVAL;
3428 out_new:
3429 	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
3430 		  cmd_id + SVGA_3D_CMD_BASE);
3431 	return -EINVAL;
3432 }
3433 
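/*
 * A sketch of the 3D command framing assumed by vmw_cmd_check() above:
 * each 3D command starts with an SVGA3dCmdHeader carrying the command id
 * and the size of the body that follows, so the full on-stream footprint
 * of one command is
 *
 *	SVGA3dCmdHeader *header = buf;
 *	uint32_t total = header->size + sizeof(SVGA3dCmdHeader);
 *
 * Ids below SVGA_CMD_MAX are legacy non-3D commands without this header
 * and are routed to vmw_cmd_check_not_3d() instead.
 */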
3434 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3435 			     struct vmw_sw_context *sw_context,
3436 			     void *buf,
3437 			     uint32_t size)
3438 {
3439 	int32_t cur_size = size;
3440 	int ret;
3441 
3442 	sw_context->buf_start = buf;
3443 
3444 	while (cur_size > 0) {
3445 		size = cur_size;
3446 		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3447 		if (unlikely(ret != 0))
3448 			return ret;
3449 		buf = (void *)((unsigned long) buf + size);
3450 		cur_size -= size;
3451 	}
3452 
3453 	if (unlikely(cur_size != 0)) {
3454 		DRM_ERROR("Command verifier out of sync.\n");
3455 		return -EINVAL;
3456 	}
3457 
3458 	return 0;
3459 }
3460 
3461 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3462 {
3463 	sw_context->cur_reloc = 0;
3464 }
3465 
3466 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3467 {
3468 	uint32_t i;
3469 	struct vmw_relocation *reloc;
3470 	struct ttm_validate_buffer *validate;
3471 	struct ttm_buffer_object *bo;
3472 
3473 	for (i = 0; i < sw_context->cur_reloc; ++i) {
3474 		reloc = &sw_context->relocs[i];
3475 		validate = &sw_context->val_bufs[reloc->index].base;
3476 		bo = validate->bo;
3477 		switch (bo->mem.mem_type) {
3478 		case TTM_PL_VRAM:
3479 			reloc->location->offset += bo->offset;
3480 			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3481 			break;
3482 		case VMW_PL_GMR:
3483 			reloc->location->gmrId = bo->mem.start;
3484 			break;
3485 		case VMW_PL_MOB:
3486 			*reloc->mob_loc = bo->mem.start;
3487 			break;
3488 		default:
3489 			BUG();
3490 		}
3491 	}
3492 	vmw_free_relocations(sw_context);
3493 }
3494 
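/*
 * A note on the switch in vmw_apply_relocations() above: a guest pointer
 * in the command stream is patched differently depending on the buffer's
 * final placement. A VRAM buffer is addressed as an offset within the
 * special framebuffer GMR,
 *
 *	reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
 *	reloc->location->offset += bo->offset;
 *
 * while a real GMR or a MOB is addressed by the id the buffer landed in
 * (bo->mem.start). Any other placement indicates a validation bug, hence
 * the BUG().
 */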
3495 /**
3496  * vmw_resource_list_unreference - Free up a resource list and unreference
3497  * all resources referenced by it.
3498  *
 * @sw_context: Pointer to the software context.
3499  * @list: The resource list.
3500  */
3501 static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
3502 					  struct list_head *list)
3503 {
3504 	struct vmw_resource_val_node *val, *val_next;
3505 
3506 	/*
3507 	 * Drop references to resources held during command submission.
3508 	 */
3509 
3510 	list_for_each_entry_safe(val, val_next, list, head) {
3511 		list_del_init(&val->head);
3512 		vmw_resource_unreference(&val->res);
3513 
3514 		if (val->staged_bindings) {
3515 			if (val->staged_bindings != sw_context->staged_bindings)
3516 				vmw_binding_state_free(val->staged_bindings);
3517 			else
3518 				sw_context->staged_bindings_inuse = false;
3519 			val->staged_bindings = NULL;
3520 		}
3521 
3522 		kfree(val);
3523 	}
3524 }
3525 
3526 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
3527 {
3528 	struct vmw_validate_buffer *entry, *next;
3529 	struct vmw_resource_val_node *val;
3530 
3531 	/*
3532 	 * Drop references to DMA buffers held during command submission.
3533 	 */
3534 	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
3535 				 base.head) {
3536 		list_del(&entry->base.head);
3537 		ttm_bo_unref(&entry->base.bo);
3538 		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
3539 		sw_context->cur_val_buf--;
3540 	}
3541 	BUG_ON(sw_context->cur_val_buf != 0);
3542 
3543 	list_for_each_entry(val, &sw_context->resource_list, head)
3544 		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
3545 }
3546 
3547 int vmw_validate_single_buffer(struct vmw_private *dev_priv,
3548 			       struct ttm_buffer_object *bo,
3549 			       bool interruptible,
3550 			       bool validate_as_mob)
3551 {
3552 	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
3553 						  base);
3554 	int ret;
3555 
3556 	if (vbo->pin_count > 0)
3557 		return 0;
3558 
3559 	if (validate_as_mob)
3560 		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
3561 				       false);
3562 
3563 	/*
3564 	 * Put BO in VRAM if there is space, otherwise as a GMR.
3565 	 * If there is no space in VRAM and GMR ids are all used up,
3566 	 * start evicting GMRs to make room. If the DMA buffer can't be
3567 	 * used as a GMR, this will return -ENOMEM.
3568 	 */
3569 
3570 	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
3571 			      false);
3572 	if (likely(ret == 0 || ret == -ERESTARTSYS))
3573 		return ret;
3574 
3575 	/*
3576 	 * If that failed, try VRAM again, this time evicting
3577 	 * previous contents.
3578 	 */
3579 
3580 	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
3581 	return ret;
3582 }
3583 
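/*
 * The placement fallback implemented above, summarized:
 *
 *	pinned		-> leave the buffer where it is (return 0)
 *	validate_as_mob	-> vmw_mob_placement only
 *	otherwise	-> vmw_vram_gmr_placement first (no VRAM eviction),
 *			   then vmw_vram_placement, which may evict previous
 *			   VRAM contents
 *
 * -ERESTARTSYS from the first attempt is returned as-is so an interrupted
 * wait is not escalated into an eviction pass.
 */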
3584 static int vmw_validate_buffers(struct vmw_private *dev_priv,
3585 				struct vmw_sw_context *sw_context)
3586 {
3587 	struct vmw_validate_buffer *entry;
3588 	int ret;
3589 
3590 	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
3591 		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
3592 						 true,
3593 						 entry->validate_as_mob);
3594 		if (unlikely(ret != 0))
3595 			return ret;
3596 	}
3597 	return 0;
3598 }
3599 
3600 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3601 				 uint32_t size)
3602 {
3603 	if (likely(sw_context->cmd_bounce_size >= size))
3604 		return 0;
3605 
3606 	if (sw_context->cmd_bounce_size == 0)
3607 		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3608 
3609 	while (sw_context->cmd_bounce_size < size) {
3610 		sw_context->cmd_bounce_size =
3611 			PAGE_ALIGN(sw_context->cmd_bounce_size +
3612 				   (sw_context->cmd_bounce_size >> 1));
3613 	}
3614 
3615 	if (sw_context->cmd_bounce != NULL)
3616 		vfree(sw_context->cmd_bounce);
3617 
3618 	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3619 
3620 	if (sw_context->cmd_bounce == NULL) {
3621 		DRM_ERROR("Failed to allocate command bounce buffer.\n");
3622 		sw_context->cmd_bounce_size = 0;
3623 		return -ENOMEM;
3624 	}
3625 
3626 	return 0;
3627 }
3628 
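/*
 * A worked example of the growth loop above, assuming a 4 KiB PAGE_SIZE
 * and a VMWGFX_CMD_BOUNCE_INIT_SIZE of 32 KiB: the bounce buffer grows by
 * roughly 1.5x per step, rounded up to a whole page, e.g.
 *
 *	32768 -> 49152 -> 73728 -> 110592 bytes
 *
 * until it covers the requested size. The old buffer is freed rather than
 * copied, since the bounce buffer is refilled from user-space on every
 * submission anyway.
 */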
3629 /**
3630  * vmw_execbuf_fence_commands - create and submit a command stream fence
3631  *
3632  * Creates a fence object and submits a command stream marker.
3633  * If this fails for some reason, we sync the fifo and set *@p_fence to
3634  * NULL. It is then safe to fence buffers with a NULL fence pointer.
3635  *
3636  * If @p_handle is not NULL, @file_priv must also not be NULL, and a
3637  * userspace handle is created for the fence; otherwise no handle is created.
3638  */
3639 
3640 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3641 			       struct vmw_private *dev_priv,
3642 			       struct vmw_fence_obj **p_fence,
3643 			       uint32_t *p_handle)
3644 {
3645 	uint32_t sequence;
3646 	int ret;
3647 	bool synced = false;
3648 
3649 	/* p_handle implies file_priv. */
3650 	BUG_ON(p_handle != NULL && file_priv == NULL);
3651 
3652 	ret = vmw_fifo_send_fence(dev_priv, &sequence);
3653 	if (unlikely(ret != 0)) {
3654 		DRM_ERROR("Fence submission error. Syncing.\n");
3655 		synced = true;
3656 	}
3657 
3658 	if (p_handle != NULL)
3659 		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3660 					    sequence, p_fence, p_handle);
3661 	else
3662 		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3663 
3664 	if (unlikely(ret != 0 && !synced)) {
3665 		(void) vmw_fallback_wait(dev_priv, false, false,
3666 					 sequence, false,
3667 					 VMW_FENCE_WAIT_TIMEOUT);
3668 		*p_fence = NULL;
3669 	}
3670 
3671 	return ret;
3672 }
3673 
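/*
 * A sketch of a typical kernel-internal caller that needs no user-space
 * handle, mirroring the use in __vmw_execbuf_release_pinned_bo() below:
 *
 *	struct vmw_fence_obj *fence = NULL;
 *
 *	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
 *	if (fence != NULL)
 *		vmw_fence_obj_unreference(&fence);
 *
 * Because a failed submission syncs the fifo and leaves *p_fence NULL,
 * the NULL fence is safe to hand to the fencing helpers.
 */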
3674 /**
3675  * vmw_execbuf_copy_fence_user - copy fence object information to
3676  * user-space.
3677  *
3678  * @dev_priv: Pointer to a vmw_private struct.
3679  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3680  * @ret: Return value from fence object creation.
3681  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
3682  * which the information should be copied.
3683  * @fence: Pointer to the fence object.
3684  * @fence_handle: User-space fence handle.
3685  *
3686  * This function copies fence information to user-space. If copying fails,
3687  * the user-space struct drm_vmw_fence_rep::error member is left
3688  * untouched; if user-space has preloaded it with -EFAULT, the copy
3689  * failure can then be detected.
3690  * Also, if copying fails, user-space will be unable to signal the fence
3691  * object, so we wait for it immediately and then unreference the
3692  * user-space reference.
3693  */
3694 void
3695 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3696 			    struct vmw_fpriv *vmw_fp,
3697 			    int ret,
3698 			    struct drm_vmw_fence_rep __user *user_fence_rep,
3699 			    struct vmw_fence_obj *fence,
3700 			    uint32_t fence_handle)
3701 {
3702 	struct drm_vmw_fence_rep fence_rep;
3703 
3704 	if (user_fence_rep == NULL)
3705 		return;
3706 
3707 	memset(&fence_rep, 0, sizeof(fence_rep));
3708 
3709 	fence_rep.error = ret;
3710 	if (ret == 0) {
3711 		BUG_ON(fence == NULL);
3712 
3713 		fence_rep.handle = fence_handle;
3714 		fence_rep.seqno = fence->base.seqno;
3715 		vmw_update_seqno(dev_priv, &dev_priv->fifo);
3716 		fence_rep.passed_seqno = dev_priv->last_read_seqno;
3717 	}
3718 
3719 	/*
3720 	 * copy_to_user errors will be detected by user-space, which
3721 	 * will not see fence_rep::error filled in. Typically,
3722 	 * user-space will have pre-set that member to -EFAULT.
3723 	 */
3724 	ret = copy_to_user(user_fence_rep, &fence_rep,
3725 			   sizeof(fence_rep));
3726 
3727 	/*
3728 	 * User-space lost the fence object. We need to sync
3729 	 * and unreference the handle.
3730 	 */
3731 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3732 		ttm_ref_object_base_unref(vmw_fp->tfile,
3733 					  fence_handle, TTM_REF_USAGE);
3734 		DRM_ERROR("Fence copy error. Syncing.\n");
3735 		(void) vmw_fence_obj_wait(fence, false, false,
3736 					  VMW_FENCE_WAIT_TIMEOUT);
3737 	}
3738 }
3739 
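/*
 * A sketch of the user-space side of the convention described above; the
 * struct and request names are from the vmwgfx UAPI, the surrounding code
 * is hypothetical:
 *
 *	struct drm_vmw_fence_rep rep;
 *
 *	rep.error = -EFAULT;		(preload the sentinel value)
 *	arg.fence_rep = (uintptr_t)&rep;
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *
 * If rep.error still holds -EFAULT after the ioctl, the kernel's copy-back
 * failed and the kernel has already waited on and released the fence.
 */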
3740 /**
3741  * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
3742  * the fifo.
3743  *
3744  * @dev_priv: Pointer to a device private structure.
3745  * @kernel_commands: Pointer to the unpatched command batch.
3746  * @command_size: Size of the unpatched command batch.
3747  * @sw_context: Structure holding the relocation lists.
3748  *
3749  * Side effects: If this function returns 0, then the command batch
3750  * pointed to by @kernel_commands will have been modified.
3751  */
3752 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3753 				   void *kernel_commands,
3754 				   u32 command_size,
3755 				   struct vmw_sw_context *sw_context)
3756 {
3757 	void *cmd;
3758 
3759 	if (sw_context->dx_ctx_node)
3760 		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
3761 					  sw_context->dx_ctx_node->res->id);
3762 	else
3763 		cmd = vmw_fifo_reserve(dev_priv, command_size);
3764 	if (!cmd) {
3765 		DRM_ERROR("Failed reserving fifo space for commands.\n");
3766 		return -ENOMEM;
3767 	}
3768 
3769 	vmw_apply_relocations(sw_context);
3770 	memcpy(cmd, kernel_commands, command_size);
3771 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3772 	vmw_resource_relocations_free(&sw_context->res_relocations);
3773 	vmw_fifo_commit(dev_priv, command_size);
3774 
3775 	return 0;
3776 }
3777 
3778 /**
3779  * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
3780  * the command buffer manager.
3781  *
3782  * @dev_priv: Pointer to a device private structure.
3783  * @header: Opaque handle to the command buffer allocation.
3784  * @command_size: Size of the unpatched command batch.
3785  * @sw_context: Structure holding the relocation lists.
3786  *
3787  * Side effects: If this function returns 0, then the command buffer
3788  * represented by @header will have been modified.
3789  */
3790 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3791 				     struct vmw_cmdbuf_header *header,
3792 				     u32 command_size,
3793 				     struct vmw_sw_context *sw_context)
3794 {
3795 	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
3796 		  SVGA3D_INVALID_ID);
3797 	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
3798 				       id, false, header);
3799 
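	/*
	 * The reservation here is made within the preallocated @header
	 * (see vmw_execbuf_cmdbuf()), which was sized to hold at least
	 * @command_size bytes, so @cmd is not expected to fail and is
	 * used unchecked.
	 */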
3800 	vmw_apply_relocations(sw_context);
3801 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3802 	vmw_resource_relocations_free(&sw_context->res_relocations);
3803 	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3804 
3805 	return 0;
3806 }
3807 
3808 /**
3809  * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3810  * submission using a command buffer.
3811  *
3812  * @dev_priv: Pointer to a device private structure.
3813  * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Kernel-space pointer to the command batch, or NULL if
 * the commands are to be copied from @user_commands.
3814  * @command_size: Size of the unpatched command batch.
3815  * @header: Out parameter returning the opaque pointer to the command buffer.
3816  *
3817  * This function checks whether we can use the command buffer manager for
3818  * submission and, if so, creates a command buffer of suitable size and
3819  * copies the user data into that buffer.
3820  *
3821  * On successful return, the function returns a pointer to the data in the
3822  * command buffer and *@header is set to non-NULL.
3823  * If the command buffer manager cannot be used, the function returns the
3824  * value of @kernel_commands it was called with. That value may be NULL; in
3825  * that case, *@header is also set to NULL.
3826  * If an error is encountered, the function returns a pointer error value.
3827  * If the function is interrupted by a signal while sleeping, it returns
3828  * -ERESTARTSYS cast to a pointer error value.
3829  */
3830 static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3831 				void __user *user_commands,
3832 				void *kernel_commands,
3833 				u32 command_size,
3834 				struct vmw_cmdbuf_header **header)
3835 {
3836 	size_t cmdbuf_size;
3837 	int ret;
3838 
3839 	*header = NULL;
3840 	if (command_size > SVGA_CB_MAX_SIZE) {
3841 		DRM_ERROR("Command buffer is too large.\n");
3842 		return ERR_PTR(-EINVAL);
3843 	}
3844 
3845 	if (!dev_priv->cman || kernel_commands)
3846 		return kernel_commands;
3847 
3848 	/* If possible, add a little space for fencing. */
3849 	cmdbuf_size = command_size + 512;
3850 	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3851 	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
3852 					   true, header);
3853 	if (IS_ERR(kernel_commands))
3854 		return kernel_commands;
3855 
3856 	ret = copy_from_user(kernel_commands, user_commands,
3857 			     command_size);
3858 	if (ret) {
3859 		DRM_ERROR("Failed copying commands.\n");
3860 		vmw_cmdbuf_header_free(*header);
3861 		*header = NULL;
3862 		return ERR_PTR(-EFAULT);
3863 	}
3864 
3865 	return kernel_commands;
3866 }
3867 
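/*
 * Callers must distinguish the three outcomes documented above using the
 * usual ERR_PTR convention, as vmw_execbuf_process() does:
 *
 *	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
 *					     kernel_commands, command_size,
 *					     &header);
 *	if (IS_ERR(kernel_commands))
 *		return PTR_ERR(kernel_commands);
 *
 * A NULL return with *header == NULL simply means the bounce-buffer path
 * must be used instead.
 */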
3868 static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3869 				   struct vmw_sw_context *sw_context,
3870 				   uint32_t handle)
3871 {
3872 	struct vmw_resource_val_node *ctx_node;
3873 	struct vmw_resource *res;
3874 	int ret;
3875 
3876 	if (handle == SVGA3D_INVALID_ID)
3877 		return 0;
3878 
3879 	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
3880 					      handle, user_context_converter,
3881 					      &res);
3882 	if (unlikely(ret != 0)) {
3883 		DRM_ERROR("Could not find or user DX context 0x%08x.\n",
3884 			  (unsigned) handle);
3885 		return ret;
3886 	}
3887 
3888 	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
3889 	if (unlikely(ret != 0))
3890 		goto out_err;
3891 
3892 	sw_context->dx_ctx_node = ctx_node;
3893 	sw_context->man = vmw_context_res_man(res);
3894 out_err:
3895 	vmw_resource_unreference(&res);
3896 	return ret;
3897 }
3898 
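/*
 * An overview of vmw_execbuf_process() below. The submission path is, in
 * order: (1) optional throttling, (2) obtaining a kernel copy of the
 * commands, either through the command buffer manager or through the
 * bounce buffer, (3) tying an optional DX context, (4) command
 * verification via vmw_cmd_check_all(), (5) reservation and validation
 * of resources and buffer objects, (6) patching and submission through
 * the fifo or the command buffer manager, and (7) fencing, unreservation
 * and cleanup. Errors unwind through the out_* labels in reverse order
 * of setup.
 */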
3899 int vmw_execbuf_process(struct drm_file *file_priv,
3900 			struct vmw_private *dev_priv,
3901 			void __user *user_commands,
3902 			void *kernel_commands,
3903 			uint32_t command_size,
3904 			uint64_t throttle_us,
3905 			uint32_t dx_context_handle,
3906 			struct drm_vmw_fence_rep __user *user_fence_rep,
3907 			struct vmw_fence_obj **out_fence)
3908 {
3909 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
3910 	struct vmw_fence_obj *fence = NULL;
3911 	struct vmw_resource *error_resource;
3912 	struct list_head resource_list;
3913 	struct vmw_cmdbuf_header *header;
3914 	struct ww_acquire_ctx ticket;
3915 	uint32_t handle;
3916 	int ret;
3917 
3918 	if (throttle_us) {
3919 		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
3920 				   throttle_us);
3921 
3922 		if (ret)
3923 			return ret;
3924 	}
3925 
3926 	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
3927 					     kernel_commands, command_size,
3928 					     &header);
3929 	if (IS_ERR(kernel_commands))
3930 		return PTR_ERR(kernel_commands);
3931 
3932 	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
3933 	if (ret) {
3934 		ret = -ERESTARTSYS;
3935 		goto out_free_header;
3936 	}
3937 
3938 	sw_context->kernel = false;
3939 	if (kernel_commands == NULL) {
3940 		ret = vmw_resize_cmd_bounce(sw_context, command_size);
3941 		if (unlikely(ret != 0))
3942 			goto out_unlock;
3943 
3944 
3945 		ret = copy_from_user(sw_context->cmd_bounce,
3946 				     user_commands, command_size);
3947 
3948 		if (unlikely(ret != 0)) {
3949 			ret = -EFAULT;
3950 			DRM_ERROR("Failed copying commands.\n");
3951 			goto out_unlock;
3952 		}
3953 		kernel_commands = sw_context->cmd_bounce;
3954 	} else if (!header)
3955 		sw_context->kernel = true;
3956 
3957 	sw_context->fp = vmw_fpriv(file_priv);
3958 	sw_context->cur_reloc = 0;
3959 	sw_context->cur_val_buf = 0;
3960 	INIT_LIST_HEAD(&sw_context->resource_list);
3961 	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
3962 	sw_context->cur_query_bo = dev_priv->pinned_bo;
3963 	sw_context->last_query_ctx = NULL;
3964 	sw_context->needs_post_query_barrier = false;
3965 	sw_context->dx_ctx_node = NULL;
3966 	sw_context->dx_query_mob = NULL;
3967 	sw_context->dx_query_ctx = NULL;
3968 	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
3969 	INIT_LIST_HEAD(&sw_context->validate_nodes);
3970 	INIT_LIST_HEAD(&sw_context->res_relocations);
3971 	if (sw_context->staged_bindings)
3972 		vmw_binding_state_reset(sw_context->staged_bindings);
3973 
3974 	if (!sw_context->res_ht_initialized) {
3975 		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
3976 		if (unlikely(ret != 0))
3977 			goto out_unlock;
3978 		sw_context->res_ht_initialized = true;
3979 	}
3980 	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
3981 	INIT_LIST_HEAD(&resource_list);
3982 	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
3983 	if (unlikely(ret != 0)) {
3984 		list_splice_init(&sw_context->ctx_resource_list,
3985 				 &sw_context->resource_list);
3986 		goto out_err_nores;
3987 	}
3988 
3989 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
3990 				command_size);
3991 	/*
3992 	 * Merge the resource lists before checking the return status
3993 	 * from vmw_cmd_check_all so that all the open hashtabs will
3994 	 * be handled properly even if vmw_cmd_check_all fails.
3995 	 */
3996 	list_splice_init(&sw_context->ctx_resource_list,
3997 			 &sw_context->resource_list);
3998 
3999 	if (unlikely(ret != 0))
4000 		goto out_err_nores;
4001 
4002 	ret = vmw_resources_reserve(sw_context);
4003 	if (unlikely(ret != 0))
4004 		goto out_err_nores;
4005 
4006 	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
4007 				     true, NULL);
4008 	if (unlikely(ret != 0))
4009 		goto out_err_nores;
4010 
4011 	ret = vmw_validate_buffers(dev_priv, sw_context);
4012 	if (unlikely(ret != 0))
4013 		goto out_err;
4014 
4015 	ret = vmw_resources_validate(sw_context);
4016 	if (unlikely(ret != 0))
4017 		goto out_err;
4018 
4019 	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4020 	if (unlikely(ret != 0)) {
4021 		ret = -ERESTARTSYS;
4022 		goto out_err;
4023 	}
4024 
4025 	if (dev_priv->has_mob) {
4026 		ret = vmw_rebind_contexts(sw_context);
4027 		if (unlikely(ret != 0))
4028 			goto out_unlock_binding;
4029 	}
4030 
4031 	if (!header) {
4032 		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4033 					      command_size, sw_context);
4034 	} else {
4035 		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4036 						sw_context);
4037 		header = NULL;
4038 	}
4039 	mutex_unlock(&dev_priv->binding_mutex);
4040 	if (ret)
4041 		goto out_err;
4042 
4043 	vmw_query_bo_switch_commit(dev_priv, sw_context);
4044 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
4045 					 &fence,
4046 					 (user_fence_rep) ? &handle : NULL);
4047 	/*
4048 	 * This error is harmless, because if fence submission fails,
4049 	 * vmw_fifo_send_fence will sync. The error will be propagated to
4050 	 * user-space in @user_fence_rep.
4051 	 */
4052 
4053 	if (ret != 0)
4054 		DRM_ERROR("Fence submission error. Syncing.\n");
4055 
4056 	vmw_resources_unreserve(sw_context, false);
4057 
4058 	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
4059 				    (void *) fence);
4060 
4061 	if (unlikely(dev_priv->pinned_bo != NULL &&
4062 		     !dev_priv->query_cid_valid))
4063 		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
4064 
4065 	vmw_clear_validations(sw_context);
4066 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4067 				    user_fence_rep, fence, handle);
4068 
4069 	/* Don't unreference when handing fence out */
4070 	if (unlikely(out_fence != NULL)) {
4071 		*out_fence = fence;
4072 		fence = NULL;
4073 	} else if (likely(fence != NULL)) {
4074 		vmw_fence_obj_unreference(&fence);
4075 	}
4076 
4077 	list_splice_init(&sw_context->resource_list, &resource_list);
4078 	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4079 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4080 
4081 	/*
4082 	 * Unreference resources outside of the cmdbuf_mutex to
4083 	 * avoid deadlocks in resource destruction paths.
4084 	 */
4085 	vmw_resource_list_unreference(sw_context, &resource_list);
4086 
4087 	return 0;
4088 
4089 out_unlock_binding:
4090 	mutex_unlock(&dev_priv->binding_mutex);
4091 out_err:
4092 	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
4093 out_err_nores:
4094 	vmw_resources_unreserve(sw_context, true);
4095 	vmw_resource_relocations_free(&sw_context->res_relocations);
4096 	vmw_free_relocations(sw_context);
4097 	vmw_clear_validations(sw_context);
4098 	if (unlikely(dev_priv->pinned_bo != NULL &&
4099 		     !dev_priv->query_cid_valid))
4100 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4101 out_unlock:
4102 	list_splice_init(&sw_context->resource_list, &resource_list);
4103 	error_resource = sw_context->error_resource;
4104 	sw_context->error_resource = NULL;
4105 	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4106 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4107 
4108 	/*
4109 	 * Unreference resources outside of the cmdbuf_mutex to
4110 	 * avoid deadlocks in resource destruction paths.
4111 	 */
4112 	vmw_resource_list_unreference(sw_context, &resource_list);
4113 	if (unlikely(error_resource != NULL))
4114 		vmw_resource_unreference(&error_resource);
4115 out_free_header:
4116 	if (header)
4117 		vmw_cmdbuf_header_free(header);
4118 
4119 	return ret;
4120 }
4121 
4122 /**
4123  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4124  *
4125  * @dev_priv: The device private structure.
4126  *
4127  * This function is called to idle the fifo and unpin the query buffer
4128  * if the normal way to do this hits an error, which should typically be
4129  * extremely rare.
4130  */
4131 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4132 {
4133 	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
4134 
4135 	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4136 	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4137 	if (dev_priv->dummy_query_bo_pinned) {
4138 		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4139 		dev_priv->dummy_query_bo_pinned = false;
4140 	}
4141 }
4142 
4143 
4144 /**
4145  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4146  * query bo.
4147  *
4148  * @dev_priv: The device private structure.
4149  * @fence: If non-NULL, should point to a struct vmw_fence_obj issued
4150  * _after_ a query barrier that flushes all queries touching the current
4151  * buffer pointed to by @dev_priv->pinned_bo.
4152  *
4153  * This function should be used to unpin the pinned query bo, or
4154  * as a query barrier when we need to make sure that all queries have
4155  * finished before the next fifo command. (For example on hardware
4156  * context destructions where the hardware may otherwise leak unfinished
4157  * queries).
4158  *
4159  * This function does not return any failure codes, but makes attempts
4160  * to do safe unpinning in case of errors.
4161  *
4162  * The function will synchronize on the previous query barrier, and will
4163  * thus not finish until that barrier has executed.
4164  *
4165  * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
4166  * before calling this function.
4167  */
4168 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4169 				     struct vmw_fence_obj *fence)
4170 {
4171 	int ret = 0;
4172 	struct list_head validate_list;
4173 	struct ttm_validate_buffer pinned_val, query_val;
4174 	struct vmw_fence_obj *lfence = NULL;
4175 	struct ww_acquire_ctx ticket;
4176 
4177 	if (dev_priv->pinned_bo == NULL)
4178 		goto out_unlock;
4179 
4180 	INIT_LIST_HEAD(&validate_list);
4181 
4182 	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
4183 	pinned_val.shared = false;
4184 	list_add_tail(&pinned_val.head, &validate_list);
4185 
4186 	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
4187 	query_val.shared = false;
4188 	list_add_tail(&query_val.head, &validate_list);
4189 
4190 	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
4191 				     false, NULL);
4192 	if (unlikely(ret != 0)) {
4193 		vmw_execbuf_unpin_panic(dev_priv);
4194 		goto out_no_reserve;
4195 	}
4196 
4197 	if (dev_priv->query_cid_valid) {
4198 		BUG_ON(fence != NULL);
4199 		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
4200 		if (unlikely(ret != 0)) {
4201 			vmw_execbuf_unpin_panic(dev_priv);
4202 			goto out_no_emit;
4203 		}
4204 		dev_priv->query_cid_valid = false;
4205 	}
4206 
4207 	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4208 	if (dev_priv->dummy_query_bo_pinned) {
4209 		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4210 		dev_priv->dummy_query_bo_pinned = false;
4211 	}
4212 	if (fence == NULL) {
4213 		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4214 						  NULL);
4215 		fence = lfence;
4216 	}
4217 	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
4218 	if (lfence != NULL)
4219 		vmw_fence_obj_unreference(&lfence);
4220 
4221 	ttm_bo_unref(&query_val.bo);
4222 	ttm_bo_unref(&pinned_val.bo);
4223 	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4224 	DRM_INFO("Dummy query bo pin count: %d\n",
4225 		 dev_priv->dummy_query_bo->pin_count);
4226 
4227 out_unlock:
4228 	return;
4229 
4230 out_no_emit:
4231 	ttm_eu_backoff_reservation(&ticket, &validate_list);
4232 out_no_reserve:
4233 	ttm_bo_unref(&query_val.bo);
4234 	ttm_bo_unref(&pinned_val.bo);
4235 	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4236 }
4237 
4238 /**
4239  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4240  * query bo.
4241  *
4242  * @dev_priv: The device private structure.
4243  *
4244  * This function should be used to unpin the pinned query bo, or
4245  * as a query barrier when we need to make sure that all queries have
4246  * finished before the next fifo command. (For example on hardware
4247  * context destructions where the hardware may otherwise leak unfinished
4248  * queries).
4249  *
4250  * This function does not return any failure codes, but makes attempts
4251  * to do safe unpinning in case of errors.
4252  *
4253  * The function will synchronize on the previous query barrier, and will
4254  * thus not finish until that barrier has executed.
4255  */
4256 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4257 {
4258 	mutex_lock(&dev_priv->cmdbuf_mutex);
4259 	if (dev_priv->query_cid_valid)
4260 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4261 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4262 }
4263 
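/*
 * A note on the ioctl below: the execbuf argument struct has grown over
 * time, so it is copied in from user-space in up to two stages, with
 * copy_offset[] marking where each version's layout ends:
 *
 *	stage 1: bytes [0, copy_offset[0])                always copied
 *	stage 2: bytes [copy_offset[0], copy_offset[v-1]) copied only if
 *		 the caller's arg.version v is greater than 1
 *
 * Zero or too-new versions are rejected before the second copy.
 */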
4264 int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
4265 		      struct drm_file *file_priv, size_t size)
4266 {
4267 	struct vmw_private *dev_priv = vmw_priv(dev);
4268 	struct drm_vmw_execbuf_arg arg;
4269 	int ret;
4270 	static const size_t copy_offset[] = {
4271 		offsetof(struct drm_vmw_execbuf_arg, context_handle),
4272 		sizeof(struct drm_vmw_execbuf_arg)};
4273 
4274 	if (unlikely(size < copy_offset[0])) {
4275 		DRM_ERROR("Invalid command size, ioctl %d\n",
4276 			  DRM_VMW_EXECBUF);
4277 		return -EINVAL;
4278 	}
4279 
4280 	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
4281 		return -EFAULT;
4282 
4283 	/*
4284 	 * Extend the ioctl argument while
4285 	 * maintaining backwards compatibility:
4286 	 * we take different code paths depending on the value of
4287 	 * arg.version.
4288 	 */
4289 
4290 	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
4291 		     arg.version == 0)) {
4292 		DRM_ERROR("Incorrect execbuf version.\n");
4293 		return -EINVAL;
4294 	}
4295 
4296 	if (arg.version > 1 &&
4297 	    copy_from_user(&arg.context_handle,
4298 			   (void __user *) (data + copy_offset[0]),
4299 			   copy_offset[arg.version - 1] -
4300 			   copy_offset[0]) != 0)
4301 		return -EFAULT;
4302 
4303 	switch (arg.version) {
4304 	case 1:
4305 		arg.context_handle = (uint32_t) -1;
4306 		break;
4307 	case 2:
4308 		if (arg.pad64 != 0) {
4309 			DRM_ERROR("Unused IOCTL data not set to zero.\n");
4310 			return -EINVAL;
4311 		}
4312 		break;
4313 	default:
4314 		break;
4315 	}
4316 
4317 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
4318 	if (unlikely(ret != 0))
4319 		return ret;
4320 
4321 	ret = vmw_execbuf_process(file_priv, dev_priv,
4322 				  (void __user *)(unsigned long)arg.commands,
4323 				  NULL, arg.command_size, arg.throttle_us,
4324 				  arg.context_handle,
4325 				  (void __user *)(unsigned long)arg.fence_rep,
4326 				  NULL);
4327 	ttm_read_unlock(&dev_priv->reservation_sem);
4328 	if (unlikely(ret != 0))
4329 		return ret;
4330 
4331 	vmw_kms_cursor_post_execbuf(dev_priv);
4332 
4333 	return 0;
4334 }
4335