/*	$NetBSD: vmwgfx_cmdbuf.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_cmdbuf.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $");

#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_api.h"

/*
 * Size of inline command buffers. Try to make sure that a page size is a
 * multiple of the DMA pool allocation size.
 */
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))

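/*
 * Worked example of the sizing above (a sketch, assuming
 * sizeof(SVGACBHeader) <= 64 on this configuration): the header rounds
 * up to 64 bytes, so VMW_CMDBUF_INLINE_SIZE is 1024 - 64 = 960 bytes,
 * and each struct vmw_cmdbuf_dheader then occupies exactly 1 KiB, so
 * DMA pool allocations of it pack evenly into a 4 KiB page.
 */
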
/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware.
 */
struct vmw_cmdbuf_context {
	struct list_head submitted;
	struct list_head hw_submitted;
	struct list_head preempted;
	unsigned num_hw_submitted;
};

/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data are protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manager allocations and
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation (see @handle).
 * Immutable.
 * @map_obj: Mapping state for @cmd_space. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * the @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers the device can
 * handle. Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
 * space for inline data. Internal protection.
 * @tasklet: Tasklet struct for irq processing. Immutable.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 */
struct vmw_cmdbuf_man {
	struct mutex cur_mutex;
	struct mutex space_mutex;
	struct work_struct work;
	struct vmw_private *dev_priv;
	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
	struct list_head error;
	struct drm_mm mm;
	struct ttm_buffer_object *cmd_space;
	struct ttm_bo_kmap_obj map_obj;
	u8 *map;
	struct vmw_cmdbuf_header *cur;
	size_t cur_pos;
	size_t default_size;
	unsigned max_hw_submitted;
	spinlock_t lock;
	struct dma_pool *headers;
	struct dma_pool *dheaders;
	struct tasklet_struct tasklet;
	wait_queue_head_t alloc_queue;
	wait_queue_head_t idle_queue;
	bool irq_on;
	bool using_mob;
	bool has_pool;
	dma_addr_t handle;
	size_t size;
};

/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head for attaching to the manager lists.
 * @node: The range manager node.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
	struct vmw_cmdbuf_man *man;
	SVGACBHeader *cb_header;
	SVGACBContext cb_context;
	struct list_head list;
	struct drm_mm_node node;
	dma_addr_t handle;
	u8 *cmd;
	size_t size;
	size_t reserved;
	bool inline_space;
};

/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
	SVGACBHeader cb_header;
	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};

/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
	size_t page_size;
	struct drm_mm_node *node;
	bool done;
};

/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
	     ++(_i), ++(_ctx))

static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);


/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to wait interruptibly when locking.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
	if (interruptible) {
		if (mutex_lock_interruptible(&man->cur_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->cur_mutex);
	}

	return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The command buffer manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
	mutex_unlock(&man->cur_mutex);
}

/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * Need not be called locked.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_dheader *dheader;

	if (WARN_ON_ONCE(!header->inline_space))
		return;

	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
			       cb_header);
	dma_pool_free(header->man->dheaders, dheader, header->handle);
	kfree(header);
}

/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with man::lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	lockdep_assert_held_once(&man->lock);

	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}

	drm_mm_remove_node(&header->node);
	wake_up_all(&man->alloc_queue);
	if (header->cb_header)
		dma_pool_free(man->headers, header->cb_header,
			      header->handle);
	kfree(header);
}

/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	/* Avoid locking if inline_space */
	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}
	spin_lock_bh(&man->lock);
	__vmw_cmdbuf_header_free(header);
	spin_unlock_bh(&man->lock);
}


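/*
 * Note on the submission handshake below: the 64-bit DMA address of the
 * header is written to SVGA_REG_COMMAND_HIGH/LOW as two 32-bit halves.
 * Headers are allocated from DMA pools created with 64-byte alignment,
 * so the low bits of the address are zero, which leaves room to carry
 * the context id (masked by SVGA_CB_CONTEXT_MASK) in the low word.
 */
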
/**
 * vmw_cmdbuf_header_submit - Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;
	u32 val;

	val = upper_32_bits(header->handle);
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

	val = lower_32_bits(header->handle);
	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

	return header->cb_header->status;
}

/**
 * vmw_cmdbuf_ctx_init - Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize.
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
	INIT_LIST_HEAD(&ctx->hw_submitted);
	INIT_LIST_HEAD(&ctx->submitted);
	INIT_LIST_HEAD(&ctx->preempted);
	ctx->num_hw_submitted = 0;
}

/**
 * vmw_cmdbuf_ctx_submit - Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
				  struct vmw_cmdbuf_context *ctx)
{
	while (ctx->num_hw_submitted < man->max_hw_submitted &&
	      !list_empty(&ctx->submitted)) {
		struct vmw_cmdbuf_header *entry;
		SVGACBStatus status;

		entry = list_first_entry(&ctx->submitted,
					 struct vmw_cmdbuf_header,
					 list);

		status = vmw_cmdbuf_header_submit(entry);

		/* This should never happen */
		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			break;
		}

		list_del(&entry->list);
		list_add_tail(&entry->list, &ctx->hw_submitted);
		ctx->num_hw_submitted++;
	}
}

/**
 * vmw_cmdbuf_ctx_process - Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Incremented if the context still has command buffers queued
 * for submission when processing is done.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers. Typically free them, but on preemption or error, take
 * appropriate action. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_context *ctx,
				   int *notempty)
{
	struct vmw_cmdbuf_header *entry, *next;

	vmw_cmdbuf_ctx_submit(man, ctx);

	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
		SVGACBStatus status = entry->cb_header->status;

		if (status == SVGA_CB_STATUS_NONE)
			break;

		list_del(&entry->list);
		wake_up_all(&man->idle_queue);
		ctx->num_hw_submitted--;
		switch (status) {
		case SVGA_CB_STATUS_COMPLETED:
			__vmw_cmdbuf_header_free(entry);
			break;
		case SVGA_CB_STATUS_COMMAND_ERROR:
		case SVGA_CB_STATUS_CB_HEADER_ERROR:
			list_add_tail(&entry->list, &man->error);
			schedule_work(&man->work);
			break;
		case SVGA_CB_STATUS_PREEMPTED:
			list_add(&entry->list, &ctx->preempted);
			break;
		default:
			WARN_ONCE(true, "Undefined command buffer status.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		}
	}

	vmw_cmdbuf_ctx_submit(man, ctx);
	if (!list_empty(&ctx->submitted))
		(*notempty)++;
}

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
	int notempty;
	struct vmw_cmdbuf_context *ctx;
	int i;

retry:
	notempty = 0;
	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

	if (man->irq_on && !notempty) {
		vmw_generic_waiter_remove(man->dev_priv,
					  SVGA_IRQFLAG_COMMAND_BUFFER,
					  &man->dev_priv->cmdbuf_waiters);
		man->irq_on = false;
	} else if (!man->irq_on && notempty) {
		vmw_generic_waiter_add(man->dev_priv,
				       SVGA_IRQFLAG_COMMAND_BUFFER,
				       &man->dev_priv->cmdbuf_waiters);
		man->irq_on = true;

		/* Rerun in case we just missed an irq. */
		goto retry;
	}
}

/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context.
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
			       struct vmw_cmdbuf_header *header,
			       SVGACBContext cb_context)
{
	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
		header->cb_header->dxContext = 0;
	header->cb_context = cb_context;
	list_add_tail(&header->list, &man->ctx[cb_context].submitted);

	vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
 * handler implemented as a tasklet.
 *
 * @data: Tasklet closure. A pointer to the command buffer manager cast to
 * an unsigned long.
 *
 * The bottom half (tasklet) of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
static void vmw_cmdbuf_man_tasklet(unsigned long data)
{
	struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;

	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
	struct vmw_cmdbuf_man *man =
		container_of(work, struct vmw_cmdbuf_man, work);
	struct vmw_cmdbuf_header *entry, *next;
	uint32_t dummy;
	bool restart = false;

	spin_lock_bh(&man->lock);
	list_for_each_entry_safe(entry, next, &man->error, list) {
		restart = true;
		DRM_ERROR("Command buffer error.\n");

		list_del(&entry->list);
		__vmw_cmdbuf_header_free(entry);
		wake_up_all(&man->idle_queue);
	}
	spin_unlock_bh(&man->lock);

	if (restart && vmw_cmdbuf_startstop(man, true))
		DRM_ERROR("Failed restarting command buffer context 0.\n");

	/* Send a new fence in case one was removed */
	vmw_fifo_send_fence(man->dev_priv, &dummy);
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
				bool check_preempted)
{
	struct vmw_cmdbuf_context *ctx;
	bool idle = false;
	int i;

	spin_lock_bh(&man->lock);
	vmw_cmdbuf_man_process(man);
	for_each_cmdbuf_ctx(man, i, ctx) {
		if (!list_empty(&ctx->submitted) ||
		    !list_empty(&ctx->hw_submitted) ||
		    (check_preempted && !list_empty(&ctx->preempted)))
			goto out_unlock;
	}

	idle = list_empty(&man->error);

out_unlock:
	spin_unlock_bh(&man->lock);

	return idle;
}

/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions.
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	WARN_ON(!mutex_is_locked(&man->cur_mutex));

	if (!cur)
		return;

	spin_lock_bh(&man->lock);
	if (man->cur_pos == 0) {
		__vmw_cmdbuf_header_free(cur);
		goto out_unlock;
	}

	man->cur->cb_header->length = man->cur_pos;
	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
	spin_unlock_bh(&man->lock);
	man->cur = NULL;
	man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptibly when waiting for the
 * cur_mutex.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
			 bool interruptible)
{
	int ret = vmw_cmdbuf_cur_lock(man, interruptible);

	if (ret)
		return ret;

	__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);

	return 0;
}

/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptibly while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
		    unsigned long timeout)
{
	int ret;

	ret = vmw_cmdbuf_cur_flush(man, interruptible);
	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		ret = wait_event_interruptible_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	} else {
		ret = wait_event_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);
	if (ret == 0) {
		if (!vmw_cmdbuf_man_idle(man, true))
			ret = -EBUSY;
		else
			ret = 0;
	}
	if (ret > 0)
		ret = 0;

	return ret;
}

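/*
 * Example (sketch): draining the manager before teardown, waiting
 * non-interruptibly for up to ten seconds' worth of ticks, mirroring
 * the calls in vmw_cmdbuf_remove_pool() and vmw_cmdbuf_man_destroy():
 *
 *	if (vmw_cmdbuf_idle(man, false, 10 * HZ) == -EBUSY)
 *		DRM_ERROR("Command buffer manager failed to idle.\n");
 */
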
/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Will hold the size on entry and the allocated mm
 * node on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true, and sets
 * @info->done, if the allocation succeeded.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_alloc_info *info)
{
	int ret;

	if (info->done)
		return true;

	memset(info->node, 0, sizeof(*info->node));
	spin_lock_bh(&man->lock);
	ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
					 0, 0,
					 DRM_MM_SEARCH_DEFAULT,
					 DRM_MM_CREATE_DEFAULT);
	if (ret) {
		vmw_cmdbuf_man_process(man);
		ret = drm_mm_insert_node_generic(&man->mm, info->node,
						 info->page_size, 0, 0,
						 DRM_MM_SEARCH_DEFAULT,
						 DRM_MM_CREATE_DEFAULT);
	}

	spin_unlock_bh(&man->lock);
	info->done = !ret;

	return info->done;
}

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if no space
 * is currently available, it turns on IRQ handling and sleeps waiting for it
 * to become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
				  struct drm_mm_node *node,
				  size_t size,
				  bool interruptible)
{
	struct vmw_cmdbuf_alloc_info info;

	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
	info.node = node;
	info.done = false;

	/*
	 * To prevent starvation of large requests, only one allocating call
	 * at a time is allowed to wait for space.
	 */
	if (interruptible) {
		if (mutex_lock_interruptible(&man->space_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->space_mutex);
	}

	/* Try to allocate space without waiting. */
	if (vmw_cmdbuf_try_alloc(man, &info))
		goto out_unlock;

	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		int ret;

		ret = wait_event_interruptible
			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
		if (ret) {
			vmw_generic_waiter_remove
				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				 &man->dev_priv->cmdbuf_waiters);
			mutex_unlock(&man->space_mutex);
			return ret;
		}
	} else {
		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);

out_unlock:
	mutex_unlock(&man->space_mutex);

	return 0;
}

/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_header *header,
				 size_t size,
				 bool interruptible)
{
	SVGACBHeader *cb_hdr;
	size_t offset;
	int ret;

	if (!man->has_pool)
		return -ENOMEM;

	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);

	if (ret)
		return ret;

	header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
					   &header->handle);
	if (!header->cb_header) {
		ret = -ENOMEM;
		goto out_no_cb_header;
	}

	header->size = header->node.size << PAGE_SHIFT;
	cb_hdr = header->cb_header;
	offset = header->node.start << PAGE_SHIFT;
	header->cmd = man->map + offset;
	memset(cb_hdr, 0, sizeof(*cb_hdr));
	if (man->using_mob) {
		cb_hdr->flags = SVGA_CB_FLAG_MOB;
		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
		cb_hdr->ptr.mob.mobOffset = offset;
	} else {
		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
	}

	return 0;

out_no_cb_header:
	spin_lock_bh(&man->lock);
	drm_mm_remove_node(&header->node);
	spin_unlock_bh(&man->lock);

	return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_header *header,
				   int size)
{
	struct vmw_cmdbuf_dheader *dheader;
	SVGACBHeader *cb_hdr;

	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
		return -ENOMEM;

	dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
				 &header->handle);
	if (!dheader)
		return -ENOMEM;

	header->inline_space = true;
	header->size = VMW_CMDBUF_INLINE_SIZE;
	cb_hdr = &dheader->cb_header;
	header->cb_header = cb_hdr;
	header->cmd = dheader->cmd;
	memset(dheader, 0, sizeof(*dheader));
	cb_hdr->status = SVGA_CB_STATUS_NONE;
	cb_hdr->flags = SVGA_CB_FLAG_NONE;
	cb_hdr->ptr.pa = (u64)header->handle +
		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

	return 0;
}

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @p_header: Points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
		       size_t size, bool interruptible,
		       struct vmw_cmdbuf_header **p_header)
{
	struct vmw_cmdbuf_header *header;
	int ret = 0;

	*p_header = NULL;

	header = kzalloc(sizeof(*header), GFP_KERNEL);
	if (!header)
		return ERR_PTR(-ENOMEM);

	if (size <= VMW_CMDBUF_INLINE_SIZE)
		ret = vmw_cmdbuf_space_inline(man, header, size);
	else
		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

	if (ret) {
		kfree(header);
		return ERR_PTR(ret);
	}

	header->man = man;
	INIT_LIST_HEAD(&header->list);
	header->cb_header->status = SVGA_CB_STATUS_NONE;
	*p_header = header;

	return header->cmd;
}

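/*
 * Example (sketch): a one-off submission through an explicit header,
 * as an alternative to the current-buffer path. The space is allocated
 * once, then reserved and committed through the header; the manager
 * frees the header once the buffer completes ("my_commands" and
 * "cmd_size" are placeholders):
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *buf, *cmd;
 *
 *	buf = vmw_cmdbuf_alloc(man, cmd_size, true, &header);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	cmd = vmw_cmdbuf_reserve(man, cmd_size, SVGA3D_INVALID_ID, true,
 *				 header);
 *	memcpy(cmd, my_commands, cmd_size);
 *	vmw_cmdbuf_commit(man, cmd_size, header, true);
 */
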
/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
				    size_t size,
				    int ctx_id,
				    bool interruptible)
{
	struct vmw_cmdbuf_header *cur;
	void *ret;

	if (vmw_cmdbuf_cur_lock(man, interruptible))
		return ERR_PTR(-ERESTARTSYS);

	cur = man->cur;
	if (cur && (size + man->cur_pos > cur->size ||
		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
		     ctx_id != cur->cb_header->dxContext)))
		__vmw_cmdbuf_cur_flush(man);

	if (!man->cur) {
		ret = vmw_cmdbuf_alloc(man,
				       max_t(size_t, size, man->default_size),
				       interruptible, &man->cur);
		if (IS_ERR(ret)) {
			vmw_cmdbuf_cur_unlock(man);
			return ret;
		}

		cur = man->cur;
	}

	if (ctx_id != SVGA3D_INVALID_ID) {
		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		cur->cb_header->dxContext = ctx_id;
	}

	cur->reserved = size;

	return (void *) (man->cur->cmd + man->cur_pos);
}

953 
954 /**
955  * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
956  *
957  * @man: The command buffer manager.
958  * @size: The size of the commands actually written.
959  * @flush: Whether to flush the command buffer immediately.
960  */
961 static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
962 				  size_t size, bool flush)
963 {
964 	struct vmw_cmdbuf_header *cur = man->cur;
965 
966 	WARN_ON(!mutex_is_locked(&man->cur_mutex));
967 
968 	WARN_ON(size > cur->reserved);
969 	man->cur_pos += size;
970 	if (!size)
971 		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
972 	if (flush)
973 		__vmw_cmdbuf_cur_flush(man);
974 	vmw_cmdbuf_cur_unlock(man);
975 }
976 
/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
			 int ctx_id, bool interruptible,
			 struct vmw_cmdbuf_header *header)
{
	if (!header)
		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

	if (size > header->size)
		return ERR_PTR(-EINVAL);

	if (ctx_id != SVGA3D_INVALID_ID) {
		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		header->cb_header->dxContext = ctx_id;
	}

	header->reserved = size;
	return header->cmd;
}

/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
		       struct vmw_cmdbuf_header *header, bool flush)
{
	if (!header) {
		vmw_cmdbuf_commit_cur(man, size, flush);
		return;
	}

	(void) vmw_cmdbuf_cur_lock(man, false);
	__vmw_cmdbuf_cur_flush(man);
	WARN_ON(size > header->reserved);
	man->cur = header;
	man->cur_pos = size;
	if (!size)
		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}

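/*
 * Example (sketch): a small kernel command submission through the
 * current command buffer (header == NULL). The manager batches such
 * writes in @cur until it is flushed ("my_cmd" is a placeholder):
 *
 *	void *cmd = vmw_cmdbuf_reserve(man, sizeof(my_cmd),
 *				       SVGA3D_INVALID_ID, true, NULL);
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	memcpy(cmd, &my_cmd, sizeof(my_cmd));
 *	vmw_cmdbuf_commit(man, sizeof(my_cmd), NULL, false);
 */
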
/**
 * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
 *
 * @man: The command buffer manager.
 */
void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
{
	if (!man)
		return;

	tasklet_schedule(&man->tasklet);
}

/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
					  const void *command,
					  size_t size)
{
	struct vmw_cmdbuf_header *header;
	int status;
	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	memcpy(cmd, command, size);
	header->cb_header->length = size;
	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
	spin_lock_bh(&man->lock);
	status = vmw_cmdbuf_header_submit(header);
	spin_unlock_bh(&man->lock);
	vmw_cmdbuf_header_free(header);

	if (status != SVGA_CB_STATUS_COMPLETED) {
		DRM_ERROR("Device context command failed with status %d\n",
			  status);
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
				bool enable)
{
	struct {
		uint32 id;
		SVGADCCmdStartStop body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
	cmd.body.enable = (enable) ? 1 : 0;
	cmd.body.context = SVGA_CB_CONTEXT_0;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes.
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 * @default_size: The default size of the command buffer for small kernel
 * submissions.
 *
 * Set the size and allocate the main command buffer space pool,
 * as well as the default size of the command buffer for
 * small kernel submissions. If successful, this enables large command
 * submissions. Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
			     size_t size, size_t default_size)
{
	struct vmw_private *dev_priv = man->dev_priv;
	bool dummy;
	int ret;

	if (man->has_pool)
		return -EINVAL;

	/* First, try to allocate a huge chunk of DMA memory */
	size = PAGE_ALIGN(size);
	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
				      &man->handle, GFP_KERNEL);
	if (man->map) {
		man->using_mob = false;
	} else {
		/*
		 * DMA memory failed. If we can have command buffers in a
		 * MOB, try to use that instead. Note that this will
		 * actually call into the already enabled manager, when
		 * binding the MOB.
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_DX))
			return -ENOMEM;

		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
				    &vmw_mob_ne_placement, 0, false, NULL,
				    &man->cmd_space);
		if (ret)
			return ret;

		man->using_mob = true;
		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
				  &man->map_obj);
		if (ret)
			goto out_no_map;

		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
	}

	man->size = size;
	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

	man->has_pool = true;

	/*
	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
	 * needs to wait for space and we block on further command
	 * submissions to be able to free up space.
	 */
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	DRM_INFO("Using command buffers with %s pool.\n",
		 (man->using_mob) ? "MOB" : "DMA");

	return 0;

out_no_map:
	if (man->using_mob)
		ttm_bo_unref(&man->cmd_space);

	return ret;
}

/**
 * vmw_cmdbuf_man_create - Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success, or an error
 * pointer on failure. The command buffer manager will be enabled for
 * submissions of size VMW_CMDBUF_INLINE_SIZE only.
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man;
	struct vmw_cmdbuf_context *ctx;
	int i;
	int ret;

	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
		return ERR_PTR(-ENOSYS);

	man = kzalloc(sizeof(*man), GFP_KERNEL);
	if (!man)
		return ERR_PTR(-ENOMEM);

	man->headers = dma_pool_create("vmwgfx cmdbuf",
				       &dev_priv->dev->pdev->dev,
				       sizeof(SVGACBHeader),
				       64, PAGE_SIZE);
	if (!man->headers) {
		ret = -ENOMEM;
		goto out_no_pool;
	}

	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
					&dev_priv->dev->pdev->dev,
					sizeof(struct vmw_cmdbuf_dheader),
					64, PAGE_SIZE);
	if (!man->dheaders) {
		ret = -ENOMEM;
		goto out_no_dpool;
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_init(ctx);

	INIT_LIST_HEAD(&man->error);
	spin_lock_init(&man->lock);
	mutex_init(&man->cur_mutex);
	mutex_init(&man->space_mutex);
	tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
		     (unsigned long) man);
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	init_waitqueue_head(&man->alloc_queue);
	init_waitqueue_head(&man->idle_queue);
	man->dev_priv = dev_priv;
	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
			       &dev_priv->error_waiters);
	ret = vmw_cmdbuf_startstop(man, true);
	if (ret) {
		DRM_ERROR("Failed starting command buffer context 0.\n");
		vmw_cmdbuf_man_destroy(man);
		return ERR_PTR(ret);
	}

	return man;

out_no_dpool:
	dma_pool_destroy(man->headers);
out_no_pool:
	kfree(man);

	return ERR_PTR(ret);
}

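/*
 * Example (sketch): typical manager lifecycle during device bring-up
 * and teardown, error handling elided. After vmw_cmdbuf_man_create(),
 * only inline-size submissions work; vmw_cmdbuf_set_pool_size() then
 * enables large submissions until the pool is removed again:
 *
 *	struct vmw_cmdbuf_man *man = vmw_cmdbuf_man_create(dev_priv);
 *
 *	vmw_cmdbuf_set_pool_size(man, size, default_size);
 *	...
 *	vmw_cmdbuf_remove_pool(man);
 *	vmw_cmdbuf_man_destroy(man);
 */
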
/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function removes the main buffer space pool, and should be called
 * before MOB memory management is removed. When this function has been called,
 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
 * less are allowed, and the default size of the command buffer for small kernel
 * submissions is also set to this size.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
	if (!man->has_pool)
		return;

	man->has_pool = false;
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (man->using_mob) {
		(void) ttm_bo_kunmap(&man->map_obj);
		ttm_bo_unref(&man->cmd_space);
	} else {
		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
				  man->size, man->map, man->handle);
	}
}

/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
	WARN_ON_ONCE(man->has_pool);
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (vmw_cmdbuf_startstop(man, false))
		DRM_ERROR("Failed stopping command buffer context 0.\n");

	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
				  &man->dev_priv->error_waiters);
	tasklet_kill(&man->tasklet);
	(void) cancel_work_sync(&man->work);
	dma_pool_destroy(man->dheaders);
	dma_pool_destroy(man->headers);
	mutex_destroy(&man->cur_mutex);
	mutex_destroy(&man->space_mutex);
	kfree(man);
}