/*	$NetBSD: vmwgfx_fifo.c,v 1.4 2022/10/25 23:34:06 riastradh Exp $	*/

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_fifo.c,v 1.4 2022/10/25 23:34:06 riastradh Exp $");

#include <linux/sched/signal.h>

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"

#include <linux/nbsd-namespace.h>

struct vmw_temp_set_context {
	SVGA3dCmdHeader header;
	SVGA3dCmdDXTempSetContext body;
};

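/**
 * vmw_fifo_have_3d - Check whether the device can accept 3D commands.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns true if the device advertises 3D support and the driver is in a
 * state where 3D commands can be submitted: MOBs present on guest-backed
 * devices, or a recent enough FIFO 3D hardware version and a display unit
 * other than the legacy one.
 */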
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t fifo_min, hwversion;
	const struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (!(dev_priv->capabilities & SVGA_CAP_3D))
		return false;

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint32_t result;

		if (!dev_priv->has_mob)
			return false;

		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
		result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);

		return (result != 0);
	}

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
		return false;

	hwversion = vmw_mmio_read(fifo_mem +
				  ((fifo->capabilities &
				    SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
				   SVGA_FIFO_3D_HWVERSION_REVISED :
				   SVGA_FIFO_3D_HWVERSION));

	if (hwversion == 0)
		return false;

	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
		return false;

	/* Legacy Display Unit does not support surfaces */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return false;

	return true;
}

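/**
 * vmw_fifo_have_pitchlock - Check for FIFO pitchlock support.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns true if the device has an extended FIFO that advertises the
 * SVGA_FIFO_CAP_PITCHLOCK capability.
 */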
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t caps;

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	caps = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
		return true;

	return false;
}

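/**
 * vmw_fifo_init - Initialize the command FIFO.
 *
 * @dev_priv: Pointer to device private structure.
 * @fifo: FIFO state to initialize.
 *
 * Allocates the static bounce buffer, saves the SVGA register state that
 * vmw_fifo_release() later restores, enables the device, sets up the FIFO
 * MIN/MAX/NEXT_CMD/STOP offsets and reads back the FIFO capabilities.
 *
 * Returns 0 on success, or -ENOMEM if the bounce buffer cannot be allocated.
 */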
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;

	fifo->dx = false;
	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);

	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
		  SVGA_REG_ENABLE_HIDE);
	vmw_write(dev_priv, SVGA_REG_TRACES, 0);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN);
	vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_STOP);
	vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
	vmw_marker_queue_init(&fifo->marker_queue);

	return 0;
}

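/**
 * vmw_fifo_ping_host - Ask the host to start processing FIFO commands.
 *
 * @dev_priv: Pointer to device private structure.
 * @reason: SVGA_SYNC_* reason code written to the sync register.
 *
 * Writes the sync register only when the FIFO busy flag transitions from
 * 0 to 1, so the host is not pinged redundantly while it is already busy.
 */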
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	u32 *fifo_mem = dev_priv->mmio_virt;

	preempt_disable();
	if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	preempt_enable();
}

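/**
 * vmw_fifo_release - Tear down the command FIFO.
 *
 * @dev_priv: Pointer to device private structure.
 * @fifo: FIFO state to tear down.
 *
 * Waits for the device to go idle, records the last read seqno, restores
 * the SVGA register state saved by vmw_fifo_init() and frees the bounce
 * buffers.
 */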
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	u32 *fifo_mem = dev_priv->mmio_virt;

	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		;

	dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);
	vmw_write(dev_priv, SVGA_REG_TRACES,
		  dev_priv->traces_state);

	vmw_marker_queue_takedown(&fifo->marker_queue);

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
}

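/**
 * vmw_fifo_is_full - Check whether @bytes of FIFO space are unavailable.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes of command space needed.
 *
 * Returns true if the ring does not currently have room for @bytes.
 */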
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}

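/**
 * vmw_fifo_wait_noirq - Wait for FIFO space by polling.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes of command space needed.
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @timeout: Timeout in jiffies.
 *
 * Used when the device lacks SVGA_CAP_IRQMASK; polls until the requested
 * space becomes available or @timeout expires.  Returns 0 on success,
 * -EBUSY on timeout or a negative error code if interrupted.
 */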
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
#ifdef __NetBSD__
	assert_spin_locked(&dev_priv->fifo_lock);
#else
	DEFINE_WAIT(__wait);
#endif

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
#ifndef __NetBSD__
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
#endif
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
#ifdef __NetBSD__
		if (interruptible) {
			DRM_SPIN_TIMED_WAIT_UNTIL(ret, &dev_priv->fifo_queue,
			    &dev_priv->fifo_lock, 1,
			    !vmw_fifo_is_full(dev_priv, bytes));
		} else {
			DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret,
			    &dev_priv->fifo_queue,
			    &dev_priv->fifo_lock, 1,
			    !vmw_fifo_is_full(dev_priv, bytes));
		}
		if (ret) {
			if (ret > 0) /* success */
				ret = 0;
			break;
		}
		/*
		 * ret=0 means the wait timed out after one tick, so
		 * try again
		 */
#else
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
#endif
	}
#ifdef __NetBSD__
	DRM_SPIN_WAKEUP_ALL(&dev_priv->fifo_queue, &dev_priv->fifo_lock);
#else
	finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
#endif
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}

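/**
 * vmw_fifo_wait - Wait for @bytes of FIFO space to become available.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes of command space needed.
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @timeout: Timeout in jiffies.
 *
 * Pings the host with SVGA_SYNC_FIFOFULL and then waits either on the
 * FIFO-progress interrupt or, if the device lacks SVGA_CAP_IRQMASK, by
 * polling in vmw_fifo_wait_noirq().  Returns 0 on success, -EBUSY on
 * timeout or a negative error code if interrupted.
 */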
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;

	spin_lock(&dev_priv->fifo_lock);

	if (likely(!vmw_fifo_is_full(dev_priv, bytes))) {
		spin_unlock(&dev_priv->fifo_lock);
		return 0;
	}

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) {
		ret = vmw_fifo_wait_noirq(dev_priv, bytes,
					  interruptible, timeout);
		spin_unlock(&dev_priv->fifo_lock);
		return ret;
	}

	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
			       &dev_priv->fifo_queue_waiters);

	if (interruptible)
		DRM_SPIN_TIMED_WAIT_UNTIL(ret, &dev_priv->fifo_queue,
		    &dev_priv->fifo_lock, timeout,
		    !vmw_fifo_is_full(dev_priv, bytes));
	else
		DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret, &dev_priv->fifo_queue,
		    &dev_priv->fifo_lock, timeout,
		    !vmw_fifo_is_full(dev_priv, bytes));

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
				  &dev_priv->fifo_queue_waiters);

	spin_unlock(&dev_priv->fifo_lock);

	return ret;
}

/**
 * vmw_local_fifo_reserve - Reserve @bytes of space in the fifo.
 *
 * This function returns NULL (error) in two cases: if it times out
 * waiting for fifo space, or if @bytes is larger than the total fifo
 * space available.
 *
 * Returns:
 * Pointer to the reserved fifo space, or NULL on error (possible
 * hardware hang).
 */
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
				    uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					vmw_mmio_write(bytes, fifo_mem +
						       SVGA_FIFO_RESERVED);
				return (void __force *) (fifo_mem +
							 (next_cmd >> 2));
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				if (!fifo_state->dynamic_buffer)
					goto out_err;
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);

	return NULL;
}

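/**
 * vmw_fifo_reserve_dx - Reserve command buffer space.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to reserve.
 * @ctx_id: DX context id, or SVGA3D_INVALID_ID for none.
 *
 * Reserves space either through the command buffer manager, if present,
 * or directly in the fifo.  Returns a pointer to the reserved space or
 * NULL on failure.
 */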
void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
			  int ctx_id)
{
	void *ret;

	if (dev_priv->cman)
		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
					 ctx_id, false, NULL);
	else if (ctx_id == SVGA3D_INVALID_ID)
		ret = vmw_local_fifo_reserve(dev_priv, bytes);
	else {
		WARN(1, "Command buffer has not been allocated.\n");
		ret = NULL;
	}
	if (IS_ERR_OR_NULL(ret))
		return NULL;

	return ret;
}

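/**
 * vmw_fifo_res_copy - Copy a bounce buffer into the fifo in at most two
 * chunks, wrapping at the fifo boundary.
 *
 * @fifo_state: Fifo state holding the bounce buffer.
 * @fifo_mem: Pointer to the mapped fifo memory.
 * @next_cmd: Current NEXT_CMD offset.
 * @max: Fifo MAX offset.
 * @min: Fifo MIN offset.
 * @bytes: Number of bytes to copy.
 *
 * Used when the fifo advertises SVGA_FIFO_CAP_RESERVE.
 */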
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      u32 *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
		fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}

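/**
 * vmw_fifo_slow_copy - Copy a bounce buffer into the fifo one word at a
 * time, updating NEXT_CMD after every word.
 *
 * @fifo_state: Fifo state holding the bounce buffer.
 * @fifo_mem: Pointer to the mapped fifo memory.
 * @next_cmd: Current NEXT_CMD offset.
 * @max: Fifo MAX offset.
 * @min: Fifo MIN offset.
 * @bytes: Number of bytes to copy.
 *
 * Fallback path for fifos without SVGA_FIFO_CAP_RESERVE.
 */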
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       u32 *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
		fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
}

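/**
 * vmw_local_fifo_commit - Commit previously reserved fifo space.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 *
 * Copies out any bounce buffer, advances NEXT_CMD, clears the reservation
 * and pings the host to start processing.  Also drops the fifo_mutex taken
 * by vmw_local_fifo_reserve().
 */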
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	if (fifo_state->dx)
		bytes += sizeof(struct vmw_temp_set_context);

	fifo_state->dx = false;
	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, fifo_mem,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, fifo_mem,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	down_write(&fifo_state->rwsem);
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}

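/**
 * vmw_fifo_commit - Commit previously reserved command space.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 *
 * Commits either through the command buffer manager, if present, or
 * directly to the fifo.
 */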
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}


/**
 * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 */
void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_fifo_flush - Flush any buffered commands and make sure command
 * processing starts.
 *
 * @dev_priv: Pointer to device private structure.
 * @interruptible: Whether to wait interruptibly if the function needs to sleep.
 */
int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
{
	might_sleep();

	if (dev_priv->cman)
		return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
	else
		return 0;
}

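/**
 * vmw_fifo_send_fence - Emit a fence command and return its sequence number.
 *
 * @dev_priv: Pointer to device private structure.
 * @seqno: Location in which to store the assigned sequence number.
 *
 * Reserves fifo space for an SVGA_CMD_FENCE command, assigns the next
 * non-zero marker sequence number and commits the command.  If the fifo
 * lacks SVGA_FIFO_CAP_FENCE the command is not emitted and the fence is
 * emulated by the interrupt code.  Returns 0 on success or -ENOMEM if
 * fifo space could not be reserved.
 */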
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	u32 *fm;
	int ret = 0;
	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

	fm = VMW_FIFO_RESERVE(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3*HZ);
		goto out_err;
	}

	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_fifo_commit(dev_priv, 0);
		return 0;
	}

	*fm++ = SVGA_CMD_FENCE;
	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
	cmd_fence->fence = *seqno;
	vmw_fifo_commit_flush(dev_priv, bytes);
	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
	spin_lock(&dev_priv->fence_lock);
	vmw_update_seqno(dev_priv, fifo_state);
	spin_unlock(&dev_priv->fence_lock);

out_err:
	return ret;
}

/**
 * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * legacy query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
					    uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery body;
	} *cmd;

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

	if (bo->mem.mem_type == TTM_PL_VRAM) {
		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
		cmd->body.guestResult.offset = bo->offset;
	} else {
		cmd->body.guestResult.gmrId = bo->mem.start;
		cmd->body.guestResult.offset = 0;
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
					uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery body;
	} *cmd;

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
	cmd->body.mobid = bo->mem.start;
	cmd->body.offset = 0;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}


/**
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using the
 * appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A query result structure should have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object, and that buffer object
 * must be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
			      uint32_t cid)
{
	if (dev_priv->has_mob)
		return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);

	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}