#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
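
/*
 * A sketch of how that rule is honoured when computing free space
 * (this mirrors __intel_ring_space(), declared below; illustrative only,
 * not the canonical implementation):
 *
 *	int space = head - tail;
 *	if (space <= 0)
 *		space += size;
 *	return space - I915_RING_FREE_SPACE;
 *
 * Keeping a full cacheline's worth of bytes unused means the tail can
 * never advance into the cacheline the head still occupies.
 */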

struct  intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct		drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qword-aligned offsets, simply pretend it's
 * 8 bytes.
 */
#define i915_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SIGNAL_OFFSET(__ring, to)			     \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) +	\
	(i915_semaphore_seqno_size * (to)))

#define GEN8_WAIT_OFFSET(__ring, from)			     \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
	(i915_semaphore_seqno_size * (__ring)->id))
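
/*
 * Worked example of the layout the two macros above imply (a sketch, not
 * authoritative): each ring owns I915_NUM_RINGS consecutive qword slots in
 * the semaphore page, so with I915_NUM_RINGS == 5 and a seqno size of 8,
 *
 *	GEN8_SIGNAL_OFFSET(x, y) = base + (x->id * 40) + (y * 8)
 *	GEN8_WAIT_OFFSET(y, x)   = base + (x * 40) + (y->id * 8)
 *
 * i.e. signaller x writes the seqno destined for waiter y into qword slot
 * (x * 5 + y), and waiter y polls that same slot.
 */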

#define GEN8_RING_SEMAPHORE_INIT do { \
	if (!dev_priv->semaphore_obj) { \
		break; \
	} \
	ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
	ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
	ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
	ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
	ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
	ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
	} while (0)

enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_ACTIVE_LOOP,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
	u64 acthd;
	u64 max_acthd;
	u32 seqno;
	int score;
	enum intel_ring_hangcheck_action action;
	int deadlock;
	u32 instdone[I915_NUM_INSTDONE_REG];
};

struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;
	char __iomem *virtual_start;
	struct i915_vma *vma;

	struct intel_engine_cs *ring;
	struct list_head link;

	unsigned int virtual_count;
	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;
	int reserved_size;
	int reserved_tail;
	bool reserved_in_use;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};
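
/*
 * Illustrative sketch (not the canonical code; see intel_ring_update_space(),
 * declared below) of how last_retired_head is consumed when refreshing the
 * free-space accounting:
 *
 *	if (ringbuf->last_retired_head != -1) {
 *		ringbuf->head = ringbuf->last_retired_head;
 *		ringbuf->last_retired_head = -1;
 *	}
 *	ringbuf->space = __intel_ring_space(ringbuf->head, ringbuf->tail,
 *					    ringbuf->size);
 */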

struct	intel_context;
struct drm_i915_reg_descriptor;

/*
 * We use a single page to load the context workarounds, so all of these
 * values are expressed in dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, which is also helpful
 *    if we want to place multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in dwords
 */
struct  i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct drm_i915_gem_object *obj;
};

struct  intel_engine_cs {
	const char	*name;
	enum intel_ring_id {
		RCS = 0,
		BCS,
		VCS,
		VCS2,	/* Keep instances of the same type engine together. */
		VECS
	} id;
#define I915_NUM_RINGS 5
#define _VCS(n) (VCS + (n))
	unsigned int exec_id;
	unsigned int guc_id;
	u32		mmio_base;
	struct		drm_device *dev;
	struct intel_ringbuffer *buffer;
	struct list_head buffers;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;

	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
	struct drm_i915_gem_request *trace_irq_req;
	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
	void		(*irq_put)(struct intel_engine_cs *ring);

	int		(*init_hw)(struct intel_engine_cs *ring);

	int		(*init_context)(struct drm_i915_gem_request *req);

	void		(*write_tail)(struct intel_engine_cs *ring,
				      u32 value);
	int __must_check (*flush)(struct drm_i915_gem_request *req,
				  u32	invalidate_domains,
				  u32	flush_domains);
	int		(*add_request)(struct drm_i915_gem_request *req);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32		(*get_seqno)(struct intel_engine_cs *ring,
				     bool lazy_coherency);
	void		(*set_seqno)(struct intel_engine_cs *ring,
				     u32 seqno);
	int		(*dispatch_execbuffer)(struct drm_i915_gem_request *req,
					       u64 offset, u32 length,
					       unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
#define I915_DISPATCH_RS     0x4
	void		(*cleanup)(struct intel_engine_cs *ring);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to    signal to   signal to      signal to
	 *	    RCS		   VCS          BCS        VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  i.e. the transpose of g(x, y)
	 *
	 *	 sync from	sync from    sync from    sync from	sync from
	 *	    RCS		   VCS          BCS        VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  i.e. the transpose of f(x, y)
	 */
	struct {
		u32	sync_seqno[I915_NUM_RINGS-1];

		union {
			struct {
				/* our mbox written by others */
				u32		wait[I915_NUM_RINGS];
				/* mboxes this ring signals to */
				i915_reg_t	signal[I915_NUM_RINGS];
			} mbox;
			u64		signal_ggtt[I915_NUM_RINGS];
		};

		/* AKA wait() */
		int	(*sync_to)(struct drm_i915_gem_request *to_req,
				   struct intel_engine_cs *from,
				   u32 seqno);
		int	(*signal)(struct drm_i915_gem_request *signaller_req,
				  /* num_dwords needed by caller */
				  unsigned int num_dwords);
	} semaphore;

	/* Execlists */
	struct lock execlist_lock;
	struct list_head execlist_queue;
	struct list_head execlist_retired_req_list;
	u8 next_context_status_buffer;
	bool disable_lite_restore_wa;
	u32 ctx_desc_template;
	u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
	int		(*emit_request)(struct drm_i915_gem_request *request);
	int		(*emit_flush)(struct drm_i915_gem_request *request,
				      u32 invalidate_domains,
				      u32 flush_domains);
	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
					 u64 offset, unsigned dispatch_flags);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives.  last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Seqno of request most recently submitted to request_list.
	 * Used exclusively by hang checker to avoid grabbing lock while
	 * inspecting request list.
	 */
	u32 last_submitted_seqno;

	bool gpu_caches_dirty;

	wait_queue_head_t irq_queue;

	struct intel_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this ring.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_descriptor *reg_table;
	int reg_count;

	/*
	 * Table of registers allowed in commands that read/write registers, but
	 * only from the DRM master.
	 */
	const struct drm_i915_reg_descriptor *master_reg_table;
	int master_reg_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
	return ring->dev != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_engine_cs *ring,
		      struct intel_engine_cs *other)
{
	int idx;

	/*
	 * rcs -> 0 = bcs, 1 = vcs, 2 = vcs2, 3 = vecs;
	 * bcs -> 0 = vcs, 1 = vcs2, 2 = vecs, 3 = rcs;
	 * vcs -> 0 = vcs2, 1 = vecs, 2 = rcs, 3 = bcs;
	 * vcs2 -> 0 = vecs, 1 = rcs, 2 = bcs, 3 = vcs;
	 * vecs -> 0 = rcs, 1 = bcs, 2 = vcs, 3 = vcs2;
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
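
/*
 * Typical use (a sketch): the returned index selects the per-ring semaphore
 * bookkeeping slot, e.g.
 *
 *	from->semaphore.sync_seqno[intel_ring_sync_index(from, to)]
 */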

static inline void
intel_flush_status_page(struct intel_engine_cs *ring, int reg)
{
	drm_clflush_virt_range(&ring->status_page.page_addr[reg],
			       sizeof(uint32_t));
}

static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_engine_cs *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
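
/*
 * Example (a sketch of the common pattern): the running seqno for a ring
 * lives at dword I915_GEM_HWS_INDEX, so a typical get_seqno() hook reduces
 * to
 *
 *	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 *
 * while the matching MI_STORE_DWORD_INDEX in the add_request path targets
 * I915_GEM_HWS_INDEX_ADDR.
 */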

struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
				     struct intel_ringbuffer *ringbuf);
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);

void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
static inline void intel_ring_emit(struct intel_engine_cs *ring,
				   u32 data)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}
static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
				       i915_reg_t reg)
{
	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}
static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	ringbuf->tail &= ringbuf->size - 1;
}
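
/*
 * Canonical emission pattern (a sketch): callers reserve space for a whole
 * number of dwords, write them, then advance, e.g.
 *
 *	ret = intel_ring_begin(req, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 *
 * where ring is the engine backing req.
 */
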
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);

int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);

void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_engine_cs *ring);

int init_workarounds_ring(struct intel_engine_cs *ring);

static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
	return ringbuf->tail;
}

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is ILK at 136 words. Reserving too much is better than reserving too little
 * as that allows for corner cases that might have been missed. So the figure
 * has been rounded up to 160 words.
 */
#define MIN_SPACE_FOR_ADD_REQUEST	160

/*
 * Reserve space in the ring to guarantee that the i915_add_request() call
 * will always have sufficient room to do its stuff. The request creation
 * code calls this automatically.
 */
void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
/* Cancel the reservation, e.g. because the request is being discarded. */
void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
/* Use the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
/* Finish with the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
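
/*
 * Lifecycle sketch of the four calls above (an illustration, not normative):
 *
 *	reserve -> [emit user payload] -> use -> [emit request tail] -> end
 *	reserve -> [request abandoned]  -> cancel
 */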

/* Legacy ringbuffer specific portion of reservation code: */
int intel_ring_reserve_space(struct drm_i915_gem_request *request);

#endif /* _INTEL_RINGBUFFER_H_ */