// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "intel_gpu_commands.h"
#include "intel_gt_regs.h"

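/*
 * GT3_INLINE_DATA_DELAYS is the inline-data payload handed to each
 * MEDIA_OBJECT below; batch_advance() asserts that a writer filled a
 * chunk exactly up to the end pointer reserved by batch_alloc_items().
 */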
#define GT3_INLINE_DATA_DELAYS 0x1E00
#define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))

struct cb_kernel {
	const void *data;
	u32 size;
};

#define CB_KERNEL(name) { .data = (name), .size = sizeof(name) }

#include "ivb_clear_kernel.c"
static const struct cb_kernel cb_kernel_ivb = CB_KERNEL(ivb_clear_kernel);

#include "hsw_clear_kernel.c"
static const struct cb_kernel cb_kernel_hsw = CB_KERNEL(hsw_clear_kernel);

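/*
 * A batch_chunk is a write cursor over one sub-range of the batch buffer:
 * one chunk holds the commands, another the pipeline state they reference.
 */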
struct batch_chunk {
	struct i915_vma *vma;
	u32 offset;
	u32 *start;
	u32 *end;
	u32 max_items;
};

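/*
 * Layout parameters derived from the GT variant: how many HW threads to
 * spawn, where the state and scratch surface live, and the total size.
 */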
struct batch_vals {
	u32 max_threads;
	u32 state_start;
	u32 surface_start;
	u32 surface_height;
	u32 surface_width;
	u32 size;
};

static int num_primitives(const struct batch_vals *bv)
{
	/*
	 * We need to saturate the GPU with work in order to dispatch
	 * a shader on every HW thread, and clear the thread-local registers.
	 * In short, we have to dispatch work faster than the shaders can
	 * run in order to fill the EU and occupy each HW thread.
	 */
	return bv->max_threads;
}

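/*
 * Pick the per-GT thread count and scratch surface dimensions, then lay
 * out the buffer: commands at offset 0, a 4K state page at state_start,
 * and the surface the kernel renders into at surface_start.
 */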
static void
batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
{
	if (IS_HASWELL(i915)) {
		switch (INTEL_INFO(i915)->gt) {
		default:
		case 1:
			bv->max_threads = 70;
			break;
		case 2:
			bv->max_threads = 140;
			break;
		case 3:
			bv->max_threads = 280;
			break;
		}
		bv->surface_height = 16 * 16;
		bv->surface_width = 32 * 2 * 16;
	} else {
		switch (INTEL_INFO(i915)->gt) {
		default:
		case 1: /* including vlv */
			bv->max_threads = 36;
			break;
		case 2:
			bv->max_threads = 128;
			break;
		}
		bv->surface_height = 16 * 8;
		bv->surface_width = 32 * 16;
	}
	bv->state_start = round_up(SZ_1K + num_primitives(bv) * 64, SZ_4K);
	bv->surface_start = bv->state_start + SZ_4K;
	bv->size = bv->surface_start + bv->surface_height * bv->surface_width;
}

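/*
 * batch_init() and friends implement a tiny dword-granular allocator over
 * a chunk: batch_add() appends one dword, batch_offset() converts a write
 * pointer back into a byte offset from the start of the buffer.
 */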
static void batch_init(struct batch_chunk *bc,
		       struct i915_vma *vma,
		       u32 *start, u32 offset, u32 max_bytes)
{
	bc->vma = vma;
	bc->offset = offset;
	bc->start = start + bc->offset / sizeof(*bc->start);
	bc->end = bc->start;
	bc->max_items = max_bytes / sizeof(*bc->start);
}

static u32 batch_offset(const struct batch_chunk *bc, u32 *cs)
{
	return (cs - bc->start) * sizeof(*bc->start) + bc->offset;
}

static u32 batch_addr(const struct batch_chunk *bc)
{
	return i915_vma_offset(bc->vma);
}

static void batch_add(struct batch_chunk *bc, const u32 d)
{
	GEM_BUG_ON((bc->end - bc->start) >= bc->max_items);
	*bc->end++ = d;
}

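/*
 * Reserve @items dwords, first zero-filling any padding needed to bring
 * the write pointer up to @align bytes.
 */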
static u32 *batch_alloc_items(struct batch_chunk *bc, u32 align, u32 items)
{
	u32 *map;

	if (align) {
		u32 *end = PTR_ALIGN(bc->end, align);

		memset32(bc->end, 0, end - bc->end);
		bc->end = end;
	}

	map = bc->end;
	bc->end += items;

	return map;
}

static u32 *batch_alloc_bytes(struct batch_chunk *bc, u32 align, u32 bytes)
{
	GEM_BUG_ON(!IS_ALIGNED(bytes, sizeof(*bc->start)));
	return batch_alloc_items(bc, align, bytes / sizeof(*bc->start));
}

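/*
 * Write an 8-dword SURFACE_STATE describing the scratch buffer as a
 * B8G8R8A8 2D render target and return its offset within the chunk.
 */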
static u32
gen7_fill_surface_state(struct batch_chunk *state,
			const u32 dst_offset,
			const struct batch_vals *bv)
{
	u32 surface_h = bv->surface_height;
	u32 surface_w = bv->surface_width;
	u32 *cs = batch_alloc_items(state, 32, 8);
	u32 offset = batch_offset(state, cs);

#define SURFACE_2D 1
#define SURFACEFORMAT_B8G8R8A8_UNORM 0x0C0
#define RENDER_CACHE_READ_WRITE 1

	*cs++ = SURFACE_2D << 29 |
		(SURFACEFORMAT_B8G8R8A8_UNORM << 18) |
		(RENDER_CACHE_READ_WRITE << 8);

	*cs++ = batch_addr(state) + dst_offset;

	*cs++ = ((surface_h / 4 - 1) << 16) | (surface_w / 4 - 1);
	*cs++ = surface_w;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
#define SHADER_CHANNELS(r, g, b, a) \
	(((r) << 25) | ((g) << 22) | ((b) << 19) | ((a) << 16))
	*cs++ = SHADER_CHANNELS(4, 5, 6, 7);
	batch_advance(state, cs);

	return offset;
}

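/*
 * Emit an 8-slot binding table whose first entry points at the surface
 * state emitted above; the remaining slots are left empty.
 */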
static u32
gen7_fill_binding_table(struct batch_chunk *state,
			const struct batch_vals *bv)
{
	u32 surface_start =
		gen7_fill_surface_state(state, bv->surface_start, bv);
	u32 *cs = batch_alloc_items(state, 32, 8);
	u32 offset = batch_offset(state, cs);

	*cs++ = surface_start - state->offset;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	batch_advance(state, cs);

	return offset;
}

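/* Copy the clear-kernel binary into the state chunk, 64-byte aligned. */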
static u32
gen7_fill_kernel_data(struct batch_chunk *state,
		      const u32 *data,
		      const u32 size)
{
	return batch_offset(state,
			    memcpy(batch_alloc_bytes(state, 64, size),
				   data, size));
}

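/*
 * Build @count INTERFACE_DESCRIPTOR_DATA entries: the first points at the
 * kernel and its binding table, the rest are zeroed dummies.
 */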
static u32
gen7_fill_interface_descriptor(struct batch_chunk *state,
			       const struct batch_vals *bv,
			       const struct cb_kernel *kernel,
			       unsigned int count)
{
	u32 kernel_offset =
		gen7_fill_kernel_data(state, kernel->data, kernel->size);
	u32 binding_table = gen7_fill_binding_table(state, bv);
	u32 *cs = batch_alloc_items(state, 32, 8 * count);
	u32 offset = batch_offset(state, cs);

	*cs++ = kernel_offset;
	*cs++ = (1 << 7) | (1 << 13);
	*cs++ = 0;
	*cs++ = (binding_table - state->offset) | 1;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;

	/* descriptors 1 - 63 are dummy idds */
	memset32(cs, 0x00, (count - 1) * 8);
	batch_advance(state, cs + (count - 1) * 8);

	return offset;
}

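/*
 * Program STATE_BASE_ADDRESS so that all indirect state offsets used by
 * the media pipeline are relative to the start of our batch buffer.
 */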
static void
gen7_emit_state_base_address(struct batch_chunk *batch,
			     u32 surface_state_base)
{
	u32 *cs = batch_alloc_items(batch, 0, 10);

	*cs++ = STATE_BASE_ADDRESS | (10 - 2);
	/* general */
	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
	/* surface */
	*cs++ = (batch_addr(batch) + surface_state_base) | BASE_ADDRESS_MODIFY;
	/* dynamic */
	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
	/* indirect */
	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
	/* instruction */
	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;

	/* general/dynamic/indirect/instruction access Bound */
	*cs++ = 0;
	*cs++ = BASE_ADDRESS_MODIFY;
	*cs++ = 0;
	*cs++ = BASE_ADDRESS_MODIFY;
	batch_advance(batch, cs);
}

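/*
 * MEDIA_VFE_STATE: request the maximum number of HW threads so that the
 * MEDIA_OBJECTs emitted later can occupy every EU thread at once.
 */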
static void
gen7_emit_vfe_state(struct batch_chunk *batch,
		    const struct batch_vals *bv,
		    u32 urb_size, u32 curbe_size,
		    u32 mode)
{
	u32 threads = bv->max_threads - 1;
	u32 *cs = batch_alloc_items(batch, 32, 8);

	*cs++ = MEDIA_VFE_STATE | (8 - 2);

	/* scratch buffer */
	*cs++ = 0;

	/* number of threads & urb entries for GPGPU vs Media Mode */
	*cs++ = threads << 16 | 1 << 8 | mode << 2;

	*cs++ = 0;

	/* urb entry size & curbe size in 256-bit units */
	*cs++ = urb_size << 16 | curbe_size;

	/* scoreboard */
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	batch_advance(batch, cs);
}

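/* Point the hardware at the interface descriptors built in the state chunk. */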
static void
gen7_emit_interface_descriptor_load(struct batch_chunk *batch,
				    const u32 interface_descriptor,
				    unsigned int count)
{
	u32 *cs = batch_alloc_items(batch, 8, 4);

	*cs++ = MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2);
	*cs++ = 0;
	*cs++ = count * 8 * sizeof(*cs);

	/*
	 * interface descriptor address - it is relative to the dynamic
	 * state base address
	 */
	*cs++ = interface_descriptor;
	batch_advance(batch, cs);
}

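/*
 * Dispatch one HW thread of the clear kernel. The inline data carries the
 * thread's x/y offset into the scratch surface and a delay operand
 * (GT3_INLINE_DATA_DELAYS) so threads stay busy until all are spawned.
 */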
static void
gen7_emit_media_object(struct batch_chunk *batch,
		       unsigned int media_object_index)
{
	unsigned int x_offset = (media_object_index % 16) * 64;
	unsigned int y_offset = (media_object_index / 16) * 16;
	unsigned int pkt = 6 + 3;
	u32 *cs;

	cs = batch_alloc_items(batch, 8, pkt);

	*cs++ = MEDIA_OBJECT | (pkt - 2);

	/* interface descriptor offset */
	*cs++ = 0;

	/* without indirect data */
	*cs++ = 0;
	*cs++ = 0;

	/* scoreboard */
	*cs++ = 0;
	*cs++ = 0;

	/* inline */
	*cs++ = y_offset << 16 | x_offset;
	*cs++ = 0;
	*cs++ = GT3_INLINE_DATA_DELAYS;

	batch_advance(batch, cs);
}

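/* Flush the render caches and stall until the writes have landed. */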
static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
{
	u32 *cs = batch_alloc_items(batch, 0, 4);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		PIPE_CONTROL_DC_FLUSH_ENABLE |
		PIPE_CONTROL_CS_STALL;
	*cs++ = 0;
	*cs++ = 0;

	batch_advance(batch, cs);
}

static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch)
{
	u32 *cs = batch_alloc_items(batch, 0, 10);

	/* ivb: Stall before STATE_CACHE_INVALIDATE */
	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
		PIPE_CONTROL_CS_STALL;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;

	batch_advance(batch, cs);
}

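/*
 * Assemble the whole batch: build the state chunk (kernel, binding table,
 * interface descriptors), switch to the media pipeline, and dispatch one
 * MEDIA_OBJECT per HW thread before terminating the batch.
 */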
static void emit_batch(struct i915_vma * const vma,
		       u32 *start,
		       const struct batch_vals *bv)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	const unsigned int desc_count = 1;
	const unsigned int urb_size = 1;
	struct batch_chunk cmds, state;
	u32 descriptors;
	unsigned int i;

	batch_init(&cmds, vma, start, 0, bv->state_start);
	batch_init(&state, vma, start, bv->state_start, SZ_4K);

	descriptors = gen7_fill_interface_descriptor(&state, bv,
						     IS_HASWELL(i915) ?
						     &cb_kernel_hsw :
						     &cb_kernel_ivb,
						     desc_count);

	/* Reset inherited context registers */
	gen7_emit_pipeline_flush(&cmds);
	gen7_emit_pipeline_invalidate(&cmds);
	batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
	batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
	batch_add(&cmds, 0xffff0000 |
			((IS_IVB_GT1(i915) || IS_VALLEYVIEW(i915)) ?
			 HIZ_RAW_STALL_OPT_DISABLE :
			 0));
	batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
	batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
	gen7_emit_pipeline_invalidate(&cmds);
	gen7_emit_pipeline_flush(&cmds);

	/* Switch to the media pipeline and our base address */
	gen7_emit_pipeline_invalidate(&cmds);
	batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
	batch_add(&cmds, MI_NOOP);
	gen7_emit_pipeline_invalidate(&cmds);

	gen7_emit_pipeline_flush(&cmds);
	gen7_emit_state_base_address(&cmds, descriptors);
	gen7_emit_pipeline_invalidate(&cmds);

	/* Set the clear-residual kernel state */
	gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
	gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);

	/* Execute the kernel on all HW threads */
	for (i = 0; i < num_primitives(bv); i++)
		gen7_emit_media_object(&cmds, i);

	batch_add(&cmds, MI_BATCH_BUFFER_END);
}

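/*
 * Build the batch that clears the EU thread-local registers left over
 * from a previous context. Called with vma == NULL this only reports the
 * buffer size the caller must allocate; called again with the allocated
 * vma it emits the batch into the object's WC map.
 */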
int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
			    struct i915_vma * const vma)
{
	struct batch_vals bv;
	u32 *batch;

	batch_get_defaults(engine->i915, &bv);
	if (!vma)
		return bv.size;

	GEM_BUG_ON(vma->obj->base.size < bv.size);

	batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	emit_batch(vma, memset(batch, 0, bv.size), &bv);

	i915_gem_object_flush_map(vma->obj);
	__i915_gem_object_release_map(vma->obj);

	return 0;
}
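
/*
 * A minimal sketch of the expected two-pass calling convention; "engine"
 * and "vm" stand in for whatever the real caller has at hand, and the
 * object/vma setup shown is an assumption, not the actual call site:
 *
 *	size = gen7_setup_clear_gpr_bb(engine, NULL);	// sizing pass
 *	obj = i915_gem_object_create_internal(engine->i915, size);
 *	vma = i915_vma_instance(obj, vm, NULL);		// then pin it
 *	err = gen7_setup_clear_gpr_bb(engine, vma);	// emit the batch
 */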