/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Pei Zhang <pei.zhang@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Ping Gao <ping.a.gao@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *

 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "intel_mchbar_regs.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
#include "display/intel_dp_aux_regs.h"
#include "display/intel_dpio_phy.h"
#include "display/intel_fbc.h"
#include "display/intel_fdi_regs.h"
#include "display/intel_pps_regs.h"
#include "display/intel_psr_regs.h"
#include "display/skl_watermark_regs.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_gt_regs.h"

/* XXX FIXME i915 has changed PP_XXX definition */
#define PCH_PP_STATUS  _MMIO(0xc7200)
#define PCH_PP_CONTROL _MMIO(0xc7204)
#define PCH_PP_ON_DELAYS _MMIO(0xc7208)
#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
#define PCH_PP_DIVISOR _MMIO(0xc7210)

unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
{
	struct drm_i915_private *i915 = gvt->gt->i915;

	if (IS_BROADWELL(i915))
		return D_BDW;
	else if (IS_SKYLAKE(i915))
		return D_SKL;
	else if (IS_KABYLAKE(i915))
		return D_KBL;
	else if (IS_BROXTON(i915))
		return D_BXT;
	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
		return D_CFL;

	return 0;
}

static bool intel_gvt_match_device(struct intel_gvt *gvt,
		unsigned long device)
{
	return intel_gvt_get_device_type(gvt) & device;
}

static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
}

static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
}

struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
						  unsigned int offset)
{
	struct intel_gvt_mmio_info *e;

	hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
		if (e->offset == offset)
			return e;
	}
	return NULL;
}

static int setup_mmio_info(struct intel_gvt *gvt, u32 offset, u32 size,
			   u16 flags, u32 addr_mask, u32 ro_mask, u32 device,
			   gvt_mmio_func read, gvt_mmio_func write)
{
	struct intel_gvt_mmio_info *p;
	u32 start, end, i;

	if (!intel_gvt_match_device(gvt, device))
		return 0;

	if (WARN_ON(!IS_ALIGNED(offset, 4)))
		return -EINVAL;

	start = offset;
	end = offset + size;

	for (i = start; i < end; i += 4) {
		p = intel_gvt_find_mmio_info(gvt, i);
		if (!p) {
			WARN(1, "assign a handler to a non-tracked mmio %x\n",
				i);
			return -ENODEV;
		}
		p->ro_mask = ro_mask;
		gvt->mmio.mmio_attribute[i / 4] = flags;
		if (read)
			p->read = read;
		if (write)
			p->write = write;
	}
	return 0;
}

/**
 * intel_gvt_render_mmio_to_engine - convert a mmio offset into the engine
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * The engine containing the offset within its mmio page.
 */
const struct intel_engine_cs *
intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	offset &= ~GENMASK(11, 0);
	for_each_engine(engine, gvt->gt, id)
		if (engine->mmio_base == offset)
			return engine;

	return NULL;
}

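/*
 * Each GEN6+ fence register is a 64-bit value (a lo/hi dword pair), so
 * consecutive fences sit 8 bytes apart; hence the divide/multiply by 8
 * when converting between an MMIO offset and a fence number below.
 */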
#define offset_to_fence_num(offset) \
	((offset - i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) >> 3)

#define fence_num_to_offset(num) \
	(num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))


void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
{
	switch (reason) {
	case GVT_FAILSAFE_UNSUPPORTED_GUEST:
		pr_err("Detected your guest driver doesn't support GVT-g.\n");
		break;
	case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
		pr_err("Graphics resource is not enough for the guest\n");
		break;
	case GVT_FAILSAFE_GUEST_ERR:
		pr_err("GVT Internal error  for the guest\n");
		break;
	default:
		break;
	}
	pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
	vgpu->failsafe = true;
}

static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
		unsigned int fence_num, void *p_data, unsigned int bytes)
{
	unsigned int max_fence = vgpu_fence_sz(vgpu);

	if (fence_num >= max_fence) {
		gvt_vgpu_err("access oob fence reg %d/%d\n",
			     fence_num, max_fence);

		/* When a guest accesses an oob fence reg without accessing
		 * pv_info first, we treat the guest as not supporting GVT,
		 * and let the vgpu enter failsafe mode.
		 */
		if (!vgpu->pv_notified)
			enter_failsafe_mode(vgpu,
					GVT_FAILSAFE_UNSUPPORTED_GUEST);

		memset(p_data, 0, bytes);
		return -EINVAL;
	}
	return 0;
}

static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;

	if (GRAPHICS_VER(vgpu->gvt->gt->i915) <= 10) {
		if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
			gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
		else if (!ips)
			gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id);
		else {
			/* All engines must be enabled together for vGPU,
			 * since we don't know which engine the ppgtt will
			 * bind to when shadowing.
			 */
			gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n",
				     ips);
			return -EINVAL;
		}
	}

	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
		void *p_data, unsigned int bytes)
{
	int ret;

	ret = sanitize_fence_mmio_access(vgpu, offset_to_fence_num(off),
			p_data, bytes);
	if (ret)
		return ret;
	read_vreg(vgpu, off, p_data, bytes);
	return 0;
}

static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	unsigned int fence_num = offset_to_fence_num(off);
	int ret;

	ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
	if (ret)
		return ret;
	write_vreg(vgpu, off, p_data, bytes);

	mmio_hw_access_pre(gvt->gt);
	intel_vgpu_write_fence(vgpu, fence_num,
			vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
	mmio_hw_access_post(gvt->gt);
	return 0;
}

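/*
 * The forcewake/mode registers handled below use the hardware's "masked
 * write" convention: the upper 16 bits of a write select which of the
 * lower 16 bits take effect. CALC_MODE_MASK_REG() applies such a masked
 * write to the old value, keeping unmasked bits and updating masked ones.
 */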
#define CALC_MODE_MASK_REG(old, new) \
	(((new) & GENMASK(31, 16)) \
	 | ((((old) & GENMASK(15, 0)) & ~((new) >> 16)) \
	 | ((new) & ((new) >> 16))))

static int mul_force_wake_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 old, new;
	u32 ack_reg_offset;

	old = vgpu_vreg(vgpu, offset);
	new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);

	if (GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9) {
		switch (offset) {
		case FORCEWAKE_RENDER_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
			break;
		case FORCEWAKE_GT_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_GT_GEN9_REG;
			break;
		case FORCEWAKE_MEDIA_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG;
			break;
		default:
			/*should not hit here*/
			gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
			return -EINVAL;
		}
	} else {
		ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
	}

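	/* Ack immediately; there is no real hardware handshake to wait on
	 * for an emulated forcewake request.
	 */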
	vgpu_vreg(vgpu, offset) = new;
	vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0));
	return 0;
}

static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
			    void *p_data, unsigned int bytes)
{
	intel_engine_mask_t engine_mask = 0;
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & GEN6_GRDOM_FULL) {
		gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
		engine_mask = ALL_ENGINES;
	} else {
		if (data & GEN6_GRDOM_RENDER) {
			gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
			engine_mask |= BIT(RCS0);
		}
		if (data & GEN6_GRDOM_MEDIA) {
			gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
			engine_mask |= BIT(VCS0);
		}
		if (data & GEN6_GRDOM_BLT) {
			gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
			engine_mask |= BIT(BCS0);
		}
		if (data & GEN6_GRDOM_VECS) {
			gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
			engine_mask |= BIT(VECS0);
		}
		if (data & GEN8_GRDOM_MEDIA2) {
			gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
			engine_mask |= BIT(VCS1);
		}
		if (data & GEN9_GRDOM_GUC) {
			gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
			vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
		}
		engine_mask &= vgpu->gvt->gt->info.engine_mask;
	}

	/* vgpu_lock already hold by emulate mmio r/w */
	intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);

	/* sw will wait for the device to ack the reset request */
	vgpu_vreg(vgpu, offset) = 0;

	return 0;
}

static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	return intel_gvt_i2c_handle_gmbus_read(vgpu, offset, p_data, bytes);
}

static int gmbus_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	return intel_gvt_i2c_handle_gmbus_write(vgpu, offset, p_data, bytes);
}

static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_ON;
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;

	} else
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &=
			~(PP_ON | PP_SEQUENCE_POWER_DOWN
					| PP_CYCLE_DELAY_ACTIVE);
	return 0;
}

static int transconf_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE)
		vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE;
	else
		vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE;
	return 0;
}

static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & LCPLL_PLL_DISABLE)
		vgpu_vreg(vgpu, offset) &= ~LCPLL_PLL_LOCK;
	else
		vgpu_vreg(vgpu, offset) |= LCPLL_PLL_LOCK;

	if (vgpu_vreg(vgpu, offset) & LCPLL_CD_SOURCE_FCLK)
		vgpu_vreg(vgpu, offset) |= LCPLL_CD_SOURCE_FCLK_DONE;
	else
		vgpu_vreg(vgpu, offset) &= ~LCPLL_CD_SOURCE_FCLK_DONE;

	return 0;
}

static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	switch (offset) {
	case 0xe651c:
	case 0xe661c:
	case 0xe671c:
	case 0xe681c:
		vgpu_vreg(vgpu, offset) = 1 << 17;
		break;
	case 0xe6c04:
		vgpu_vreg(vgpu, offset) = 0x3;
		break;
	case 0xe6e1c:
		vgpu_vreg(vgpu, offset) = 0x2f << 16;
		break;
	default:
		return -EINVAL;
	}

	read_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

/*
 * Only PIPE_A is enabled in the current vGPU display, and PIPE_A is tied to
 *   TRANSCODER_A in HW. The DDI/PORT can be any PORT_x, depending on
 *   setup_virtual_dp_monitor().
 * emulate_monitor_status_change() sets up the PLL for PORT_x as the initially
 *   enabled DPLL. The guest driver may later set up a different DPLLx when
 *   setting a mode.
 * So the correct sequence to find the DP stream clock is:
 *   Check TRANS_DDI_FUNC_CTL on TRANSCODER_A to get PORT_x.
 *   Check the correct PLLx for PORT_x to get the PLL frequency and DP bitrate.
 * The refresh rate can then be calculated from the following equations:
 *   Pixel clock = h_total * v_total * refresh_rate
 *   stream clock = Pixel clock
 *   ls_clk = DP bitrate
 *   Link M/N = strm_clk / ls_clk
 */

static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
	u32 dp_br = 0;
	u32 ddi_pll_sel = vgpu_vreg_t(vgpu, PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_LCPLL_2700:
		dp_br = 270000 * 2;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		dp_br = 135000 * 2;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		dp_br = 81000 * 2;
		break;
	case PORT_CLK_SEL_SPLL:
	{
		switch (vgpu_vreg_t(vgpu, SPLL_CTL) & SPLL_FREQ_MASK) {
		case SPLL_FREQ_810MHz:
			dp_br = 81000 * 2;
			break;
		case SPLL_FREQ_1350MHz:
			dp_br = 135000 * 2;
			break;
		case SPLL_FREQ_2700MHz:
			dp_br = 270000 * 2;
			break;
		default:
			gvt_dbg_dpy("vgpu-%d PORT_%c can't get freq from SPLL 0x%08x\n",
				    vgpu->id, port_name(port), vgpu_vreg_t(vgpu, SPLL_CTL));
			break;
		}
		break;
	}
	case PORT_CLK_SEL_WRPLL1:
	case PORT_CLK_SEL_WRPLL2:
	{
		u32 wrpll_ctl;
		int refclk, n, p, r;

		if (ddi_pll_sel == PORT_CLK_SEL_WRPLL1)
			wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL1));
		else
			wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL2));

		switch (wrpll_ctl & WRPLL_REF_MASK) {
		case WRPLL_REF_PCH_SSC:
			refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.ssc;
			break;
		case WRPLL_REF_LCPLL:
			refclk = 2700000;
			break;
		default:
			gvt_dbg_dpy("vgpu-%d PORT_%c WRPLL can't get refclk 0x%08x\n",
				    vgpu->id, port_name(port), wrpll_ctl);
			goto out;
		}

		r = wrpll_ctl & WRPLL_DIVIDER_REF_MASK;
		p = (wrpll_ctl & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
		n = (wrpll_ctl & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

		dp_br = (refclk * n / 10) / (p * r) * 2;
		break;
	}
	default:
		gvt_dbg_dpy("vgpu-%d PORT_%c has invalid clock select 0x%08x\n",
			    vgpu->id, port_name(port), vgpu_vreg_t(vgpu, PORT_CLK_SEL(port)));
		break;
	}

out:
	return dp_br;
}

static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
	u32 dp_br = 0;
	int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc;
	enum dpio_phy phy = DPIO_PHY0;
	enum dpio_channel ch = DPIO_CH0;
	struct dpll clock = {0};
	u32 temp;

	/* Port to PHY mapping is fixed, see bxt_ddi_phy_info{} */
	switch (port) {
	case PORT_A:
		phy = DPIO_PHY1;
		ch = DPIO_CH0;
		break;
	case PORT_B:
		phy = DPIO_PHY0;
		ch = DPIO_CH0;
		break;
	case PORT_C:
		phy = DPIO_PHY0;
		ch = DPIO_CH1;
		break;
	default:
		gvt_dbg_dpy("vgpu-%d no PHY for PORT_%c\n", vgpu->id, port_name(port));
		goto out;
	}

	temp = vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port));
	if (!(temp & PORT_PLL_ENABLE) || !(temp & PORT_PLL_LOCK)) {
		gvt_dbg_dpy("vgpu-%d PORT_%c PLL_ENABLE 0x%08x isn't enabled or locked\n",
			    vgpu->id, port_name(port), temp);
		goto out;
	}

	clock.m1 = 2;
	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK,
				 vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 0))) << 22;
	if (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 3)) & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
					  vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 2)));
	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK,
				vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 1)));
	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK,
				 vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK,
				 vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
	clock.m = clock.m1 * clock.m2;
	clock.p = clock.p1 * clock.p2 * 5;

	if (clock.n == 0 || clock.p == 0) {
		gvt_dbg_dpy("vgpu-%d PORT_%c PLL has invalid divider\n", vgpu->id, port_name(port));
		goto out;
	}

	clock.vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock.m), clock.n << 22);
	clock.dot = DIV_ROUND_CLOSEST(clock.vco, clock.p);

	dp_br = clock.dot;

out:
	return dp_br;
}

static u32 skl_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
	u32 dp_br = 0;
	enum intel_dpll_id dpll_id = DPLL_ID_SKL_DPLL0;

	/* Find the enabled DPLL for the DDI/PORT */
	if (!(vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port)) &&
	    (vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_SEL_OVERRIDE(port))) {
		dpll_id += (vgpu_vreg_t(vgpu, DPLL_CTRL2) &
			DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >>
			DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port);
	} else {
		gvt_dbg_dpy("vgpu-%d DPLL for PORT_%c isn't turned on\n",
			    vgpu->id, port_name(port));
		return dp_br;
	}

	/* Find PLL output frequency from the correct DPLL, and get bit rate */
	switch ((vgpu_vreg_t(vgpu, DPLL_CTRL1) &
		DPLL_CTRL1_LINK_RATE_MASK(dpll_id)) >>
		DPLL_CTRL1_LINK_RATE_SHIFT(dpll_id)) {
		case DPLL_CTRL1_LINK_RATE_810:
			dp_br = 81000 * 2;
			break;
		case DPLL_CTRL1_LINK_RATE_1080:
			dp_br = 108000 * 2;
			break;
		case DPLL_CTRL1_LINK_RATE_1350:
			dp_br = 135000 * 2;
			break;
		case DPLL_CTRL1_LINK_RATE_1620:
			dp_br = 162000 * 2;
			break;
		case DPLL_CTRL1_LINK_RATE_2160:
			dp_br = 216000 * 2;
			break;
		case DPLL_CTRL1_LINK_RATE_2700:
			dp_br = 270000 * 2;
			break;
		default:
			dp_br = 0;
			gvt_dbg_dpy("vgpu-%d PORT_%c fail to get DPLL-%d freq\n",
				    vgpu->id, port_name(port), dpll_id);
	}

	return dp_br;
}

static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	enum port port;
	u32 dp_br, link_m, link_n, htotal, vtotal;

	/* Find DDI/PORT assigned to TRANSCODER_A, expect B or D */
	port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &
		TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
	if (port != PORT_B && port != PORT_D) {
		gvt_dbg_dpy("vgpu-%d unsupported PORT_%c\n", vgpu->id, port_name(port));
		return;
	}

	/* Calculate DP bitrate from PLL */
	if (IS_BROADWELL(dev_priv))
		dp_br = bdw_vgpu_get_dp_bitrate(vgpu, port);
	else if (IS_BROXTON(dev_priv))
		dp_br = bxt_vgpu_get_dp_bitrate(vgpu, port);
	else
		dp_br = skl_vgpu_get_dp_bitrate(vgpu, port);

	/* Get DP link symbol clock M/N */
	link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A));
	link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A));

	/* Get H/V total from transcoder timing */
	htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
	vtotal = (vgpu_vreg_t(vgpu, TRANS_VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);

	if (dp_br && link_n && htotal && vtotal) {
		u64 pixel_clk = 0;
		u32 new_rate = 0;
		u32 *old_rate = &(intel_vgpu_port(vgpu, vgpu->display.port_num)->vrefresh_k);

		/* Calculate pixel clock by (ls_clk * M / N) */
		pixel_clk = div_u64(mul_u32_u32(link_m, dp_br), link_n);
		pixel_clk *= MSEC_PER_SEC;

		/* Calculate refresh rate by (pixel_clk / (h_total * v_total)) */
		new_rate = DIV64_U64_ROUND_CLOSEST(mul_u64_u32_shr(pixel_clk, MSEC_PER_SEC, 0), mul_u32_u32(htotal + 1, vtotal + 1));

		if (*old_rate != new_rate)
			*old_rate = new_rate;

		gvt_dbg_dpy("vgpu-%d PIPE_%c refresh rate updated to %d\n",
			    vgpu->id, pipe_name(PIPE_A), new_rate);
	}
}

static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & TRANSCONF_ENABLE) {
		vgpu_vreg(vgpu, offset) |= TRANSCONF_STATE_ENABLE;
		vgpu_update_refresh_rate(vgpu);
		vgpu_update_vblank_emulation(vgpu, true);
	} else {
		vgpu_vreg(vgpu, offset) &= ~TRANSCONF_STATE_ENABLE;
		vgpu_update_vblank_emulation(vgpu, false);
	}
	return 0;
}

/* sorted in ascending order */
static i915_reg_t force_nonpriv_white_list[] = {
	_MMIO(0xd80),
	GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
	GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
	CL_PRIMITIVES_COUNT, //_MMIO(0x2340)
	PS_INVOCATION_COUNT, //_MMIO(0x2348)
	PS_DEPTH_COUNT, //_MMIO(0x2350)
	GEN8_CS_CHICKEN1,//_MMIO(0x2580)
	_MMIO(0x2690),
	_MMIO(0x2694),
	_MMIO(0x2698),
	_MMIO(0x2754),
	_MMIO(0x28a0),
	_MMIO(0x4de0),
	_MMIO(0x4de4),
	_MMIO(0x4dfc),
	GEN7_COMMON_SLICE_CHICKEN1,//_MMIO(0x7010)
	_MMIO(0x7014),
	HDC_CHICKEN0,//_MMIO(0x7300)
	GEN8_HDC_CHICKEN1,//_MMIO(0x7304)
	_MMIO(0x7700),
	_MMIO(0x7704),
	_MMIO(0x7708),
	_MMIO(0x770c),
	_MMIO(0x83a8),
	_MMIO(0xb110),
	_MMIO(0xb118),
	_MMIO(0xe100),
	_MMIO(0xe18c),
	_MMIO(0xe48c),
	_MMIO(0xe5f4),
	_MMIO(0x64844),
};

/* a simple bsearch */
static inline bool in_whitelist(u32 reg)
{
	int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
	i915_reg_t *array = force_nonpriv_white_list;

	while (left < right) {
		int mid = (left + right)/2;

		if (reg > array[mid].reg)
			left = mid + 1;
		else if (reg < array[mid].reg)
			right = mid;
		else
			return true;
	}
	return false;
}

static int force_nonpriv_write(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
	const struct intel_engine_cs *engine =
		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);

	if (bytes != 4 || !IS_ALIGNED(offset, bytes) || !engine) {
		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
			vgpu->id, offset, bytes);
		return -EINVAL;
	}

	if (!in_whitelist(reg_nonpriv) &&
	    reg_nonpriv != i915_mmio_reg_offset(RING_NOPID(engine->mmio_base))) {
		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
			vgpu->id, reg_nonpriv, offset);
	} else
		intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);

	return 0;
}

static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & DDI_BUF_CTL_ENABLE) {
		vgpu_vreg(vgpu, offset) &= ~DDI_BUF_IS_IDLE;
	} else {
		vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
		if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
			vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E))
				&= ~DP_TP_STATUS_AUTOTRAIN_DONE;
	}
	return 0;
}

static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	vgpu_vreg(vgpu, offset) &= ~*(u32 *)p_data;
	return 0;
}

#define FDI_LINK_TRAIN_PATTERN1         0
#define FDI_LINK_TRAIN_PATTERN2         1

static int fdi_auto_training_started(struct intel_vgpu *vgpu)
{
	u32 ddi_buf_ctl = vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_E));
	u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
	u32 tx_ctl = vgpu_vreg_t(vgpu, DP_TP_CTL(PORT_E));

	if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
			(rx_ctl & FDI_RX_ENABLE) &&
			(rx_ctl & FDI_AUTO_TRAINING) &&
			(tx_ctl & DP_TP_CTL_ENABLE) &&
			(tx_ctl & DP_TP_CTL_FDI_AUTOTRAIN))
		return 1;
	else
		return 0;
}

static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
		enum pipe pipe, unsigned int train_pattern)
{
	i915_reg_t fdi_rx_imr, fdi_tx_ctl, fdi_rx_ctl;
	unsigned int fdi_rx_check_bits, fdi_tx_check_bits;
	unsigned int fdi_rx_train_bits, fdi_tx_train_bits;
	unsigned int fdi_iir_check_bits;

	fdi_rx_imr = FDI_RX_IMR(pipe);
	fdi_tx_ctl = FDI_TX_CTL(pipe);
	fdi_rx_ctl = FDI_RX_CTL(pipe);

	if (train_pattern == FDI_LINK_TRAIN_PATTERN1) {
		fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_1_CPT;
		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_1;
		fdi_iir_check_bits = FDI_RX_BIT_LOCK;
	} else if (train_pattern == FDI_LINK_TRAIN_PATTERN2) {
		fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_2_CPT;
		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
		fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
	} else {
		gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
		return -EINVAL;
	}

	fdi_rx_check_bits = FDI_RX_ENABLE | fdi_rx_train_bits;
	fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;

	/* If imr bit has been masked */
	if (vgpu_vreg_t(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
		return 0;

	if (((vgpu_vreg_t(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
			== fdi_tx_check_bits)
		&& ((vgpu_vreg_t(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
			== fdi_rx_check_bits))
		return 1;
	else
		return 0;
}

#define INVALID_INDEX (~0U)

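/*
 * Map an MMIO offset within a uniformly-strided register array (e.g. the
 * per-pipe FDI registers) to its array index. The stride is inferred from
 * the first two instances; offsets outside [start, end] yield INVALID_INDEX.
 */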
static unsigned int calc_index(unsigned int offset, unsigned int start,
	unsigned int next, unsigned int end, i915_reg_t i915_end)
{
	unsigned int range = next - start;

	if (!end)
		end = i915_mmio_reg_offset(i915_end);
	if (offset < start || offset > end)
		return INVALID_INDEX;
	offset -= start;
	return offset / range;
}

#define FDI_RX_CTL_TO_PIPE(offset) \
	calc_index(offset, _FDI_RXA_CTL, _FDI_RXB_CTL, 0, FDI_RX_CTL(PIPE_C))

#define FDI_TX_CTL_TO_PIPE(offset) \
	calc_index(offset, _FDI_TXA_CTL, _FDI_TXB_CTL, 0, FDI_TX_CTL(PIPE_C))

#define FDI_RX_IMR_TO_PIPE(offset) \
	calc_index(offset, _FDI_RXA_IMR, _FDI_RXB_IMR, 0, FDI_RX_IMR(PIPE_C))

static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	i915_reg_t fdi_rx_iir;
	unsigned int index;
	int ret;

	if (FDI_RX_CTL_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_RX_CTL_TO_PIPE(offset);
	else if (FDI_TX_CTL_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_TX_CTL_TO_PIPE(offset);
	else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_RX_IMR_TO_PIPE(offset);
	else {
		gvt_vgpu_err("Unsupported registers %x\n", offset);
		return -EINVAL;
	}

	write_vreg(vgpu, offset, p_data, bytes);

	fdi_rx_iir = FDI_RX_IIR(index);

	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN1);
	if (ret < 0)
		return ret;
	if (ret)
		vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;

	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
	if (ret < 0)
		return ret;
	if (ret)
		vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;

	if (offset == _FDI_RXA_CTL)
		if (fdi_auto_training_started(vgpu))
			vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E)) |=
				DP_TP_STATUS_AUTOTRAIN_DONE;
	return 0;
}

#define DP_TP_CTL_TO_PORT(offset) \
	calc_index(offset, _DP_TP_CTL_A, _DP_TP_CTL_B, 0, DP_TP_CTL(PORT_E))

static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	i915_reg_t status_reg;
	unsigned int index;
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);

	index = DP_TP_CTL_TO_PORT(offset);
	data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
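	/* Bits 10:8 select the link-training pattern; 0x2 is the idle
	 * pattern, so report idle-pattern-sent (bit 25) in DP_TP_STATUS.
	 */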
955c349dbc7Sjsg 	if (data == 0x2) {
956c349dbc7Sjsg 		status_reg = DP_TP_STATUS(index);
957c349dbc7Sjsg 		vgpu_vreg_t(vgpu, status_reg) |= (1 << 25);
958c349dbc7Sjsg 	}
959c349dbc7Sjsg 	return 0;
960c349dbc7Sjsg }
961c349dbc7Sjsg 
dp_tp_status_mmio_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)962c349dbc7Sjsg static int dp_tp_status_mmio_write(struct intel_vgpu *vgpu,
963c349dbc7Sjsg 		unsigned int offset, void *p_data, unsigned int bytes)
964c349dbc7Sjsg {
965c349dbc7Sjsg 	u32 reg_val;
966c349dbc7Sjsg 	u32 sticky_mask;
967c349dbc7Sjsg 
968c349dbc7Sjsg 	reg_val = *((u32 *)p_data);
969c349dbc7Sjsg 	sticky_mask = GENMASK(27, 26) | (1 << 24);
970c349dbc7Sjsg 
971c349dbc7Sjsg 	vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) |
972c349dbc7Sjsg 		(vgpu_vreg(vgpu, offset) & sticky_mask);
973c349dbc7Sjsg 	vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask);
974c349dbc7Sjsg 	return 0;
975c349dbc7Sjsg }
976c349dbc7Sjsg 
pch_adpa_mmio_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)977c349dbc7Sjsg static int pch_adpa_mmio_write(struct intel_vgpu *vgpu,
978c349dbc7Sjsg 		unsigned int offset, void *p_data, unsigned int bytes)
979c349dbc7Sjsg {
980c349dbc7Sjsg 	u32 data;
981c349dbc7Sjsg 
982c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
983c349dbc7Sjsg 	data = vgpu_vreg(vgpu, offset);
984c349dbc7Sjsg 
985c349dbc7Sjsg 	if (data & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
986c349dbc7Sjsg 		vgpu_vreg(vgpu, offset) &= ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
987c349dbc7Sjsg 	return 0;
988c349dbc7Sjsg }
989c349dbc7Sjsg 
south_chicken2_mmio_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)990c349dbc7Sjsg static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
991c349dbc7Sjsg 		unsigned int offset, void *p_data, unsigned int bytes)
992c349dbc7Sjsg {
993c349dbc7Sjsg 	u32 data;
994c349dbc7Sjsg 
995c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
996c349dbc7Sjsg 	data = vgpu_vreg(vgpu, offset);
997c349dbc7Sjsg 
998c349dbc7Sjsg 	if (data & FDI_MPHY_IOSFSB_RESET_CTL)
999c349dbc7Sjsg 		vgpu_vreg(vgpu, offset) |= FDI_MPHY_IOSFSB_RESET_STATUS;
1000c349dbc7Sjsg 	else
1001c349dbc7Sjsg 		vgpu_vreg(vgpu, offset) &= ~FDI_MPHY_IOSFSB_RESET_STATUS;
1002c349dbc7Sjsg 	return 0;
1003c349dbc7Sjsg }
1004c349dbc7Sjsg 
1005c349dbc7Sjsg #define DSPSURF_TO_PIPE(offset) \
1006c349dbc7Sjsg 	calc_index(offset, _DSPASURF, _DSPBSURF, 0, DSPSURF(PIPE_C))
1007c349dbc7Sjsg 
pri_surf_mmio_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1008c349dbc7Sjsg static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1009c349dbc7Sjsg 		void *p_data, unsigned int bytes)
1010c349dbc7Sjsg {
1011c349dbc7Sjsg 	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1012c349dbc7Sjsg 	u32 pipe = DSPSURF_TO_PIPE(offset);
1013c349dbc7Sjsg 	int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
1014c349dbc7Sjsg 
1015c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
1016c349dbc7Sjsg 	vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
1017c349dbc7Sjsg 
1018c349dbc7Sjsg 	vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
1019c349dbc7Sjsg 
1020c349dbc7Sjsg 	if (vgpu_vreg_t(vgpu, DSPCNTR(pipe)) & PLANE_CTL_ASYNC_FLIP)
1021c349dbc7Sjsg 		intel_vgpu_trigger_virtual_event(vgpu, event);
1022c349dbc7Sjsg 	else
1023c349dbc7Sjsg 		set_bit(event, vgpu->irq.flip_done_event[pipe]);
1024c349dbc7Sjsg 
1025c349dbc7Sjsg 	return 0;
1026c349dbc7Sjsg }
1027c349dbc7Sjsg 
1028c349dbc7Sjsg #define SPRSURF_TO_PIPE(offset) \
1029c349dbc7Sjsg 	calc_index(offset, _SPRA_SURF, _SPRB_SURF, 0, SPRSURF(PIPE_C))
1030c349dbc7Sjsg 
spr_surf_mmio_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1031c349dbc7Sjsg static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1032c349dbc7Sjsg 		void *p_data, unsigned int bytes)
1033c349dbc7Sjsg {
1034c349dbc7Sjsg 	u32 pipe = SPRSURF_TO_PIPE(offset);
1035c349dbc7Sjsg 	int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0);
1036c349dbc7Sjsg 
1037c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
1038c349dbc7Sjsg 	vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
1039c349dbc7Sjsg 
1040c349dbc7Sjsg 	if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP)
1041c349dbc7Sjsg 		intel_vgpu_trigger_virtual_event(vgpu, event);
1042c349dbc7Sjsg 	else
1043c349dbc7Sjsg 		set_bit(event, vgpu->irq.flip_done_event[pipe]);
1044c349dbc7Sjsg 
1045c349dbc7Sjsg 	return 0;
1046c349dbc7Sjsg }
1047c349dbc7Sjsg 
reg50080_mmio_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1048c349dbc7Sjsg static int reg50080_mmio_write(struct intel_vgpu *vgpu,
1049c349dbc7Sjsg 			       unsigned int offset, void *p_data,
1050c349dbc7Sjsg 			       unsigned int bytes)
1051c349dbc7Sjsg {
1052c349dbc7Sjsg 	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1053c349dbc7Sjsg 	enum pipe pipe = REG_50080_TO_PIPE(offset);
1054c349dbc7Sjsg 	enum plane_id plane = REG_50080_TO_PLANE(offset);
1055c349dbc7Sjsg 	int event = SKL_FLIP_EVENT(pipe, plane);
1056c349dbc7Sjsg 
1057c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
1058c349dbc7Sjsg 	if (plane == PLANE_PRIMARY) {
1059c349dbc7Sjsg 		vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
1060c349dbc7Sjsg 		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
1061c349dbc7Sjsg 	} else {
1062c349dbc7Sjsg 		vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
1063c349dbc7Sjsg 	}
1064c349dbc7Sjsg 
1065c349dbc7Sjsg 	if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC)
1066c349dbc7Sjsg 		intel_vgpu_trigger_virtual_event(vgpu, event);
1067c349dbc7Sjsg 	else
1068c349dbc7Sjsg 		set_bit(event, vgpu->irq.flip_done_event[pipe]);
1069c349dbc7Sjsg 
1070c349dbc7Sjsg 	return 0;
1071c349dbc7Sjsg }
1072c349dbc7Sjsg 
trigger_aux_channel_interrupt(struct intel_vgpu * vgpu,unsigned int reg)1073c349dbc7Sjsg static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
1074c349dbc7Sjsg 		unsigned int reg)
1075c349dbc7Sjsg {
1076c349dbc7Sjsg 	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1077c349dbc7Sjsg 	enum intel_gvt_event_type event;
1078c349dbc7Sjsg 
1079c349dbc7Sjsg 	if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
1080c349dbc7Sjsg 		event = AUX_CHANNEL_A;
1081c349dbc7Sjsg 	else if (reg == _PCH_DPB_AUX_CH_CTL ||
1082c349dbc7Sjsg 		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_B)))
1083c349dbc7Sjsg 		event = AUX_CHANNEL_B;
1084c349dbc7Sjsg 	else if (reg == _PCH_DPC_AUX_CH_CTL ||
1085c349dbc7Sjsg 		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_C)))
1086c349dbc7Sjsg 		event = AUX_CHANNEL_C;
1087c349dbc7Sjsg 	else if (reg == _PCH_DPD_AUX_CH_CTL ||
1088c349dbc7Sjsg 		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D)))
1089c349dbc7Sjsg 		event = AUX_CHANNEL_D;
1090c349dbc7Sjsg 	else {
1091c349dbc7Sjsg 		drm_WARN_ON(&dev_priv->drm, true);
1092c349dbc7Sjsg 		return -EINVAL;
1093c349dbc7Sjsg 	}
1094c349dbc7Sjsg 
1095c349dbc7Sjsg 	intel_vgpu_trigger_virtual_event(vgpu, event);
1096c349dbc7Sjsg 	return 0;
1097c349dbc7Sjsg }
1098c349dbc7Sjsg 
dp_aux_ch_ctl_trans_done(struct intel_vgpu * vgpu,u32 value,unsigned int reg,int len,bool data_valid)1099c349dbc7Sjsg static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
1100c349dbc7Sjsg 		unsigned int reg, int len, bool data_valid)
1101c349dbc7Sjsg {
1102c349dbc7Sjsg 	/* mark transaction done */
1103c349dbc7Sjsg 	value |= DP_AUX_CH_CTL_DONE;
1104c349dbc7Sjsg 	value &= ~DP_AUX_CH_CTL_SEND_BUSY;
1105c349dbc7Sjsg 	value &= ~DP_AUX_CH_CTL_RECEIVE_ERROR;
1106c349dbc7Sjsg 
1107c349dbc7Sjsg 	if (data_valid)
1108c349dbc7Sjsg 		value &= ~DP_AUX_CH_CTL_TIME_OUT_ERROR;
1109c349dbc7Sjsg 	else
1110c349dbc7Sjsg 		value |= DP_AUX_CH_CTL_TIME_OUT_ERROR;
1111c349dbc7Sjsg 
1112c349dbc7Sjsg 	/* message size */
1113c349dbc7Sjsg 	value &= ~(0xf << 20);
1114c349dbc7Sjsg 	value |= (len << 20);
1115c349dbc7Sjsg 	vgpu_vreg(vgpu, reg) = value;
1116c349dbc7Sjsg 
1117c349dbc7Sjsg 	if (value & DP_AUX_CH_CTL_INTERRUPT)
1118c349dbc7Sjsg 		return trigger_aux_channel_interrupt(vgpu, reg);
1119c349dbc7Sjsg 	return 0;
1120c349dbc7Sjsg }
1121c349dbc7Sjsg 
dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data * dpcd,u8 t)1122c349dbc7Sjsg static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
1123c349dbc7Sjsg 		u8 t)
1124c349dbc7Sjsg {
1125c349dbc7Sjsg 	if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
1126c349dbc7Sjsg 		/* training pattern 1 for CR */
1127c349dbc7Sjsg 		/* set LANE0_CR_DONE, LANE1_CR_DONE */
1128c349dbc7Sjsg 		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE;
1129c349dbc7Sjsg 		/* set LANE2_CR_DONE, LANE3_CR_DONE */
1130c349dbc7Sjsg 		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE;
1131c349dbc7Sjsg 	} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
1132c349dbc7Sjsg 			DPCD_TRAINING_PATTERN_2) {
1133c349dbc7Sjsg 		/* training pattern 2 for EQ */
1134c349dbc7Sjsg 		/* Set CHANNEL_EQ_DONE and  SYMBOL_LOCKED for Lane0_1 */
1135c349dbc7Sjsg 		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE;
1136c349dbc7Sjsg 		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED;
1137c349dbc7Sjsg 		/* Set CHANNEL_EQ_DONE and  SYMBOL_LOCKED for Lane2_3 */
1138c349dbc7Sjsg 		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE;
1139c349dbc7Sjsg 		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED;
1140c349dbc7Sjsg 		/* set INTERLANE_ALIGN_DONE */
1141c349dbc7Sjsg 		dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |=
1142c349dbc7Sjsg 			DPCD_INTERLANE_ALIGN_DONE;
1143c349dbc7Sjsg 	} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
1144c349dbc7Sjsg 			DPCD_LINK_TRAINING_DISABLED) {
1145c349dbc7Sjsg 		/* finish link training */
1146c349dbc7Sjsg 		/* set sink status as synchronized */
1147c349dbc7Sjsg 		dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC;
1148c349dbc7Sjsg 	}
1149c349dbc7Sjsg }
1150c349dbc7Sjsg 
1151c349dbc7Sjsg #define _REG_HSW_DP_AUX_CH_CTL(dp) \
1152c349dbc7Sjsg 	((dp) ? (_PCH_DPB_AUX_CH_CTL + ((dp)-1)*0x100) : 0x64010)
1153c349dbc7Sjsg 
1154c349dbc7Sjsg #define _REG_SKL_DP_AUX_CH_CTL(dp) (0x64010 + (dp) * 0x100)
1155c349dbc7Sjsg 
1156c349dbc7Sjsg #define OFFSET_TO_DP_AUX_PORT(offset) (((offset) & 0xF00) >> 8)
1157c349dbc7Sjsg 
1158c349dbc7Sjsg #define dpy_is_valid_port(port)	\
1159c349dbc7Sjsg 		(((port) >= PORT_A) && ((port) < I915_MAX_PORTS))
1160c349dbc7Sjsg 
dp_aux_ch_ctl_mmio_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1161c349dbc7Sjsg static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
1162c349dbc7Sjsg 		unsigned int offset, void *p_data, unsigned int bytes)
1163c349dbc7Sjsg {
1164c349dbc7Sjsg 	struct intel_vgpu_display *display = &vgpu->display;
1165c349dbc7Sjsg 	int msg, addr, ctrl, op, len;
1166c349dbc7Sjsg 	int port_index = OFFSET_TO_DP_AUX_PORT(offset);
1167c349dbc7Sjsg 	struct intel_vgpu_dpcd_data *dpcd = NULL;
1168c349dbc7Sjsg 	struct intel_vgpu_port *port = NULL;
1169c349dbc7Sjsg 	u32 data;
1170c349dbc7Sjsg 
1171c349dbc7Sjsg 	if (!dpy_is_valid_port(port_index)) {
1172c349dbc7Sjsg 		gvt_vgpu_err("Unsupported DP port access!\n");
1173c349dbc7Sjsg 		return 0;
1174c349dbc7Sjsg 	}
1175c349dbc7Sjsg 
1176c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
1177c349dbc7Sjsg 	data = vgpu_vreg(vgpu, offset);
1178c349dbc7Sjsg 
11795ca02815Sjsg 	if ((GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9)
1180c349dbc7Sjsg 		&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
1181c349dbc7Sjsg 		/* SKL DPB/C/D aux ctl register changed */
1182c349dbc7Sjsg 		return 0;
1183c349dbc7Sjsg 	} else if (IS_BROADWELL(vgpu->gvt->gt->i915) &&
1184c349dbc7Sjsg 		   offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
1185c349dbc7Sjsg 		/* write to the data registers */
1186c349dbc7Sjsg 		return 0;
1187c349dbc7Sjsg 	}
1188c349dbc7Sjsg 
1189c349dbc7Sjsg 	if (!(data & DP_AUX_CH_CTL_SEND_BUSY)) {
1190c349dbc7Sjsg 		/* just want to clear the sticky bits */
1191c349dbc7Sjsg 		vgpu_vreg(vgpu, offset) = 0;
1192c349dbc7Sjsg 		return 0;
1193c349dbc7Sjsg 	}
1194c349dbc7Sjsg 
1195c349dbc7Sjsg 	port = &display->ports[port_index];
1196c349dbc7Sjsg 	dpcd = port->dpcd;
1197c349dbc7Sjsg 
1198c349dbc7Sjsg 	/* read out message from DATA1 register */
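	/*
	 * The decode below assumes the standard AUX request header layout
	 * in DATA1: bits 31:24 carry the command byte (request type in the
	 * high nibble), bits 23:8 the DPCD address, and bits 7:0 the
	 * length field (transfer byte count minus one).
	 */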
1199c349dbc7Sjsg 	msg = vgpu_vreg(vgpu, offset + 4);
1200c349dbc7Sjsg 	addr = (msg >> 8) & 0xffff;
1201c349dbc7Sjsg 	ctrl = (msg >> 24) & 0xff;
1202c349dbc7Sjsg 	len = msg & 0xff;
1203c349dbc7Sjsg 	op = ctrl >> 4;
1204c349dbc7Sjsg 
1205c349dbc7Sjsg 	if (op == GVT_AUX_NATIVE_WRITE) {
1206c349dbc7Sjsg 		int t;
1207c349dbc7Sjsg 		u8 buf[16];
1208c349dbc7Sjsg 
1209c349dbc7Sjsg 		if ((addr + len + 1) >= DPCD_SIZE) {
1210c349dbc7Sjsg 			/*
1211c349dbc7Sjsg 			 * The write request exceeds what we support.
1212c349dbc7Sjsg 			 * DPCD spec: When a Source Device is writing to a DPCD
1213c349dbc7Sjsg 			 * address not supported by the Sink Device, the Sink
1214c349dbc7Sjsg 			 * Device shall reply with AUX NACK and "M" equal to
1215c349dbc7Sjsg 			 * zero.
1216c349dbc7Sjsg 			 */
1217c349dbc7Sjsg 
1218c349dbc7Sjsg 			/* NAK the write */
1219c349dbc7Sjsg 			vgpu_vreg(vgpu, offset + 4) = AUX_NATIVE_REPLY_NAK;
1220c349dbc7Sjsg 			dp_aux_ch_ctl_trans_done(vgpu, data, offset, 2, true);
1221c349dbc7Sjsg 			return 0;
1222c349dbc7Sjsg 		}
1223c349dbc7Sjsg 
1224c349dbc7Sjsg 		/*
1225c349dbc7Sjsg 		 * Write request format: Header (command + address + size) occupies
1226c349dbc7Sjsg 		 * 4 bytes, followed by (len + 1) bytes of data. See details at
1227c349dbc7Sjsg 		 * intel_dp_aux_transfer().
1228c349dbc7Sjsg 		 */
1229c349dbc7Sjsg 		if ((len + 1 + 4) > AUX_BURST_SIZE) {
1230c349dbc7Sjsg 			gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
1231c349dbc7Sjsg 			return -EINVAL;
1232c349dbc7Sjsg 		}
1233c349dbc7Sjsg 
1234c349dbc7Sjsg 		/* unpack data from vreg to buf */
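		/*
		 * Each 32-bit DATA register carries four message bytes
		 * MSB-first, e.g. a vreg value of 0xaabbccdd unpacks to
		 * buf[] = aa bb cc dd.
		 */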
1235c349dbc7Sjsg 		for (t = 0; t < 4; t++) {
1236c349dbc7Sjsg 			u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4);
1237c349dbc7Sjsg 
1238c349dbc7Sjsg 			buf[t * 4] = (r >> 24) & 0xff;
1239c349dbc7Sjsg 			buf[t * 4 + 1] = (r >> 16) & 0xff;
1240c349dbc7Sjsg 			buf[t * 4 + 2] = (r >> 8) & 0xff;
1241c349dbc7Sjsg 			buf[t * 4 + 3] = r & 0xff;
1242c349dbc7Sjsg 		}
1243c349dbc7Sjsg 
1244c349dbc7Sjsg 		/* write to virtual DPCD */
1245c349dbc7Sjsg 		if (dpcd && dpcd->data_valid) {
1246c349dbc7Sjsg 			for (t = 0; t <= len; t++) {
1247c349dbc7Sjsg 				int p = addr + t;
1248c349dbc7Sjsg 
1249c349dbc7Sjsg 				dpcd->data[p] = buf[t];
1250c349dbc7Sjsg 				/* check for link training */
1251c349dbc7Sjsg 				if (p == DPCD_TRAINING_PATTERN_SET)
1252c349dbc7Sjsg 					dp_aux_ch_ctl_link_training(dpcd,
1253c349dbc7Sjsg 							buf[t]);
1254c349dbc7Sjsg 			}
1255c349dbc7Sjsg 		}
1256c349dbc7Sjsg 
1257c349dbc7Sjsg 		/* ACK the write */
1258c349dbc7Sjsg 		vgpu_vreg(vgpu, offset + 4) = 0;
1259c349dbc7Sjsg 		dp_aux_ch_ctl_trans_done(vgpu, data, offset, 1,
1260c349dbc7Sjsg 				dpcd && dpcd->data_valid);
1261c349dbc7Sjsg 		return 0;
1262c349dbc7Sjsg 	}
1263c349dbc7Sjsg 
1264c349dbc7Sjsg 	if (op == GVT_AUX_NATIVE_READ) {
1265c349dbc7Sjsg 		int idx, i, ret = 0;
1266c349dbc7Sjsg 
1267c349dbc7Sjsg 		if ((addr + len + 1) >= DPCD_SIZE) {
1268c349dbc7Sjsg 			/*
1269c349dbc7Sjsg 			 * The read request exceeds what we support.
1270c349dbc7Sjsg 			 * DPCD spec: A Sink Device receiving a Native AUX CH
1271c349dbc7Sjsg 			 * read request for an unsupported DPCD address must
1272c349dbc7Sjsg 			 * reply with an AUX ACK and read data set equal to
1273c349dbc7Sjsg 			 * zero instead of replying with AUX NACK.
1274c349dbc7Sjsg 			 */
1275c349dbc7Sjsg 
1276c349dbc7Sjsg 			/* ACK the READ */
1277c349dbc7Sjsg 			vgpu_vreg(vgpu, offset + 4) = 0;
1278c349dbc7Sjsg 			vgpu_vreg(vgpu, offset + 8) = 0;
1279c349dbc7Sjsg 			vgpu_vreg(vgpu, offset + 12) = 0;
1280c349dbc7Sjsg 			vgpu_vreg(vgpu, offset + 16) = 0;
1281c349dbc7Sjsg 			vgpu_vreg(vgpu, offset + 20) = 0;
1282c349dbc7Sjsg 
1283c349dbc7Sjsg 			dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
1284c349dbc7Sjsg 					true);
1285c349dbc7Sjsg 			return 0;
1286c349dbc7Sjsg 		}
1287c349dbc7Sjsg 
1288c349dbc7Sjsg 		for (idx = 1; idx <= 5; idx++) {
1289c349dbc7Sjsg 			/* clear the data registers */
1290c349dbc7Sjsg 			vgpu_vreg(vgpu, offset + 4 * idx) = 0;
1291c349dbc7Sjsg 		}
1292c349dbc7Sjsg 
1293c349dbc7Sjsg 		/*
1294c349dbc7Sjsg 		 * Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
1295c349dbc7Sjsg 		 */
1296c349dbc7Sjsg 		if ((len + 2) > AUX_BURST_SIZE) {
1297c349dbc7Sjsg 			gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
1298c349dbc7Sjsg 			return -EINVAL;
1299c349dbc7Sjsg 		}
1300c349dbc7Sjsg 
1301c349dbc7Sjsg 		/* read from virtual DPCD to vreg */
1302c349dbc7Sjsg 		/* first 4 bytes: [ACK][addr][addr+1][addr+2] */
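		/*
		 * Byte 0 of the reply stream is the ACK header, so DPCD byte
		 * (addr + i - 1) lands at stream position i. Bytes accumulate
		 * MSB-first in 'ret' and are flushed to the next DATA
		 * register every four bytes or at the end of the message.
		 */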
1303c349dbc7Sjsg 		if (dpcd && dpcd->data_valid) {
1304c349dbc7Sjsg 			for (i = 1; i <= (len + 1); i++) {
1305c349dbc7Sjsg 				int t;
1306c349dbc7Sjsg 
1307c349dbc7Sjsg 				t = dpcd->data[addr + i - 1];
1308c349dbc7Sjsg 				t <<= (24 - 8 * (i % 4));
1309c349dbc7Sjsg 				ret |= t;
1310c349dbc7Sjsg 
1311c349dbc7Sjsg 				if ((i % 4 == 3) || (i == (len + 1))) {
1312c349dbc7Sjsg 					vgpu_vreg(vgpu, offset +
1313c349dbc7Sjsg 							(i / 4 + 1) * 4) = ret;
1314c349dbc7Sjsg 					ret = 0;
1315c349dbc7Sjsg 				}
1316c349dbc7Sjsg 			}
1317c349dbc7Sjsg 		}
1318c349dbc7Sjsg 		dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
1319c349dbc7Sjsg 				dpcd && dpcd->data_valid);
1320c349dbc7Sjsg 		return 0;
1321c349dbc7Sjsg 	}
1322c349dbc7Sjsg 
1323c349dbc7Sjsg 	/* i2c transaction starts */
1324c349dbc7Sjsg 	intel_gvt_i2c_handle_aux_ch_write(vgpu, port_index, offset, p_data);
1325c349dbc7Sjsg 
1326c349dbc7Sjsg 	if (data & DP_AUX_CH_CTL_INTERRUPT)
1327c349dbc7Sjsg 		trigger_aux_channel_interrupt(vgpu, offset);
1328c349dbc7Sjsg 	return 0;
1329c349dbc7Sjsg }
1330c349dbc7Sjsg 
mbctl_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1331c349dbc7Sjsg static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset,
1332c349dbc7Sjsg 		void *p_data, unsigned int bytes)
1333c349dbc7Sjsg {
1334c349dbc7Sjsg 	*(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH);
1335c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
1336c349dbc7Sjsg 	return 0;
1337c349dbc7Sjsg }
1338c349dbc7Sjsg 
vga_control_mmio_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1339c349dbc7Sjsg static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1340c349dbc7Sjsg 		void *p_data, unsigned int bytes)
1341c349dbc7Sjsg {
1342c349dbc7Sjsg 	bool vga_disable;
1343c349dbc7Sjsg 
1344c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
1345c349dbc7Sjsg 	vga_disable = vgpu_vreg(vgpu, offset) & VGA_DISP_DISABLE;
1346c349dbc7Sjsg 
1347c349dbc7Sjsg 	gvt_dbg_core("vgpu%d: %s VGA mode\n", vgpu->id,
1348c349dbc7Sjsg 			vga_disable ? "Disable" : "Enable");
1349c349dbc7Sjsg 	return 0;
1350c349dbc7Sjsg }
1351c349dbc7Sjsg 
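/*
 * The vGPU keeps a small cache of sideband (SBI) register values:
 * lookups scan display->sbi.registers linearly, and a write to an
 * uncached offset appends a new entry until SBI_REG_MAX is reached.
 */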
read_virtual_sbi_register(struct intel_vgpu * vgpu,unsigned int sbi_offset)1352c349dbc7Sjsg static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu,
1353c349dbc7Sjsg 		unsigned int sbi_offset)
1354c349dbc7Sjsg {
1355c349dbc7Sjsg 	struct intel_vgpu_display *display = &vgpu->display;
1356c349dbc7Sjsg 	int num = display->sbi.number;
1357c349dbc7Sjsg 	int i;
1358c349dbc7Sjsg 
1359c349dbc7Sjsg 	for (i = 0; i < num; ++i)
1360c349dbc7Sjsg 		if (display->sbi.registers[i].offset == sbi_offset)
1361c349dbc7Sjsg 			break;
1362c349dbc7Sjsg 
1363c349dbc7Sjsg 	if (i == num)
1364c349dbc7Sjsg 		return 0;
1365c349dbc7Sjsg 
1366c349dbc7Sjsg 	return display->sbi.registers[i].value;
1367c349dbc7Sjsg }
1368c349dbc7Sjsg 
write_virtual_sbi_register(struct intel_vgpu * vgpu,unsigned int offset,u32 value)1369c349dbc7Sjsg static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
1370c349dbc7Sjsg 		unsigned int offset, u32 value)
1371c349dbc7Sjsg {
1372c349dbc7Sjsg 	struct intel_vgpu_display *display = &vgpu->display;
1373c349dbc7Sjsg 	int num = display->sbi.number;
1374c349dbc7Sjsg 	int i;
1375c349dbc7Sjsg 
1376c349dbc7Sjsg 	for (i = 0; i < num; ++i) {
1377c349dbc7Sjsg 		if (display->sbi.registers[i].offset == offset)
1378c349dbc7Sjsg 			break;
1379c349dbc7Sjsg 	}
1380c349dbc7Sjsg 
1381c349dbc7Sjsg 	if (i == num) {
1382c349dbc7Sjsg 		if (num == SBI_REG_MAX) {
1383c349dbc7Sjsg 			gvt_vgpu_err("SBI caching has reached its maximum limit\n");
1384c349dbc7Sjsg 			return;
1385c349dbc7Sjsg 		}
1386c349dbc7Sjsg 		display->sbi.number++;
1387c349dbc7Sjsg 	}
1388c349dbc7Sjsg 
1389c349dbc7Sjsg 	display->sbi.registers[i].offset = offset;
1390c349dbc7Sjsg 	display->sbi.registers[i].value = value;
1391c349dbc7Sjsg }
1392c349dbc7Sjsg 
sbi_data_mmio_read(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1393c349dbc7Sjsg static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
1394c349dbc7Sjsg 		void *p_data, unsigned int bytes)
1395c349dbc7Sjsg {
1396c349dbc7Sjsg 	if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
1397c349dbc7Sjsg 				SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
1398c349dbc7Sjsg 		unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
1399c349dbc7Sjsg 				SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
1400c349dbc7Sjsg 		vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
1401c349dbc7Sjsg 				sbi_offset);
1402c349dbc7Sjsg 	}
1403c349dbc7Sjsg 	read_vreg(vgpu, offset, p_data, bytes);
1404c349dbc7Sjsg 	return 0;
1405c349dbc7Sjsg }
1406c349dbc7Sjsg 
sbi_ctl_mmio_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1407c349dbc7Sjsg static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1408c349dbc7Sjsg 		void *p_data, unsigned int bytes)
1409c349dbc7Sjsg {
1410c349dbc7Sjsg 	u32 data;
1411c349dbc7Sjsg 
1412c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
1413c349dbc7Sjsg 	data = vgpu_vreg(vgpu, offset);
1414c349dbc7Sjsg 
1415c349dbc7Sjsg 	data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT);
1416c349dbc7Sjsg 	data |= SBI_READY;
1417c349dbc7Sjsg 
1418c349dbc7Sjsg 	data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT);
1419c349dbc7Sjsg 	data |= SBI_RESPONSE_SUCCESS;
1420c349dbc7Sjsg 
1421c349dbc7Sjsg 	vgpu_vreg(vgpu, offset) = data;
1422c349dbc7Sjsg 
1423c349dbc7Sjsg 	if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
1424c349dbc7Sjsg 				SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
1425c349dbc7Sjsg 		unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
1426c349dbc7Sjsg 				SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
1427c349dbc7Sjsg 
1428c349dbc7Sjsg 		write_virtual_sbi_register(vgpu, sbi_offset,
1429c349dbc7Sjsg 					   vgpu_vreg_t(vgpu, SBI_DATA));
1430c349dbc7Sjsg 	}
1431c349dbc7Sjsg 	return 0;
1432c349dbc7Sjsg }
1433c349dbc7Sjsg 
1434c349dbc7Sjsg #define _vgtif_reg(x) \
1435c349dbc7Sjsg 	(VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))
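/*
 * _vgtif_reg(x) yields the MMIO offset of field 'x' of the vgt_if
 * structure exposed to the guest in the PVINFO page, e.g.
 * _vgtif_reg(g2v_notify) addresses the guest-to-host notification slot.
 */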
1436c349dbc7Sjsg 
pvinfo_mmio_read(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1437c349dbc7Sjsg static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
1438c349dbc7Sjsg 		void *p_data, unsigned int bytes)
1439c349dbc7Sjsg {
1440c349dbc7Sjsg 	bool invalid_read = false;
1441c349dbc7Sjsg 
1442c349dbc7Sjsg 	read_vreg(vgpu, offset, p_data, bytes);
1443c349dbc7Sjsg 
1444c349dbc7Sjsg 	switch (offset) {
1445c349dbc7Sjsg 	case _vgtif_reg(magic) ... _vgtif_reg(vgt_id):
1446c349dbc7Sjsg 		if (offset + bytes > _vgtif_reg(vgt_id) + 4)
1447c349dbc7Sjsg 			invalid_read = true;
1448c349dbc7Sjsg 		break;
1449c349dbc7Sjsg 	case _vgtif_reg(avail_rs.mappable_gmadr.base) ...
1450c349dbc7Sjsg 		_vgtif_reg(avail_rs.fence_num):
1451c349dbc7Sjsg 		if (offset + bytes >
1452c349dbc7Sjsg 			_vgtif_reg(avail_rs.fence_num) + 4)
1453c349dbc7Sjsg 			invalid_read = true;
1454c349dbc7Sjsg 		break;
1455c349dbc7Sjsg 	case 0x78010:	/* vgt_caps */
1456c349dbc7Sjsg 	case 0x7881c:
1457c349dbc7Sjsg 		break;
1458c349dbc7Sjsg 	default:
1459c349dbc7Sjsg 		invalid_read = true;
1460c349dbc7Sjsg 		break;
1461c349dbc7Sjsg 	}
1462c349dbc7Sjsg 	if (invalid_read)
1463c349dbc7Sjsg 		gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
1464c349dbc7Sjsg 				offset, bytes, *(u32 *)p_data);
1465c349dbc7Sjsg 	vgpu->pv_notified = true;
1466c349dbc7Sjsg 	return 0;
1467c349dbc7Sjsg }
1468c349dbc7Sjsg 
handle_g2v_notification(struct intel_vgpu * vgpu,int notification)1469c349dbc7Sjsg static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
1470c349dbc7Sjsg {
1471c349dbc7Sjsg 	enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
1472c349dbc7Sjsg 	struct intel_vgpu_mm *mm;
1473c349dbc7Sjsg 	u64 *pdps;
1474c349dbc7Sjsg 
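	/*
	 * The guest publishes its PPGTT root table pointers through the
	 * PVINFO pdp[] slots; reinterpret them as an array of 64-bit PDPs.
	 */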
1475c349dbc7Sjsg 	pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
1476c349dbc7Sjsg 
1477c349dbc7Sjsg 	switch (notification) {
1478c349dbc7Sjsg 	case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
1479c349dbc7Sjsg 		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
1480ad8b1aafSjsg 		fallthrough;
1481c349dbc7Sjsg 	case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
1482c349dbc7Sjsg 		mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
1483c349dbc7Sjsg 		return PTR_ERR_OR_ZERO(mm);
1484c349dbc7Sjsg 	case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
1485c349dbc7Sjsg 	case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
1486c349dbc7Sjsg 		return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
1487c349dbc7Sjsg 	case VGT_G2V_EXECLIST_CONTEXT_CREATE:
1488c349dbc7Sjsg 	case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
1489c349dbc7Sjsg 	case 1:	/* Remove this in guest driver. */
1490c349dbc7Sjsg 		break;
1491c349dbc7Sjsg 	default:
1492c349dbc7Sjsg 		gvt_vgpu_err("Invalid PV notification %d\n", notification);
1493c349dbc7Sjsg 	}
1494c349dbc7Sjsg 	return 0;
1495c349dbc7Sjsg }
1496c349dbc7Sjsg 
send_display_ready_uevent(struct intel_vgpu * vgpu,int ready)1497c349dbc7Sjsg static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
1498c349dbc7Sjsg {
1499c349dbc7Sjsg 	struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj;
1500c349dbc7Sjsg 	char *env[3] = {NULL, NULL, NULL};
1501c349dbc7Sjsg 	char vmid_str[20];
1502c349dbc7Sjsg 	char display_ready_str[20];
1503c349dbc7Sjsg 
1504c349dbc7Sjsg 	snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready);
1505c349dbc7Sjsg 	env[0] = display_ready_str;
1506c349dbc7Sjsg 
1507c349dbc7Sjsg 	snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
1508c349dbc7Sjsg 	env[1] = vmid_str;
1509c349dbc7Sjsg 
1510c349dbc7Sjsg 	return kobject_uevent_env(kobj, KOBJ_ADD, env);
1511c349dbc7Sjsg }
1512c349dbc7Sjsg 
pvinfo_mmio_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1513c349dbc7Sjsg static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1514c349dbc7Sjsg 		void *p_data, unsigned int bytes)
1515c349dbc7Sjsg {
1516c349dbc7Sjsg 	u32 data = *(u32 *)p_data;
1517c349dbc7Sjsg 	bool invalid_write = false;
1518c349dbc7Sjsg 
1519c349dbc7Sjsg 	switch (offset) {
1520c349dbc7Sjsg 	case _vgtif_reg(display_ready):
1521c349dbc7Sjsg 		send_display_ready_uevent(vgpu, data ? 1 : 0);
1522c349dbc7Sjsg 		break;
1523c349dbc7Sjsg 	case _vgtif_reg(g2v_notify):
1524c349dbc7Sjsg 		handle_g2v_notification(vgpu, data);
1525c349dbc7Sjsg 		break;
1526c349dbc7Sjsg 	/* add xhot and yhot to handled list to avoid error log */
1527c349dbc7Sjsg 	case _vgtif_reg(cursor_x_hot):
1528c349dbc7Sjsg 	case _vgtif_reg(cursor_y_hot):
1529c349dbc7Sjsg 	case _vgtif_reg(pdp[0].lo):
1530c349dbc7Sjsg 	case _vgtif_reg(pdp[0].hi):
1531c349dbc7Sjsg 	case _vgtif_reg(pdp[1].lo):
1532c349dbc7Sjsg 	case _vgtif_reg(pdp[1].hi):
1533c349dbc7Sjsg 	case _vgtif_reg(pdp[2].lo):
1534c349dbc7Sjsg 	case _vgtif_reg(pdp[2].hi):
1535c349dbc7Sjsg 	case _vgtif_reg(pdp[3].lo):
1536c349dbc7Sjsg 	case _vgtif_reg(pdp[3].hi):
1537c349dbc7Sjsg 	case _vgtif_reg(execlist_context_descriptor_lo):
1538c349dbc7Sjsg 	case _vgtif_reg(execlist_context_descriptor_hi):
1539c349dbc7Sjsg 		break;
1540c349dbc7Sjsg 	case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
1541c349dbc7Sjsg 		invalid_write = true;
1542c349dbc7Sjsg 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
1543c349dbc7Sjsg 		break;
1544c349dbc7Sjsg 	default:
1545c349dbc7Sjsg 		invalid_write = true;
1546c349dbc7Sjsg 		gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
1547c349dbc7Sjsg 				offset, bytes, data);
1548c349dbc7Sjsg 		break;
1549c349dbc7Sjsg 	}
1550c349dbc7Sjsg 
1551c349dbc7Sjsg 	if (!invalid_write)
1552c349dbc7Sjsg 		write_vreg(vgpu, offset, p_data, bytes);
1553c349dbc7Sjsg 
1554c349dbc7Sjsg 	return 0;
1555c349dbc7Sjsg }
1556c349dbc7Sjsg 
pf_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1557c349dbc7Sjsg static int pf_write(struct intel_vgpu *vgpu,
1558c349dbc7Sjsg 		unsigned int offset, void *p_data, unsigned int bytes)
1559c349dbc7Sjsg {
1560c349dbc7Sjsg 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1561c349dbc7Sjsg 	u32 val = *(u32 *)p_data;
1562c349dbc7Sjsg 
1563c349dbc7Sjsg 	if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
1564c349dbc7Sjsg 	   offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
1565f005ef32Sjsg 	   offset == _PS_1C_CTRL) && (val & PS_BINDING_MASK) != PS_BINDING_PIPE) {
1566c349dbc7Sjsg 		drm_WARN_ONCE(&i915->drm, true,
1567c349dbc7Sjsg 			      "VM(%d): guest is trying to scale a plane\n",
1568c349dbc7Sjsg 			      vgpu->id);
1569c349dbc7Sjsg 		return 0;
1570c349dbc7Sjsg 	}
1571c349dbc7Sjsg 
1572c349dbc7Sjsg 	return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
1573c349dbc7Sjsg }
1574c349dbc7Sjsg 
power_well_ctl_mmio_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1575c349dbc7Sjsg static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
1576c349dbc7Sjsg 		unsigned int offset, void *p_data, unsigned int bytes)
1577c349dbc7Sjsg {
1578c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
1579c349dbc7Sjsg 
1580c349dbc7Sjsg 	if (vgpu_vreg(vgpu, offset) &
1581c349dbc7Sjsg 	    HSW_PWR_WELL_CTL_REQ(HSW_PW_CTL_IDX_GLOBAL))
1582c349dbc7Sjsg 		vgpu_vreg(vgpu, offset) |=
1583c349dbc7Sjsg 			HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
1584c349dbc7Sjsg 	else
1585c349dbc7Sjsg 		vgpu_vreg(vgpu, offset) &=
1586c349dbc7Sjsg 			~HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
1587c349dbc7Sjsg 	return 0;
1588c349dbc7Sjsg }
1589c349dbc7Sjsg 
gen9_dbuf_ctl_mmio_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1590c349dbc7Sjsg static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
1591c349dbc7Sjsg 		unsigned int offset, void *p_data, unsigned int bytes)
1592c349dbc7Sjsg {
1593c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
1594c349dbc7Sjsg 
1595c349dbc7Sjsg 	if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
1596c349dbc7Sjsg 		vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
1597c349dbc7Sjsg 	else
1598c349dbc7Sjsg 		vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;
1599c349dbc7Sjsg 
1600c349dbc7Sjsg 	return 0;
1601c349dbc7Sjsg }
1602c349dbc7Sjsg 
fpga_dbg_mmio_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1603c349dbc7Sjsg static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
1604c349dbc7Sjsg 	unsigned int offset, void *p_data, unsigned int bytes)
1605c349dbc7Sjsg {
1606c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
1607c349dbc7Sjsg 
1608c349dbc7Sjsg 	if (vgpu_vreg(vgpu, offset) & FPGA_DBG_RM_NOCLAIM)
1609c349dbc7Sjsg 		vgpu_vreg(vgpu, offset) &= ~FPGA_DBG_RM_NOCLAIM;
1610c349dbc7Sjsg 	return 0;
1611c349dbc7Sjsg }
1612c349dbc7Sjsg 
dma_ctrl_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1613c349dbc7Sjsg static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
1614c349dbc7Sjsg 		void *p_data, unsigned int bytes)
1615c349dbc7Sjsg {
1616c349dbc7Sjsg 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1617c349dbc7Sjsg 	u32 mode;
1618c349dbc7Sjsg 
1619c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
1620c349dbc7Sjsg 	mode = vgpu_vreg(vgpu, offset);
1621c349dbc7Sjsg 
1622c349dbc7Sjsg 	if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
1623c349dbc7Sjsg 		drm_WARN_ONCE(&i915->drm, 1,
1624c349dbc7Sjsg 				"VM(%d): iGVT-g doesn't support GuC\n",
1625c349dbc7Sjsg 				vgpu->id);
1626c349dbc7Sjsg 		return 0;
1627c349dbc7Sjsg 	}
1628c349dbc7Sjsg 
1629c349dbc7Sjsg 	return 0;
1630c349dbc7Sjsg }
1631c349dbc7Sjsg 
gen9_trtte_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1632c349dbc7Sjsg static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
1633c349dbc7Sjsg 		void *p_data, unsigned int bytes)
1634c349dbc7Sjsg {
1635c349dbc7Sjsg 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1636c349dbc7Sjsg 	u32 trtte = *(u32 *)p_data;
1637c349dbc7Sjsg 
1638c349dbc7Sjsg 	if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
1639c349dbc7Sjsg 		drm_WARN(&i915->drm, 1,
1640c349dbc7Sjsg 				"VM(%d): Use physical address for TRTT!\n",
1641c349dbc7Sjsg 				vgpu->id);
1642c349dbc7Sjsg 		return -EINVAL;
1643c349dbc7Sjsg 	}
1644c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
1645c349dbc7Sjsg 
1646c349dbc7Sjsg 	return 0;
1647c349dbc7Sjsg }
1648c349dbc7Sjsg 
gen9_trtt_chicken_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1649c349dbc7Sjsg static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
1650c349dbc7Sjsg 		void *p_data, unsigned int bytes)
1651c349dbc7Sjsg {
1652c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
1653c349dbc7Sjsg 	return 0;
1654c349dbc7Sjsg }
1655c349dbc7Sjsg 
dpll_status_read(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1656c349dbc7Sjsg static int dpll_status_read(struct intel_vgpu *vgpu, unsigned int offset,
1657c349dbc7Sjsg 		void *p_data, unsigned int bytes)
1658c349dbc7Sjsg {
1659c349dbc7Sjsg 	u32 v = 0;
1660c349dbc7Sjsg 
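	/*
	 * Mirror bit 31 of each virtual PLL control register
	 * (0x46010/0x46014/0x46040/0x46060) into the corresponding status
	 * bit (0/8/16/24) read back by the guest, presumably reporting an
	 * enabled PLL as locked.
	 */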
1661c349dbc7Sjsg 	if (vgpu_vreg(vgpu, 0x46010) & (1 << 31))
1662c349dbc7Sjsg 		v |= (1 << 0);
1663c349dbc7Sjsg 
1664c349dbc7Sjsg 	if (vgpu_vreg(vgpu, 0x46014) & (1 << 31))
1665c349dbc7Sjsg 		v |= (1 << 8);
1666c349dbc7Sjsg 
1667c349dbc7Sjsg 	if (vgpu_vreg(vgpu, 0x46040) & (1 << 31))
1668c349dbc7Sjsg 		v |= (1 << 16);
1669c349dbc7Sjsg 
1670c349dbc7Sjsg 	if (vgpu_vreg(vgpu, 0x46060) & (1 << 31))
1671c349dbc7Sjsg 		v |= (1 << 24);
1672c349dbc7Sjsg 
1673c349dbc7Sjsg 	vgpu_vreg(vgpu, offset) = v;
1674c349dbc7Sjsg 
1675c349dbc7Sjsg 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1676c349dbc7Sjsg }
1677c349dbc7Sjsg 
mailbox_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1678c349dbc7Sjsg static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
1679c349dbc7Sjsg 		void *p_data, unsigned int bytes)
1680c349dbc7Sjsg {
1681c349dbc7Sjsg 	u32 value = *(u32 *)p_data;
1682c349dbc7Sjsg 	u32 cmd = value & 0xff;
1683c349dbc7Sjsg 	u32 *data0 = &vgpu_vreg_t(vgpu, GEN6_PCODE_DATA);
1684c349dbc7Sjsg 
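	/*
	 * Emulate the pcode mailbox: the low byte of the written value
	 * selects the command, and results are returned to the guest
	 * through the GEN6_PCODE_DATA vreg (*data0).
	 */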
1685c349dbc7Sjsg 	switch (cmd) {
1686c349dbc7Sjsg 	case GEN9_PCODE_READ_MEM_LATENCY:
1687c349dbc7Sjsg 		if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
1688c349dbc7Sjsg 		    IS_KABYLAKE(vgpu->gvt->gt->i915) ||
1689ad8b1aafSjsg 		    IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
1690ad8b1aafSjsg 		    IS_COMETLAKE(vgpu->gvt->gt->i915)) {
1691c349dbc7Sjsg 			/**
1692c349dbc7Sjsg 			 * "Read memory latency" command on gen9.
1693c349dbc7Sjsg 			 * The memory latency values below were read
1694c349dbc7Sjsg 			 * from a Skylake platform.
1695c349dbc7Sjsg 			 */
1696c349dbc7Sjsg 			if (!*data0)
1697c349dbc7Sjsg 				*data0 = 0x1e1a1100;
1698c349dbc7Sjsg 			else
1699c349dbc7Sjsg 				*data0 = 0x61514b3d;
1700c349dbc7Sjsg 		} else if (IS_BROXTON(vgpu->gvt->gt->i915)) {
1701c349dbc7Sjsg 			/**
1702c349dbc7Sjsg 			 * "Read memory latency" command on gen9.
1703c349dbc7Sjsg 			 * The memory latency values below were read
1704c349dbc7Sjsg 			 * from a Broxton MRB.
1705c349dbc7Sjsg 			 */
1706c349dbc7Sjsg 			if (!*data0)
1707c349dbc7Sjsg 				*data0 = 0x16080707;
1708c349dbc7Sjsg 			else
1709c349dbc7Sjsg 				*data0 = 0x16161616;
1710c349dbc7Sjsg 		}
1711c349dbc7Sjsg 		break;
1712c349dbc7Sjsg 	case SKL_PCODE_CDCLK_CONTROL:
1713c349dbc7Sjsg 		if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
1714c349dbc7Sjsg 		    IS_KABYLAKE(vgpu->gvt->gt->i915) ||
1715ad8b1aafSjsg 		    IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
1716ad8b1aafSjsg 		    IS_COMETLAKE(vgpu->gvt->gt->i915))
1717c349dbc7Sjsg 			*data0 = SKL_CDCLK_READY_FOR_CHANGE;
1718c349dbc7Sjsg 		break;
1719c349dbc7Sjsg 	case GEN6_PCODE_READ_RC6VIDS:
1720c349dbc7Sjsg 		*data0 |= 0x1;
1721c349dbc7Sjsg 		break;
1722c349dbc7Sjsg 	}
1723c349dbc7Sjsg 
1724c349dbc7Sjsg 	gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
1725c349dbc7Sjsg 		     vgpu->id, value, *data0);
1726c349dbc7Sjsg 	/**
1727c349dbc7Sjsg 	 * A clear PCODE_READY means ready for a pcode read/write, and a
1728c349dbc7Sjsg 	 * clear PCODE_ERROR_MASK means no error happened. GVT-g always
1729c349dbc7Sjsg 	 * emulates pcode read/write as successful and ready for access at
1730c349dbc7Sjsg 	 * any time, since we don't touch real physical registers here.
1731c349dbc7Sjsg 	 */
1732c349dbc7Sjsg 	value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK);
1733c349dbc7Sjsg 	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
1734c349dbc7Sjsg }
1735c349dbc7Sjsg 
hws_pga_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1736c349dbc7Sjsg static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
1737c349dbc7Sjsg 		void *p_data, unsigned int bytes)
1738c349dbc7Sjsg {
1739c349dbc7Sjsg 	u32 value = *(u32 *)p_data;
1740c349dbc7Sjsg 	const struct intel_engine_cs *engine =
1741c349dbc7Sjsg 		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
1742c349dbc7Sjsg 
1743ad8b1aafSjsg 	if (value != 0 &&
1744ad8b1aafSjsg 	    !intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
1745c349dbc7Sjsg 		gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
1746c349dbc7Sjsg 			      offset, value);
1747c349dbc7Sjsg 		return -EINVAL;
1748c349dbc7Sjsg 	}
1749c349dbc7Sjsg 
1750c349dbc7Sjsg 	/*
1751c349dbc7Sjsg 	 * All HWSP register writes need to be emulated so the host can
1752c349dbc7Sjsg 	 * update the VM's CSB status correctly. The registers listed here
1753c349dbc7Sjsg 	 * cover BDW, SKL and other platforms with the same HWSP registers.
1754c349dbc7Sjsg 	 */
1755c349dbc7Sjsg 	if (unlikely(!engine)) {
1756c349dbc7Sjsg 		gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
1757c349dbc7Sjsg 			     offset);
1758c349dbc7Sjsg 		return -EINVAL;
1759c349dbc7Sjsg 	}
1760c349dbc7Sjsg 	vgpu->hws_pga[engine->id] = value;
1761c349dbc7Sjsg 	gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
1762c349dbc7Sjsg 		     vgpu->id, value, offset);
1763c349dbc7Sjsg 
1764c349dbc7Sjsg 	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
1765c349dbc7Sjsg }
1766c349dbc7Sjsg 
skl_power_well_ctl_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1767c349dbc7Sjsg static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
1768c349dbc7Sjsg 		unsigned int offset, void *p_data, unsigned int bytes)
1769c349dbc7Sjsg {
1770c349dbc7Sjsg 	u32 v = *(u32 *)p_data;
1771c349dbc7Sjsg 
1772c349dbc7Sjsg 	if (IS_BROXTON(vgpu->gvt->gt->i915))
1773c349dbc7Sjsg 		v &= (1 << 31) | (1 << 29);
1774c349dbc7Sjsg 	else
1775c349dbc7Sjsg 		v &= (1 << 31) | (1 << 29) | (1 << 9) |
1776c349dbc7Sjsg 			(1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
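	/* Mirror each allowed request bit into the status bit below it. */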
1777c349dbc7Sjsg 	v |= (v >> 1);
1778c349dbc7Sjsg 
1779c349dbc7Sjsg 	return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
1780c349dbc7Sjsg }
1781c349dbc7Sjsg 
skl_lcpll_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1782c349dbc7Sjsg static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
1783c349dbc7Sjsg 		void *p_data, unsigned int bytes)
1784c349dbc7Sjsg {
1785c349dbc7Sjsg 	u32 v = *(u32 *)p_data;
1786c349dbc7Sjsg 
1787c349dbc7Sjsg 	/* other bits are MBZ. */
1788c349dbc7Sjsg 	v &= (1 << 31) | (1 << 30);
1789c349dbc7Sjsg 	if (v & (1 << 31))
		v |= (1 << 30);
	else
		v &= ~(1 << 30);
1790c349dbc7Sjsg 
1791c349dbc7Sjsg 	vgpu_vreg(vgpu, offset) = v;
1792c349dbc7Sjsg 
1793c349dbc7Sjsg 	return 0;
1794c349dbc7Sjsg }
1795c349dbc7Sjsg 
bxt_de_pll_enable_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1796c349dbc7Sjsg static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu,
1797c349dbc7Sjsg 		unsigned int offset, void *p_data, unsigned int bytes)
1798c349dbc7Sjsg {
1799c349dbc7Sjsg 	u32 v = *(u32 *)p_data;
1800c349dbc7Sjsg 
1801c349dbc7Sjsg 	if (v & BXT_DE_PLL_PLL_ENABLE)
1802c349dbc7Sjsg 		v |= BXT_DE_PLL_LOCK;
1803c349dbc7Sjsg 
1804c349dbc7Sjsg 	vgpu_vreg(vgpu, offset) = v;
1805c349dbc7Sjsg 
1806c349dbc7Sjsg 	return 0;
1807c349dbc7Sjsg }
1808c349dbc7Sjsg 
bxt_port_pll_enable_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1809c349dbc7Sjsg static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu,
1810c349dbc7Sjsg 		unsigned int offset, void *p_data, unsigned int bytes)
1811c349dbc7Sjsg {
1812c349dbc7Sjsg 	u32 v = *(u32 *)p_data;
1813c349dbc7Sjsg 
1814c349dbc7Sjsg 	if (v & PORT_PLL_ENABLE)
1815c349dbc7Sjsg 		v |= PORT_PLL_LOCK;
1816c349dbc7Sjsg 
1817c349dbc7Sjsg 	vgpu_vreg(vgpu, offset) = v;
1818c349dbc7Sjsg 
1819c349dbc7Sjsg 	return 0;
1820c349dbc7Sjsg }
1821c349dbc7Sjsg 
bxt_phy_ctl_family_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1822c349dbc7Sjsg static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
1823c349dbc7Sjsg 		unsigned int offset, void *p_data, unsigned int bytes)
1824c349dbc7Sjsg {
1825c349dbc7Sjsg 	u32 v = *(u32 *)p_data;
1826c349dbc7Sjsg 	u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
1827c349dbc7Sjsg 
1828c349dbc7Sjsg 	switch (offset) {
1829c349dbc7Sjsg 	case _PHY_CTL_FAMILY_EDP:
1830c349dbc7Sjsg 		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
1831c349dbc7Sjsg 		break;
1832c349dbc7Sjsg 	case _PHY_CTL_FAMILY_DDI:
1833c349dbc7Sjsg 		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
1834c349dbc7Sjsg 		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
1835c349dbc7Sjsg 		break;
1836c349dbc7Sjsg 	}
1837c349dbc7Sjsg 
1838c349dbc7Sjsg 	vgpu_vreg(vgpu, offset) = v;
1839c349dbc7Sjsg 
1840c349dbc7Sjsg 	return 0;
1841c349dbc7Sjsg }
1842c349dbc7Sjsg 
bxt_port_tx_dw3_read(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1843c349dbc7Sjsg static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu,
1844c349dbc7Sjsg 		unsigned int offset, void *p_data, unsigned int bytes)
1845c349dbc7Sjsg {
1846c349dbc7Sjsg 	u32 v = vgpu_vreg(vgpu, offset);
1847c349dbc7Sjsg 
1848c349dbc7Sjsg 	v &= ~UNIQUE_TRANGE_EN_METHOD;
1849c349dbc7Sjsg 
1850c349dbc7Sjsg 	vgpu_vreg(vgpu, offset) = v;
1851c349dbc7Sjsg 
1852c349dbc7Sjsg 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1853c349dbc7Sjsg }
1854c349dbc7Sjsg 
bxt_pcs_dw12_grp_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1855c349dbc7Sjsg static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu,
1856c349dbc7Sjsg 		unsigned int offset, void *p_data, unsigned int bytes)
1857c349dbc7Sjsg {
1858c349dbc7Sjsg 	u32 v = *(u32 *)p_data;
1859c349dbc7Sjsg 
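	/*
	 * A write to the DW12 group register is broadcast to what appear to
	 * be the per-lane copies at the fixed offsets below, so lane reads
	 * observe the group value.
	 */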
1860c349dbc7Sjsg 	if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) {
1861c349dbc7Sjsg 		vgpu_vreg(vgpu, offset - 0x600) = v;
1862c349dbc7Sjsg 		vgpu_vreg(vgpu, offset - 0x800) = v;
1863c349dbc7Sjsg 	} else {
1864c349dbc7Sjsg 		vgpu_vreg(vgpu, offset - 0x400) = v;
1865c349dbc7Sjsg 		vgpu_vreg(vgpu, offset - 0x600) = v;
1866c349dbc7Sjsg 	}
1867c349dbc7Sjsg 
1868c349dbc7Sjsg 	vgpu_vreg(vgpu, offset) = v;
1869c349dbc7Sjsg 
1870c349dbc7Sjsg 	return 0;
1871c349dbc7Sjsg }
1872c349dbc7Sjsg 
bxt_gt_disp_pwron_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1873c349dbc7Sjsg static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
1874c349dbc7Sjsg 		unsigned int offset, void *p_data, unsigned int bytes)
1875c349dbc7Sjsg {
1876c349dbc7Sjsg 	u32 v = *(u32 *)p_data;
1877c349dbc7Sjsg 
1878c349dbc7Sjsg 	if (v & BIT(0)) {
1879c349dbc7Sjsg 		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
1880c349dbc7Sjsg 			~PHY_RESERVED;
1881c349dbc7Sjsg 		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
1882c349dbc7Sjsg 			PHY_POWER_GOOD;
1883c349dbc7Sjsg 	}
1884c349dbc7Sjsg 
1885c349dbc7Sjsg 	if (v & BIT(1)) {
1886c349dbc7Sjsg 		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
1887c349dbc7Sjsg 			~PHY_RESERVED;
1888c349dbc7Sjsg 		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
1889c349dbc7Sjsg 			PHY_POWER_GOOD;
1890c349dbc7Sjsg 	}
1891c349dbc7Sjsg 
1893c349dbc7Sjsg 	vgpu_vreg(vgpu, offset) = v;
1894c349dbc7Sjsg 
1895c349dbc7Sjsg 	return 0;
1896c349dbc7Sjsg }
1897c349dbc7Sjsg 
edp_psr_imr_iir_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1898c349dbc7Sjsg static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
1899c349dbc7Sjsg 		unsigned int offset, void *p_data, unsigned int bytes)
1900c349dbc7Sjsg {
1901c349dbc7Sjsg 	vgpu_vreg(vgpu, offset) = 0;
1902c349dbc7Sjsg 	return 0;
1903c349dbc7Sjsg }
1904c349dbc7Sjsg 
19055ca02815Sjsg /*
1906ad8b1aafSjsg  * FixMe:
1907ad8b1aafSjsg  * If a guest fills a non-privileged batch buffer on ApolloLake/Broxton the
1908ad8b1aafSjsg  * way Mesa i965 did in 717e7539124d (i965: Use a WC map and memcpy for the
1909ad8b1aafSjsg  * batch instead of pwrite.), the host GPU hangs executing these
1910ad8b1aafSjsg  * MI_BATCH_BUFFERs because the buffer filled by the VM vCPU is never flushed.
1911ad8b1aafSjsg  * Temporarily work around this by setting the SNOOP bit for PAT3, which is
1912ad8b1aafSjsg  * used by the PPGTT PML4 PTE: PAT(0) PCD(1) PWT(1).
1913ad8b1aafSjsg  * Performance is still expected to be low and will need further improvement.
1914ad8b1aafSjsg  */
bxt_ppat_low_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1915ad8b1aafSjsg static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset,
1916ad8b1aafSjsg 			      void *p_data, unsigned int bytes)
1917ad8b1aafSjsg {
1918ad8b1aafSjsg 	u64 pat =
1919ad8b1aafSjsg 		GEN8_PPAT(0, CHV_PPAT_SNOOP) |
1920ad8b1aafSjsg 		GEN8_PPAT(1, 0) |
1921ad8b1aafSjsg 		GEN8_PPAT(2, 0) |
1922ad8b1aafSjsg 		GEN8_PPAT(3, CHV_PPAT_SNOOP) |
1923ad8b1aafSjsg 		GEN8_PPAT(4, CHV_PPAT_SNOOP) |
1924ad8b1aafSjsg 		GEN8_PPAT(5, CHV_PPAT_SNOOP) |
1925ad8b1aafSjsg 		GEN8_PPAT(6, CHV_PPAT_SNOOP) |
1926ad8b1aafSjsg 		GEN8_PPAT(7, CHV_PPAT_SNOOP);
1927ad8b1aafSjsg 
1928ad8b1aafSjsg 	vgpu_vreg(vgpu, offset) = lower_32_bits(pat);
1929ad8b1aafSjsg 
1930ad8b1aafSjsg 	return 0;
1931ad8b1aafSjsg }
1932ad8b1aafSjsg 
guc_status_read(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1933c349dbc7Sjsg static int guc_status_read(struct intel_vgpu *vgpu,
1934c349dbc7Sjsg 			   unsigned int offset, void *p_data,
1935c349dbc7Sjsg 			   unsigned int bytes)
1936c349dbc7Sjsg {
1937c349dbc7Sjsg 	/* keep MIA_IN_RESET before clearing */
1938c349dbc7Sjsg 	read_vreg(vgpu, offset, p_data, bytes);
1939c349dbc7Sjsg 	vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET;
1940c349dbc7Sjsg 	return 0;
1941c349dbc7Sjsg }
1942c349dbc7Sjsg 
mmio_read_from_hw(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1943c349dbc7Sjsg static int mmio_read_from_hw(struct intel_vgpu *vgpu,
1944c349dbc7Sjsg 		unsigned int offset, void *p_data, unsigned int bytes)
1945c349dbc7Sjsg {
1946c349dbc7Sjsg 	struct intel_gvt *gvt = vgpu->gvt;
1947c349dbc7Sjsg 	const struct intel_engine_cs *engine =
1948c349dbc7Sjsg 		intel_gvt_render_mmio_to_engine(gvt, offset);
1949c349dbc7Sjsg 
1950c349dbc7Sjsg 	/**
1951c349dbc7Sjsg 	 * Read the HW reg in the following cases:
1952c349dbc7Sjsg 	 * a. the offset isn't a ring mmio;
1953c349dbc7Sjsg 	 * b. the offset's ring is currently running on hw;
1954c349dbc7Sjsg 	 * c. the offset is a ring timestamp mmio.
1955c349dbc7Sjsg 	 */
1956c349dbc7Sjsg 
1957c349dbc7Sjsg 	if (!engine ||
1958c349dbc7Sjsg 	    vgpu == gvt->scheduler.engine_owner[engine->id] ||
1959c349dbc7Sjsg 	    offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
1960c349dbc7Sjsg 	    offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
1961c349dbc7Sjsg 		mmio_hw_access_pre(gvt->gt);
1962c349dbc7Sjsg 		vgpu_vreg(vgpu, offset) =
1963c349dbc7Sjsg 			intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
1964c349dbc7Sjsg 		mmio_hw_access_post(gvt->gt);
1965c349dbc7Sjsg 	}
1966c349dbc7Sjsg 
1967c349dbc7Sjsg 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1968c349dbc7Sjsg }
1969c349dbc7Sjsg 
elsp_mmio_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1970c349dbc7Sjsg static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1971c349dbc7Sjsg 		void *p_data, unsigned int bytes)
1972c349dbc7Sjsg {
1973c349dbc7Sjsg 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1974c349dbc7Sjsg 	const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
1975c349dbc7Sjsg 	struct intel_vgpu_execlist *execlist;
1976c349dbc7Sjsg 	u32 data = *(u32 *)p_data;
1977c349dbc7Sjsg 	int ret = 0;
1978c349dbc7Sjsg 
1979c349dbc7Sjsg 	if (drm_WARN_ON(&i915->drm, !engine))
1980c349dbc7Sjsg 		return -EINVAL;
1981c349dbc7Sjsg 
198234e7cf1aSjsg 	/*
198334e7cf1aSjsg 	 * d3_entered indicates that PPGTT invalidation should be skipped on
198434e7cf1aSjsg 	 * vGPU reset: it is set on a D0->D3 PCI config write and cleared
198534e7cf1aSjsg 	 * after a vGPU reset when resuming.
198634e7cf1aSjsg 	 * On S0ix exit, the device power state also transitions from D3 to
198734e7cf1aSjsg 	 * D0, as in S3 resume, but no vGPU reset is triggered (by the QEMU
198834e7cf1aSjsg 	 * device model). After S0ix exit, all engines continue to work, yet
198934e7cf1aSjsg 	 * d3_entered remains set, which would break the next vGPU reset
199034e7cf1aSjsg 	 * (missing the expected PPGTT invalidation).
199134e7cf1aSjsg 	 * Engines can only work in D0, so the first elsp write gives GVT a
199234e7cf1aSjsg 	 * chance to clear d3_entered.
199334e7cf1aSjsg 	 */
199434e7cf1aSjsg 	if (vgpu->d3_entered)
199534e7cf1aSjsg 		vgpu->d3_entered = false;
199634e7cf1aSjsg 
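	/*
	 * ELSP takes four dwords per submission: elsp_dwords.index counts
	 * the writes (stored in reverse order below), and the descriptors
	 * are submitted once the fourth dword arrives.
	 */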
1997c349dbc7Sjsg 	execlist = &vgpu->submission.execlist[engine->id];
1998c349dbc7Sjsg 
1999c349dbc7Sjsg 	execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
2000c349dbc7Sjsg 	if (execlist->elsp_dwords.index == 3) {
2001c349dbc7Sjsg 		ret = intel_vgpu_submit_execlist(vgpu, engine);
2002c349dbc7Sjsg 		if (ret)
2003c349dbc7Sjsg 			gvt_vgpu_err("fail submit workload on ring %s\n",
2004c349dbc7Sjsg 				     engine->name);
2005c349dbc7Sjsg 	}
2006c349dbc7Sjsg 
2007c349dbc7Sjsg 	++execlist->elsp_dwords.index;
2008c349dbc7Sjsg 	execlist->elsp_dwords.index &= 0x3;
2009c349dbc7Sjsg 	return ret;
2010c349dbc7Sjsg }
2011c349dbc7Sjsg 
ring_mode_mmio_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)2012c349dbc7Sjsg static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
2013c349dbc7Sjsg 		void *p_data, unsigned int bytes)
2014c349dbc7Sjsg {
2015c349dbc7Sjsg 	u32 data = *(u32 *)p_data;
2016c349dbc7Sjsg 	const struct intel_engine_cs *engine =
2017c349dbc7Sjsg 		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
2018c349dbc7Sjsg 	bool enable_execlist;
2019c349dbc7Sjsg 	int ret;
2020c349dbc7Sjsg 
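	/*
	 * This is a masked register: the upper 16 bits of a write select
	 * which of the lower 16 bits actually change. Bit 1 (and bit 2 on
	 * CFL/CML) is filtered out of the value latched into the vreg.
	 */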
2021c349dbc7Sjsg 	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
2022ad8b1aafSjsg 	if (IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
2023ad8b1aafSjsg 	    IS_COMETLAKE(vgpu->gvt->gt->i915))
2024c349dbc7Sjsg 		(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
2025c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
2026c349dbc7Sjsg 
2027ad8b1aafSjsg 	if (IS_MASKED_BITS_ENABLED(data, 1)) {
2028c349dbc7Sjsg 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2029c349dbc7Sjsg 		return 0;
2030c349dbc7Sjsg 	}
2031c349dbc7Sjsg 
2032ad8b1aafSjsg 	if ((IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
2033ad8b1aafSjsg 	     IS_COMETLAKE(vgpu->gvt->gt->i915)) &&
2034ad8b1aafSjsg 	    IS_MASKED_BITS_ENABLED(data, 2)) {
2035c349dbc7Sjsg 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2036c349dbc7Sjsg 		return 0;
2037c349dbc7Sjsg 	}
2038c349dbc7Sjsg 
2039c349dbc7Sjsg 	/* When PPGTT mode is enabled, check whether the guest has touched
2040c349dbc7Sjsg 	 * pvinfo; if not, treat it as a non-GVT-g-aware guest and stop
2041c349dbc7Sjsg 	 * emulating its cfg space, mmio, gtt, etc.
2042c349dbc7Sjsg 	 */
2043ad8b1aafSjsg 	if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) ||
2044ad8b1aafSjsg 	    IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) &&
2045ad8b1aafSjsg 	    !vgpu->pv_notified) {
2046c349dbc7Sjsg 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2047c349dbc7Sjsg 		return 0;
2048c349dbc7Sjsg 	}
2049ad8b1aafSjsg 	if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) ||
2050ad8b1aafSjsg 	    IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) {
2051c349dbc7Sjsg 		enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
2052c349dbc7Sjsg 
2053c349dbc7Sjsg 		gvt_dbg_core("EXECLIST %s on ring %s\n",
2054c349dbc7Sjsg 			     (enable_execlist ? "enabling" : "disabling"),
2055c349dbc7Sjsg 			     engine->name);
2056c349dbc7Sjsg 
2057c349dbc7Sjsg 		if (!enable_execlist)
2058c349dbc7Sjsg 			return 0;
2059c349dbc7Sjsg 
2060c349dbc7Sjsg 		ret = intel_vgpu_select_submission_ops(vgpu,
2061c349dbc7Sjsg 						       engine->mask,
2062c349dbc7Sjsg 						       INTEL_VGPU_EXECLIST_SUBMISSION);
2063c349dbc7Sjsg 		if (ret)
2064c349dbc7Sjsg 			return ret;
2065c349dbc7Sjsg 
2066c349dbc7Sjsg 		intel_vgpu_start_schedule(vgpu);
2067c349dbc7Sjsg 	}
2068c349dbc7Sjsg 	return 0;
2069c349dbc7Sjsg }
2070c349dbc7Sjsg 
gvt_reg_tlb_control_handler(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)2071c349dbc7Sjsg static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
2072c349dbc7Sjsg 		unsigned int offset, void *p_data, unsigned int bytes)
2073c349dbc7Sjsg {
2074c349dbc7Sjsg 	unsigned int id = 0;
2075c349dbc7Sjsg 
2076c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
2077c349dbc7Sjsg 	vgpu_vreg(vgpu, offset) = 0;
2078c349dbc7Sjsg 
2079c349dbc7Sjsg 	switch (offset) {
2080c349dbc7Sjsg 	case 0x4260:
2081c349dbc7Sjsg 		id = RCS0;
2082c349dbc7Sjsg 		break;
2083c349dbc7Sjsg 	case 0x4264:
2084c349dbc7Sjsg 		id = VCS0;
2085c349dbc7Sjsg 		break;
2086c349dbc7Sjsg 	case 0x4268:
2087c349dbc7Sjsg 		id = VCS1;
2088c349dbc7Sjsg 		break;
2089c349dbc7Sjsg 	case 0x426c:
2090c349dbc7Sjsg 		id = BCS0;
2091c349dbc7Sjsg 		break;
2092c349dbc7Sjsg 	case 0x4270:
2093c349dbc7Sjsg 		id = VECS0;
2094c349dbc7Sjsg 		break;
2095c349dbc7Sjsg 	default:
2096c349dbc7Sjsg 		return -EINVAL;
2097c349dbc7Sjsg 	}
2098c349dbc7Sjsg 	set_bit(id, (void *)vgpu->submission.tlb_handle_pending);
2099c349dbc7Sjsg 
2100c349dbc7Sjsg 	return 0;
2101c349dbc7Sjsg }
2102c349dbc7Sjsg 
ring_reset_ctl_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)2103c349dbc7Sjsg static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
2104c349dbc7Sjsg 	unsigned int offset, void *p_data, unsigned int bytes)
2105c349dbc7Sjsg {
2106c349dbc7Sjsg 	u32 data;
2107c349dbc7Sjsg 
2108c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
2109c349dbc7Sjsg 	data = vgpu_vreg(vgpu, offset);
2110c349dbc7Sjsg 
2111ad8b1aafSjsg 	if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
2112c349dbc7Sjsg 		data |= RESET_CTL_READY_TO_RESET;
2113c349dbc7Sjsg 	else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
2114c349dbc7Sjsg 		data &= ~RESET_CTL_READY_TO_RESET;
2115c349dbc7Sjsg 
2116c349dbc7Sjsg 	vgpu_vreg(vgpu, offset) = data;
2117c349dbc7Sjsg 	return 0;
2118c349dbc7Sjsg }
2119c349dbc7Sjsg 
csfe_chicken1_mmio_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)2120c349dbc7Sjsg static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
2121c349dbc7Sjsg 				    unsigned int offset, void *p_data,
2122c349dbc7Sjsg 				    unsigned int bytes)
2123c349dbc7Sjsg {
2124c349dbc7Sjsg 	u32 data = *(u32 *)p_data;
2125c349dbc7Sjsg 
2126c349dbc7Sjsg 	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
2127c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
2128c349dbc7Sjsg 
2129ad8b1aafSjsg 	if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
2130ad8b1aafSjsg 	    IS_MASKED_BITS_ENABLED(data, 0x8))
2131c349dbc7Sjsg 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2132c349dbc7Sjsg 
2133c349dbc7Sjsg 	return 0;
2134c349dbc7Sjsg }
2135c349dbc7Sjsg 
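/*
 * Registration helpers. As used below, the setup_mmio_info() arguments
 * are: s = size in bytes, f = F_* flags, am = address mask, rm = read-only
 * bit mask, d = supported device set (D_*), r/w = read and write handlers.
 */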
2136c349dbc7Sjsg #define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
21371bb76ff1Sjsg 	ret = setup_mmio_info(gvt, i915_mmio_reg_offset(reg), \
21381bb76ff1Sjsg 		s, f, am, rm, d, r, w); \
2139c349dbc7Sjsg 	if (ret) \
2140c349dbc7Sjsg 		return ret; \
2141c349dbc7Sjsg } while (0)
2142c349dbc7Sjsg 
2143c349dbc7Sjsg #define MMIO_DH(reg, d, r, w) \
2144c349dbc7Sjsg 	MMIO_F(reg, 4, 0, 0, 0, d, r, w)
2145c349dbc7Sjsg 
2146c349dbc7Sjsg #define MMIO_DFH(reg, d, f, r, w) \
2147c349dbc7Sjsg 	MMIO_F(reg, 4, f, 0, 0, d, r, w)
2148c349dbc7Sjsg 
2149c349dbc7Sjsg #define MMIO_GM(reg, d, r, w) \
2150c349dbc7Sjsg 	MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
2151c349dbc7Sjsg 
2152c349dbc7Sjsg #define MMIO_GM_RDR(reg, d, r, w) \
2153c349dbc7Sjsg 	MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w)
2154c349dbc7Sjsg 
2155c349dbc7Sjsg #define MMIO_RO(reg, d, f, rm, r, w) \
2156c349dbc7Sjsg 	MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
2157c349dbc7Sjsg 
2158c349dbc7Sjsg #define MMIO_RING_F(prefix, s, f, am, rm, d, r, w) do { \
2159c349dbc7Sjsg 	MMIO_F(prefix(RENDER_RING_BASE), s, f, am, rm, d, r, w); \
2160c349dbc7Sjsg 	MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
2161c349dbc7Sjsg 	MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
2162c349dbc7Sjsg 	MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
2163ad8b1aafSjsg 	if (HAS_ENGINE(gvt->gt, VCS1)) \
2164c349dbc7Sjsg 		MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
2165c349dbc7Sjsg } while (0)
2166c349dbc7Sjsg 
2167c349dbc7Sjsg #define MMIO_RING_DFH(prefix, d, f, r, w) \
2168c349dbc7Sjsg 	MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)
2169c349dbc7Sjsg 
2170c349dbc7Sjsg #define MMIO_RING_GM(prefix, d, r, w) \
2171c349dbc7Sjsg 	MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
2172c349dbc7Sjsg 
2173c349dbc7Sjsg #define MMIO_RING_GM_RDR(prefix, d, r, w) \
2174c349dbc7Sjsg 	MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w)
2175c349dbc7Sjsg 
2176c349dbc7Sjsg #define MMIO_RING_RO(prefix, d, f, rm, r, w) \
2177c349dbc7Sjsg 	MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
2178c349dbc7Sjsg 
init_generic_mmio_info(struct intel_gvt * gvt)2179c349dbc7Sjsg static int init_generic_mmio_info(struct intel_gvt *gvt)
2180c349dbc7Sjsg {
2181c349dbc7Sjsg 	struct drm_i915_private *dev_priv = gvt->gt->i915;
2182c349dbc7Sjsg 	int ret;
2183c349dbc7Sjsg 
2184ad8b1aafSjsg 	MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL,
2185c349dbc7Sjsg 		intel_vgpu_reg_imr_handler);
2186c349dbc7Sjsg 
2187c349dbc7Sjsg 	MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
2188c349dbc7Sjsg 	MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
2189c349dbc7Sjsg 	MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
2190c349dbc7Sjsg 
2191ad8b1aafSjsg 	MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL);
2192ad8b1aafSjsg 
2194c349dbc7Sjsg 	MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
2195c349dbc7Sjsg 		gamw_echo_dev_rw_ia_write);
2196c349dbc7Sjsg 
2197c349dbc7Sjsg 	MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
2198c349dbc7Sjsg 	MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
2199c349dbc7Sjsg 	MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
2200c349dbc7Sjsg 
2201c349dbc7Sjsg #define RING_REG(base) _MMIO((base) + 0x28)
2202c349dbc7Sjsg 	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
2203c349dbc7Sjsg #undef RING_REG
2204c349dbc7Sjsg 
2205c349dbc7Sjsg #define RING_REG(base) _MMIO((base) + 0x134)
2206c349dbc7Sjsg 	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
2207c349dbc7Sjsg #undef RING_REG
2208c349dbc7Sjsg 
2209c349dbc7Sjsg #define RING_REG(base) _MMIO((base) + 0x6c)
2210c349dbc7Sjsg 	MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL);
2211c349dbc7Sjsg #undef RING_REG
2212c349dbc7Sjsg 	MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
2213c349dbc7Sjsg 
2214c349dbc7Sjsg 	MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
2215c349dbc7Sjsg 	MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
2216c349dbc7Sjsg 	MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
2217c349dbc7Sjsg 
2218ad8b1aafSjsg 	MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL);
2219ad8b1aafSjsg 	MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL);
2220ad8b1aafSjsg 	MMIO_RING_DFH(RING_CTL, D_ALL, 0, NULL, NULL);
2221ad8b1aafSjsg 	MMIO_RING_DFH(RING_ACTHD, D_ALL, 0, mmio_read_from_hw, NULL);
2222ad8b1aafSjsg 	MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
2223c349dbc7Sjsg 
2224c349dbc7Sjsg 	/* RING MODE */
2225c349dbc7Sjsg #define RING_REG(base) _MMIO((base) + 0x29c)
22265ca02815Sjsg 	MMIO_RING_DFH(RING_REG, D_ALL,
22275ca02815Sjsg 		F_MODE_MASK | F_CMD_ACCESS | F_CMD_WRITE_PATCH, NULL,
2228c349dbc7Sjsg 		ring_mode_mmio_write);
2229c349dbc7Sjsg #undef RING_REG
2230c349dbc7Sjsg 
2231c349dbc7Sjsg 	MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2232c349dbc7Sjsg 		NULL, NULL);
2233c349dbc7Sjsg 	MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2234c349dbc7Sjsg 			NULL, NULL);
2235c349dbc7Sjsg 	MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
2236c349dbc7Sjsg 			mmio_read_from_hw, NULL);
2237c349dbc7Sjsg 	MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
2238c349dbc7Sjsg 			mmio_read_from_hw, NULL);
2239c349dbc7Sjsg 
2240c349dbc7Sjsg 	MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2241c349dbc7Sjsg 	MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2242c349dbc7Sjsg 		NULL, NULL);
2243c349dbc7Sjsg 	MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2244c349dbc7Sjsg 	MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2245c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x2124), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2246c349dbc7Sjsg 
2247c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2248c349dbc7Sjsg 	MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2249c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2250c349dbc7Sjsg 	MMIO_DFH(FF_SLICE_CS_CHICKEN2, D_ALL,
2251c349dbc7Sjsg 		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2252c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2253c349dbc7Sjsg 	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
2254c349dbc7Sjsg 	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2255c349dbc7Sjsg 		NULL, NULL);
2256c349dbc7Sjsg 	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2257c349dbc7Sjsg 		 NULL, NULL);
2258c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x9030), D_ALL, F_CMD_ACCESS, NULL, NULL);
2259c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x20a0), D_ALL, F_CMD_ACCESS, NULL, NULL);
2260c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x2420), D_ALL, F_CMD_ACCESS, NULL, NULL);
2261c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x2430), D_ALL, F_CMD_ACCESS, NULL, NULL);
2262c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x2434), D_ALL, F_CMD_ACCESS, NULL, NULL);
2263c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x2438), D_ALL, F_CMD_ACCESS, NULL, NULL);
2264c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x243c), D_ALL, F_CMD_ACCESS, NULL, NULL);
2265c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x7018), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2266f005ef32Sjsg 	MMIO_DFH(HSW_HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2267c349dbc7Sjsg 	MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2268c349dbc7Sjsg 
2269c349dbc7Sjsg 	/* display */
2270f005ef32Sjsg 	MMIO_DH(TRANSCONF(TRANSCODER_A), D_ALL, NULL, pipeconf_mmio_write);
2271f005ef32Sjsg 	MMIO_DH(TRANSCONF(TRANSCODER_B), D_ALL, NULL, pipeconf_mmio_write);
2272f005ef32Sjsg 	MMIO_DH(TRANSCONF(TRANSCODER_C), D_ALL, NULL, pipeconf_mmio_write);
2273f005ef32Sjsg 	MMIO_DH(TRANSCONF(TRANSCODER_EDP), D_ALL, NULL, pipeconf_mmio_write);
2274c349dbc7Sjsg 	MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
2275c349dbc7Sjsg 	MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
2276c349dbc7Sjsg 		reg50080_mmio_write);
2277c349dbc7Sjsg 	MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
2278c349dbc7Sjsg 	MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
2279c349dbc7Sjsg 		reg50080_mmio_write);
2280c349dbc7Sjsg 	MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
2281c349dbc7Sjsg 	MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
2282c349dbc7Sjsg 		reg50080_mmio_write);
2283c349dbc7Sjsg 	MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
2284c349dbc7Sjsg 	MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
2285c349dbc7Sjsg 		reg50080_mmio_write);
2286c349dbc7Sjsg 	MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
2287c349dbc7Sjsg 	MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
2288c349dbc7Sjsg 		reg50080_mmio_write);
2289c349dbc7Sjsg 	MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
2290c349dbc7Sjsg 	MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
2291c349dbc7Sjsg 		reg50080_mmio_write);
2292c349dbc7Sjsg 
2293c349dbc7Sjsg 	MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
2294c349dbc7Sjsg 		gmbus_mmio_write);
2295c349dbc7Sjsg 	MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
2296c349dbc7Sjsg 
2297c349dbc7Sjsg 	MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
2298c349dbc7Sjsg 		dp_aux_ch_ctl_mmio_write);
2299c349dbc7Sjsg 	MMIO_F(_MMIO(_PCH_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
2300c349dbc7Sjsg 		dp_aux_ch_ctl_mmio_write);
2301c349dbc7Sjsg 	MMIO_F(_MMIO(_PCH_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
2302c349dbc7Sjsg 		dp_aux_ch_ctl_mmio_write);
2303c349dbc7Sjsg 
2304c349dbc7Sjsg 	MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);
2305c349dbc7Sjsg 
2306c349dbc7Sjsg 	MMIO_DH(_MMIO(_PCH_TRANSACONF), D_ALL, NULL, transconf_mmio_write);
2307c349dbc7Sjsg 	MMIO_DH(_MMIO(_PCH_TRANSBCONF), D_ALL, NULL, transconf_mmio_write);
2308c349dbc7Sjsg 
2309c349dbc7Sjsg 	MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
2310c349dbc7Sjsg 	MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
2311c349dbc7Sjsg 	MMIO_DH(FDI_RX_IIR(PIPE_C), D_ALL, NULL, fdi_rx_iir_mmio_write);
2312c349dbc7Sjsg 	MMIO_DH(FDI_RX_IMR(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
2313c349dbc7Sjsg 	MMIO_DH(FDI_RX_IMR(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
2314c349dbc7Sjsg 	MMIO_DH(FDI_RX_IMR(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
2315c349dbc7Sjsg 	MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
2316c349dbc7Sjsg 	MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
2317c349dbc7Sjsg 	MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
2318c349dbc7Sjsg 	MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
2319c349dbc7Sjsg 	MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL);
2320c349dbc7Sjsg 	MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL);
2321c349dbc7Sjsg 	MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL);
2322c349dbc7Sjsg 	MMIO_DH(_MMIO(0xe681c), D_ALL, dpy_reg_mmio_read, NULL);
2323c349dbc7Sjsg 	MMIO_DH(_MMIO(0xe6c04), D_ALL, dpy_reg_mmio_read, NULL);
2324c349dbc7Sjsg 	MMIO_DH(_MMIO(0xe6e1c), D_ALL, dpy_reg_mmio_read, NULL);
2325c349dbc7Sjsg 
2326c349dbc7Sjsg 	MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
2327c349dbc7Sjsg 		PORTA_HOTPLUG_STATUS_MASK
2328c349dbc7Sjsg 		| PORTB_HOTPLUG_STATUS_MASK
2329c349dbc7Sjsg 		| PORTC_HOTPLUG_STATUS_MASK
2330c349dbc7Sjsg 		| PORTD_HOTPLUG_STATUS_MASK,
2331c349dbc7Sjsg 		NULL, NULL);
2332c349dbc7Sjsg 
2333c349dbc7Sjsg 	MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
2334c349dbc7Sjsg 	MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
2335c349dbc7Sjsg 	MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
2336c349dbc7Sjsg 	MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
2337c349dbc7Sjsg 	MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
2338c349dbc7Sjsg 
2339c349dbc7Sjsg 	MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_ALL, NULL,
2340c349dbc7Sjsg 		dp_aux_ch_ctl_mmio_write);
2341c349dbc7Sjsg 
2342c349dbc7Sjsg 	MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2343c349dbc7Sjsg 	MMIO_DH(DDI_BUF_CTL(PORT_B), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2344c349dbc7Sjsg 	MMIO_DH(DDI_BUF_CTL(PORT_C), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2345c349dbc7Sjsg 	MMIO_DH(DDI_BUF_CTL(PORT_D), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2346c349dbc7Sjsg 	MMIO_DH(DDI_BUF_CTL(PORT_E), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2347c349dbc7Sjsg 
2348c349dbc7Sjsg 	MMIO_DH(DP_TP_CTL(PORT_A), D_ALL, NULL, dp_tp_ctl_mmio_write);
2349c349dbc7Sjsg 	MMIO_DH(DP_TP_CTL(PORT_B), D_ALL, NULL, dp_tp_ctl_mmio_write);
2350c349dbc7Sjsg 	MMIO_DH(DP_TP_CTL(PORT_C), D_ALL, NULL, dp_tp_ctl_mmio_write);
2351c349dbc7Sjsg 	MMIO_DH(DP_TP_CTL(PORT_D), D_ALL, NULL, dp_tp_ctl_mmio_write);
2352c349dbc7Sjsg 	MMIO_DH(DP_TP_CTL(PORT_E), D_ALL, NULL, dp_tp_ctl_mmio_write);
2353c349dbc7Sjsg 
2354c349dbc7Sjsg 	MMIO_DH(DP_TP_STATUS(PORT_A), D_ALL, NULL, dp_tp_status_mmio_write);
2355c349dbc7Sjsg 	MMIO_DH(DP_TP_STATUS(PORT_B), D_ALL, NULL, dp_tp_status_mmio_write);
2356c349dbc7Sjsg 	MMIO_DH(DP_TP_STATUS(PORT_C), D_ALL, NULL, dp_tp_status_mmio_write);
2357c349dbc7Sjsg 	MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
2358c349dbc7Sjsg 	MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);
2359c349dbc7Sjsg 
2360c349dbc7Sjsg 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL);
2361c349dbc7Sjsg 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL);
2362c349dbc7Sjsg 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_C), D_ALL, NULL, NULL);
2363c349dbc7Sjsg 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_EDP), D_ALL, NULL, NULL);
2364c349dbc7Sjsg 
2365c349dbc7Sjsg 	MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
2366c349dbc7Sjsg 	MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
2367c349dbc7Sjsg 	MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
2368c349dbc7Sjsg 	MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
2369c349dbc7Sjsg 	MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL);
2370c349dbc7Sjsg 	MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
2371c349dbc7Sjsg 	MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
2372c349dbc7Sjsg 	MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write);
2373c349dbc7Sjsg 	MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write);
2374c349dbc7Sjsg 	MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write);
2375c349dbc7Sjsg 	MMIO_DH(HSW_PWR_WELL_CTL4, D_BDW, NULL, power_well_ctl_mmio_write);
2376c349dbc7Sjsg 	MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
2377c349dbc7Sjsg 	MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
2378c349dbc7Sjsg 
2379c349dbc7Sjsg 	MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
2380c349dbc7Sjsg 	MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
2381c349dbc7Sjsg 	MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
2382c349dbc7Sjsg 
2383c349dbc7Sjsg 	MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
2384c349dbc7Sjsg 	MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);
2385c349dbc7Sjsg 
2386c349dbc7Sjsg 	MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
2387c349dbc7Sjsg 	MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
2388c349dbc7Sjsg 
2389c349dbc7Sjsg 	MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
2390c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x215c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2391c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x2178), D_ALL, F_CMD_ACCESS, NULL, NULL);
2392c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
2393c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x12178), D_ALL, F_CMD_ACCESS, NULL, NULL);
2394c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x1217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
2395c349dbc7Sjsg 
2396c349dbc7Sjsg 	MMIO_F(_MMIO(0x2290), 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
2397c349dbc7Sjsg 	MMIO_F(_MMIO(0x5200), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2398c349dbc7Sjsg 	MMIO_F(_MMIO(0x5240), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2399c349dbc7Sjsg 	MMIO_F(_MMIO(0x5280), 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2400c349dbc7Sjsg 
2401c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x1c17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2402c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x1c178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2403c349dbc7Sjsg 	MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);
2404c349dbc7Sjsg 
2405c349dbc7Sjsg 	MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2406c349dbc7Sjsg 	MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2407c349dbc7Sjsg 	MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2408c349dbc7Sjsg 	MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2409c349dbc7Sjsg 	MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2410c349dbc7Sjsg 	MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2411c349dbc7Sjsg 	MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2412c349dbc7Sjsg 	MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2413c349dbc7Sjsg 	MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2414c349dbc7Sjsg 	MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2415c349dbc7Sjsg 	MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2416c349dbc7Sjsg 	MMIO_DH(_MMIO(0x4260), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2417c349dbc7Sjsg 	MMIO_DH(_MMIO(0x4264), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2418c349dbc7Sjsg 	MMIO_DH(_MMIO(0x4268), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2419c349dbc7Sjsg 	MMIO_DH(_MMIO(0x426c), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2420c349dbc7Sjsg 	MMIO_DH(_MMIO(0x4270), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2421c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2422c349dbc7Sjsg 
2423c349dbc7Sjsg 	MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2424ad8b1aafSjsg 	MMIO_RING_GM(RING_BBADDR, D_ALL, NULL, NULL);
2425c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL);
2426c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL);
2427c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL);
2428c349dbc7Sjsg 	MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
2429c349dbc7Sjsg 	MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
2430c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x22178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2431c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2432c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2433c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2434c349dbc7Sjsg 
2435c349dbc7Sjsg 	MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
2436c349dbc7Sjsg 	MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
2437c349dbc7Sjsg 	MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL);
2438c349dbc7Sjsg 
2439c349dbc7Sjsg 	return 0;
2440c349dbc7Sjsg }
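/*
 * Illustrative sketch (not part of the original source): each MMIO_DH(reg,
 * device, read, write) entry above installs per-register emulation hooks,
 * and a NULL read or write falls back to the default vreg handlers defined
 * later in this file.  A custom hook follows the same shape as this
 * hypothetical write handler:
 *
 *	static int example_mmio_write(struct intel_vgpu *vgpu,
 *				      unsigned int offset, void *p_data,
 *				      unsigned int bytes)
 *	{
 *		write_vreg(vgpu, offset, p_data, bytes); // update vreg copy
 *		return 0;                                // zero on success
 *	}
 */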
2441c349dbc7Sjsg 
2442c349dbc7Sjsg static int init_bdw_mmio_info(struct intel_gvt *gvt)
2443c349dbc7Sjsg {
2444c349dbc7Sjsg 	int ret;
2445c349dbc7Sjsg 
2446c349dbc7Sjsg 	MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2447c349dbc7Sjsg 	MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2448c349dbc7Sjsg 	MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2449c349dbc7Sjsg 
2450c349dbc7Sjsg 	MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2451c349dbc7Sjsg 	MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2452c349dbc7Sjsg 	MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2453c349dbc7Sjsg 
2454c349dbc7Sjsg 	MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2455c349dbc7Sjsg 	MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2456c349dbc7Sjsg 	MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2457c349dbc7Sjsg 
2458c349dbc7Sjsg 	MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2459c349dbc7Sjsg 	MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2460c349dbc7Sjsg 	MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2461c349dbc7Sjsg 
2462c349dbc7Sjsg 	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
2463c349dbc7Sjsg 		intel_vgpu_reg_imr_handler);
2464c349dbc7Sjsg 	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_A), D_BDW_PLUS, NULL,
2465c349dbc7Sjsg 		intel_vgpu_reg_ier_handler);
2466c349dbc7Sjsg 	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
2467c349dbc7Sjsg 		intel_vgpu_reg_iir_handler);
2468c349dbc7Sjsg 
2469c349dbc7Sjsg 	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
2470c349dbc7Sjsg 		intel_vgpu_reg_imr_handler);
2471c349dbc7Sjsg 	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_B), D_BDW_PLUS, NULL,
2472c349dbc7Sjsg 		intel_vgpu_reg_ier_handler);
2473c349dbc7Sjsg 	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
2474c349dbc7Sjsg 		intel_vgpu_reg_iir_handler);
2475c349dbc7Sjsg 
2476c349dbc7Sjsg 	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
2477c349dbc7Sjsg 		intel_vgpu_reg_imr_handler);
2478c349dbc7Sjsg 	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_C), D_BDW_PLUS, NULL,
2479c349dbc7Sjsg 		intel_vgpu_reg_ier_handler);
2480c349dbc7Sjsg 	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
2481c349dbc7Sjsg 		intel_vgpu_reg_iir_handler);
2482c349dbc7Sjsg 
2483c349dbc7Sjsg 	MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2484c349dbc7Sjsg 	MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2485c349dbc7Sjsg 	MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2486c349dbc7Sjsg 
2487c349dbc7Sjsg 	MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2488c349dbc7Sjsg 	MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2489c349dbc7Sjsg 	MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2490c349dbc7Sjsg 
2491c349dbc7Sjsg 	MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2492c349dbc7Sjsg 	MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2493c349dbc7Sjsg 	MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2494c349dbc7Sjsg 
2495c349dbc7Sjsg 	MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
2496c349dbc7Sjsg 		intel_vgpu_reg_master_irq_handler);
2497c349dbc7Sjsg 
2498ad8b1aafSjsg 	MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, 0,
2499c349dbc7Sjsg 		mmio_read_from_hw, NULL);
2500c349dbc7Sjsg 
2501c349dbc7Sjsg #define RING_REG(base) _MMIO((base) + 0xd0)
2502c349dbc7Sjsg 	MMIO_RING_F(RING_REG, 4, F_RO, 0,
2503c349dbc7Sjsg 		~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
2504c349dbc7Sjsg 		ring_reset_ctl_write);
2505c349dbc7Sjsg #undef RING_REG
2506c349dbc7Sjsg 
2507c349dbc7Sjsg #define RING_REG(base) _MMIO((base) + 0x230)
2508c349dbc7Sjsg 	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
2509c349dbc7Sjsg #undef RING_REG
2510c349dbc7Sjsg 
2511c349dbc7Sjsg #define RING_REG(base) _MMIO((base) + 0x234)
2512ad8b1aafSjsg 	MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS,
2513c349dbc7Sjsg 		NULL, NULL);
2514c349dbc7Sjsg #undef RING_REG
2515c349dbc7Sjsg 
2516c349dbc7Sjsg #define RING_REG(base) _MMIO((base) + 0x244)
2517c349dbc7Sjsg 	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2518c349dbc7Sjsg #undef RING_REG
2519c349dbc7Sjsg 
2520c349dbc7Sjsg #define RING_REG(base) _MMIO((base) + 0x370)
2521c349dbc7Sjsg 	MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
2522c349dbc7Sjsg #undef RING_REG
2523c349dbc7Sjsg 
2524c349dbc7Sjsg #define RING_REG(base) _MMIO((base) + 0x3a0)
2525c349dbc7Sjsg 	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
2526c349dbc7Sjsg #undef RING_REG
2527c349dbc7Sjsg 
2528c349dbc7Sjsg 	MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
2529c349dbc7Sjsg 
2530c349dbc7Sjsg #define RING_REG(base) _MMIO((base) + 0x270)
2531ad8b1aafSjsg 	MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
2532c349dbc7Sjsg #undef RING_REG
2533c349dbc7Sjsg 
2534ad8b1aafSjsg 	MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
2535c349dbc7Sjsg 
2536c349dbc7Sjsg 	MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2537c349dbc7Sjsg 
2538c349dbc7Sjsg 	MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
2539c349dbc7Sjsg 		NULL, NULL);
2540c349dbc7Sjsg 	MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
2541c349dbc7Sjsg 		NULL, NULL);
2542c349dbc7Sjsg 	MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2543c349dbc7Sjsg 
2544c349dbc7Sjsg 	MMIO_DFH(_MMIO(0xb1f0), D_BDW, F_CMD_ACCESS, NULL, NULL);
2545c349dbc7Sjsg 	MMIO_DFH(_MMIO(0xb1c0), D_BDW, F_CMD_ACCESS, NULL, NULL);
2546c349dbc7Sjsg 	MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2547c349dbc7Sjsg 	MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
2548c349dbc7Sjsg 	MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
2549c349dbc7Sjsg 
25505ca02815Sjsg 	MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
25515ca02815Sjsg 		D_BDW_PLUS, NULL, force_nonpriv_write);
2552c349dbc7Sjsg 
2553c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x83a4), D_BDW, F_CMD_ACCESS, NULL, NULL);
2554c349dbc7Sjsg 
2555c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x8430), D_BDW, F_CMD_ACCESS, NULL, NULL);
2556c349dbc7Sjsg 
2557c349dbc7Sjsg 	MMIO_DFH(_MMIO(0xe194), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2558c349dbc7Sjsg 	MMIO_DFH(_MMIO(0xe188), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2559c349dbc7Sjsg 	MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2560c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x2580), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2561c349dbc7Sjsg 
2562c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x2248), D_BDW, F_CMD_ACCESS, NULL, NULL);
2563c349dbc7Sjsg 
2564c349dbc7Sjsg 	MMIO_DFH(_MMIO(0xe220), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2565c349dbc7Sjsg 	MMIO_DFH(_MMIO(0xe230), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2566c349dbc7Sjsg 	MMIO_DFH(_MMIO(0xe240), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2567c349dbc7Sjsg 	MMIO_DFH(_MMIO(0xe260), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2568c349dbc7Sjsg 	MMIO_DFH(_MMIO(0xe270), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2569c349dbc7Sjsg 	MMIO_DFH(_MMIO(0xe280), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2570c349dbc7Sjsg 	MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2571c349dbc7Sjsg 	MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2572c349dbc7Sjsg 	MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2573c349dbc7Sjsg 	MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2574c349dbc7Sjsg 	return 0;
2575c349dbc7Sjsg }
2576c349dbc7Sjsg 
2577c349dbc7Sjsg static int init_skl_mmio_info(struct intel_gvt *gvt)
2578c349dbc7Sjsg {
2579c349dbc7Sjsg 	struct drm_i915_private *dev_priv = gvt->gt->i915;
2580c349dbc7Sjsg 	int ret;
2581c349dbc7Sjsg 
2582c349dbc7Sjsg 	MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
2583c349dbc7Sjsg 	MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL);
25845ca02815Sjsg 	MMIO_DH(FORCEWAKE_GT_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
25855ca02815Sjsg 	MMIO_DH(FORCEWAKE_ACK_GT_GEN9, D_SKL_PLUS, NULL, NULL);
2586c349dbc7Sjsg 	MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
2587c349dbc7Sjsg 	MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
2588c349dbc7Sjsg 
2589c349dbc7Sjsg 	MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
2590c349dbc7Sjsg 						dp_aux_ch_ctl_mmio_write);
2591c349dbc7Sjsg 	MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
2592c349dbc7Sjsg 						dp_aux_ch_ctl_mmio_write);
2593c349dbc7Sjsg 	MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
2594c349dbc7Sjsg 						dp_aux_ch_ctl_mmio_write);
2595c349dbc7Sjsg 
2596c349dbc7Sjsg 	MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
2597c349dbc7Sjsg 
2598c349dbc7Sjsg 	MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
2599c349dbc7Sjsg 
2600c349dbc7Sjsg 	MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2601ad8b1aafSjsg 	MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2602c349dbc7Sjsg 	MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
2603c349dbc7Sjsg 	MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
2604c349dbc7Sjsg 	MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
2605c349dbc7Sjsg 	MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL);
2606c349dbc7Sjsg 
2607c349dbc7Sjsg 	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
2608c349dbc7Sjsg 	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
2609c349dbc7Sjsg 	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
2610c349dbc7Sjsg 	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
2611c349dbc7Sjsg 	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
2612c349dbc7Sjsg 	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
2613c349dbc7Sjsg 
2614c349dbc7Sjsg 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
2615c349dbc7Sjsg 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
2616c349dbc7Sjsg 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
2617c349dbc7Sjsg 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
2618c349dbc7Sjsg 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
2619c349dbc7Sjsg 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
2620c349dbc7Sjsg 
2621c349dbc7Sjsg 	MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
2622c349dbc7Sjsg 	MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
2623c349dbc7Sjsg 	MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
2624c349dbc7Sjsg 	MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
2625c349dbc7Sjsg 	MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
2626c349dbc7Sjsg 	MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
2627c349dbc7Sjsg 
2628c349dbc7Sjsg 	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
2629c349dbc7Sjsg 	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
2630c349dbc7Sjsg 	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
2631c349dbc7Sjsg 	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
2632c349dbc7Sjsg 
2633c349dbc7Sjsg 	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
2634c349dbc7Sjsg 	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
2635c349dbc7Sjsg 	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
2636c349dbc7Sjsg 	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
2637c349dbc7Sjsg 
2638c349dbc7Sjsg 	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
2639c349dbc7Sjsg 	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
2640c349dbc7Sjsg 	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
2641c349dbc7Sjsg 	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
2642c349dbc7Sjsg 
2643c349dbc7Sjsg 	MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL);
2644c349dbc7Sjsg 	MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL);
2645c349dbc7Sjsg 	MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL);
2646c349dbc7Sjsg 
2647c349dbc7Sjsg 	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
2648c349dbc7Sjsg 	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
2649c349dbc7Sjsg 	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
2650c349dbc7Sjsg 
2651c349dbc7Sjsg 	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
2652c349dbc7Sjsg 	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
2653c349dbc7Sjsg 	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
2654c349dbc7Sjsg 
2655c349dbc7Sjsg 	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
2656c349dbc7Sjsg 	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
2657c349dbc7Sjsg 	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
2658c349dbc7Sjsg 
2659c349dbc7Sjsg 	MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL);
2660c349dbc7Sjsg 	MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL);
2661c349dbc7Sjsg 	MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL);
2662c349dbc7Sjsg 
2663c349dbc7Sjsg 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
2664c349dbc7Sjsg 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
2665c349dbc7Sjsg 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
2666c349dbc7Sjsg 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
2667c349dbc7Sjsg 
2668c349dbc7Sjsg 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
2669c349dbc7Sjsg 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
2670c349dbc7Sjsg 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
2671c349dbc7Sjsg 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
2672c349dbc7Sjsg 
2673c349dbc7Sjsg 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
2674c349dbc7Sjsg 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
2675c349dbc7Sjsg 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
2676c349dbc7Sjsg 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
2677c349dbc7Sjsg 
2678c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
2679c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
2680c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
2681c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);
2682c349dbc7Sjsg 
2683c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
2684c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
2685c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
2686c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);
2687c349dbc7Sjsg 
2688c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
2689c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
2690c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
2691c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
2692c349dbc7Sjsg 
2693c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
2694c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
2695c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
2696c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);
2697c349dbc7Sjsg 
2698c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
2699c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
2700c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
2701c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);
2702c349dbc7Sjsg 
2703c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
2704c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
2705c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
2706c349dbc7Sjsg 	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
2707c349dbc7Sjsg 
2708c349dbc7Sjsg 	MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2709c349dbc7Sjsg 
2710c349dbc7Sjsg 	MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
2711c349dbc7Sjsg 		NULL, NULL);
2712c349dbc7Sjsg 	MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
2713c349dbc7Sjsg 		NULL, NULL);
2714c349dbc7Sjsg 
2715c349dbc7Sjsg 	MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS,
2716c349dbc7Sjsg 		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2717c349dbc7Sjsg 	MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
2718c349dbc7Sjsg 		NULL, NULL);
2719c349dbc7Sjsg 
2720c349dbc7Sjsg 	/* TRTT */
2721c349dbc7Sjsg 	MMIO_DFH(TRVATTL3PTRDW(0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2722c349dbc7Sjsg 	MMIO_DFH(TRVATTL3PTRDW(1), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2723c349dbc7Sjsg 	MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2724c349dbc7Sjsg 	MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2725c349dbc7Sjsg 	MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
27265ca02815Sjsg 	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE,
2727c349dbc7Sjsg 		 NULL, gen9_trtte_write);
27285ca02815Sjsg 	MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE,
27295ca02815Sjsg 		 NULL, gen9_trtt_chicken_write);
2730c349dbc7Sjsg 
2731c349dbc7Sjsg 	MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2732c349dbc7Sjsg 	MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
2733c349dbc7Sjsg 
2734c349dbc7Sjsg #define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
2735c349dbc7Sjsg 	MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
2736c349dbc7Sjsg 		      NULL, csfe_chicken1_mmio_write);
2737c349dbc7Sjsg #undef CSFE_CHICKEN1_REG
2738c349dbc7Sjsg 	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
2739c349dbc7Sjsg 		 NULL, NULL);
2740c349dbc7Sjsg 	MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
2741c349dbc7Sjsg 		 NULL, NULL);
2742c349dbc7Sjsg 
2743ad8b1aafSjsg 	MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL);
27441bb76ff1Sjsg 	MMIO_DFH(_MMIO(0xe4cc), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2745c349dbc7Sjsg 
2746c349dbc7Sjsg 	return 0;
2747c349dbc7Sjsg }
2748c349dbc7Sjsg 
2749c349dbc7Sjsg static int init_bxt_mmio_info(struct intel_gvt *gvt)
2750c349dbc7Sjsg {
2751c349dbc7Sjsg 	int ret;
2752c349dbc7Sjsg 
2753c349dbc7Sjsg 	MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
2754c349dbc7Sjsg 	MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
2755c349dbc7Sjsg 		NULL, bxt_phy_ctl_family_write);
2756c349dbc7Sjsg 	MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
2757c349dbc7Sjsg 		NULL, bxt_phy_ctl_family_write);
2758c349dbc7Sjsg 	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
2759c349dbc7Sjsg 		NULL, bxt_port_pll_enable_write);
2760c349dbc7Sjsg 	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
2761c349dbc7Sjsg 		NULL, bxt_port_pll_enable_write);
2762c349dbc7Sjsg 	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
2763c349dbc7Sjsg 		bxt_port_pll_enable_write);
2764c349dbc7Sjsg 
2765c349dbc7Sjsg 	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
2766c349dbc7Sjsg 		NULL, bxt_pcs_dw12_grp_write);
2767c349dbc7Sjsg 	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
2768c349dbc7Sjsg 		bxt_port_tx_dw3_read, NULL);
2769c349dbc7Sjsg 	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
2770c349dbc7Sjsg 		NULL, bxt_pcs_dw12_grp_write);
2771c349dbc7Sjsg 	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
2772c349dbc7Sjsg 		bxt_port_tx_dw3_read, NULL);
2773c349dbc7Sjsg 	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
2774c349dbc7Sjsg 		NULL, bxt_pcs_dw12_grp_write);
2775c349dbc7Sjsg 	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
2776c349dbc7Sjsg 		bxt_port_tx_dw3_read, NULL);
2777c349dbc7Sjsg 	MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
2778c349dbc7Sjsg 	MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
2779ad8b1aafSjsg 	MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL);
2780ad8b1aafSjsg 	MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL);
2781ad8b1aafSjsg 	MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40, F_CMD_ACCESS,
2782ad8b1aafSjsg 	       0, 0, D_BXT, NULL, NULL);
2783ad8b1aafSjsg 	MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40, F_CMD_ACCESS,
2784ad8b1aafSjsg 	       0, 0, D_BXT, NULL, NULL);
2785ad8b1aafSjsg 	MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40, F_CMD_ACCESS,
2786ad8b1aafSjsg 	       0, 0, D_BXT, NULL, NULL);
2787ad8b1aafSjsg 	MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40, F_CMD_ACCESS,
2788ad8b1aafSjsg 	       0, 0, D_BXT, NULL, NULL);
2789c349dbc7Sjsg 
2790c349dbc7Sjsg 	MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
2791c349dbc7Sjsg 
2792ad8b1aafSjsg 	MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write);
2793ad8b1aafSjsg 
2794c349dbc7Sjsg 	return 0;
2795c349dbc7Sjsg }
2796c349dbc7Sjsg 
2797c349dbc7Sjsg static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
2798c349dbc7Sjsg 					      unsigned int offset)
2799c349dbc7Sjsg {
2800c349dbc7Sjsg 	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
2801c349dbc7Sjsg 	int num = gvt->mmio.num_mmio_block;
2802c349dbc7Sjsg 	int i;
2803c349dbc7Sjsg 
2804c349dbc7Sjsg 	for (i = 0; i < num; i++, block++) {
2805c349dbc7Sjsg 		if (offset >= i915_mmio_reg_offset(block->offset) &&
2806c349dbc7Sjsg 		    offset < i915_mmio_reg_offset(block->offset) + block->size)
2807c349dbc7Sjsg 			return block;
2808c349dbc7Sjsg 	}
2809c349dbc7Sjsg 	return NULL;
2810c349dbc7Sjsg }
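/*
 * Illustrative sketch (not part of the original source): lookups walk the
 * block array linearly, so the cost is O(num_mmio_block).  For example, an
 * access anywhere inside the VGT_PVINFO_PAGE range resolves to the pvinfo
 * block installed by init_mmio_block_handlers() below:
 *
 *	u32 val;
 *	struct gvt_mmio_block *blk = find_mmio_block(gvt, VGT_PVINFO_PAGE);
 *	if (blk && blk->read)
 *		blk->read(vgpu, VGT_PVINFO_PAGE, &val, 4); // pvinfo_mmio_read
 */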
2811c349dbc7Sjsg 
2812c349dbc7Sjsg /**
2813c349dbc7Sjsg  * intel_gvt_clean_mmio_info - clean up MMIO information table for GVT device
2814c349dbc7Sjsg  * @gvt: GVT device
2815c349dbc7Sjsg  *
2816c349dbc7Sjsg  * This function is called at the driver unloading stage to clean up the MMIO
2817c349dbc7Sjsg  * information table of the GVT device.
2818c349dbc7Sjsg  *
2819c349dbc7Sjsg  */
2820c349dbc7Sjsg void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
2821c349dbc7Sjsg {
2822c349dbc7Sjsg 	struct hlist_node *tmp;
2823c349dbc7Sjsg 	struct intel_gvt_mmio_info *e;
2824c349dbc7Sjsg 	int i;
2825c349dbc7Sjsg 
2826c349dbc7Sjsg 	hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
2827c349dbc7Sjsg 		kfree(e);
2828c349dbc7Sjsg 
28291bb76ff1Sjsg 	kfree(gvt->mmio.mmio_block);
28301bb76ff1Sjsg 	gvt->mmio.mmio_block = NULL;
28311bb76ff1Sjsg 	gvt->mmio.num_mmio_block = 0;
28321bb76ff1Sjsg 
2833c349dbc7Sjsg 	vfree(gvt->mmio.mmio_attribute);
2834c349dbc7Sjsg 	gvt->mmio.mmio_attribute = NULL;
2835c349dbc7Sjsg }
2836c349dbc7Sjsg 
28371bb76ff1Sjsg static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
28381bb76ff1Sjsg 		       u32 size)
28391bb76ff1Sjsg {
28401bb76ff1Sjsg 	struct intel_gvt *gvt = iter->data;
28411bb76ff1Sjsg 	struct intel_gvt_mmio_info *info, *p;
28421bb76ff1Sjsg 	u32 start, end, i;
28431bb76ff1Sjsg 
28441bb76ff1Sjsg 	if (WARN_ON(!IS_ALIGNED(offset, 4)))
28451bb76ff1Sjsg 		return -EINVAL;
28461bb76ff1Sjsg 
28471bb76ff1Sjsg 	start = offset;
28481bb76ff1Sjsg 	end = offset + size;
28491bb76ff1Sjsg 
28501bb76ff1Sjsg 	for (i = start; i < end; i += 4) {
28511bb76ff1Sjsg 		p = intel_gvt_find_mmio_info(gvt, i);
28521bb76ff1Sjsg 		if (p) {
2853*cc7c2990Sjsg 			WARN(1, "dup mmio definition offset %x\n", i);
28541bb76ff1Sjsg 
28551bb76ff1Sjsg 			/* Return -EEXIST here to make the GVT-g load
28561bb76ff1Sjsg 			 * fail, so that duplicated MMIO definitions are
28571bb76ff1Sjsg 			 * caught as early as possible.
2858ad8b1aafSjsg 			 */
28591bb76ff1Sjsg 			return -EEXIST;
28601bb76ff1Sjsg 		}
28611bb76ff1Sjsg 
28621bb76ff1Sjsg 		info = kzalloc(sizeof(*info), GFP_KERNEL);
28631bb76ff1Sjsg 		if (!info)
28641bb76ff1Sjsg 			return -ENOMEM;
28651bb76ff1Sjsg 
28661bb76ff1Sjsg 		info->offset = i;
28671bb76ff1Sjsg 		info->read = intel_vgpu_default_mmio_read;
28681bb76ff1Sjsg 		info->write = intel_vgpu_default_mmio_write;
28691bb76ff1Sjsg 		INIT_HLIST_NODE(&info->node);
28701bb76ff1Sjsg 		hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
28711bb76ff1Sjsg 		gvt->mmio.num_tracked_mmio++;
28721bb76ff1Sjsg 	}
28731bb76ff1Sjsg 	return 0;
28741bb76ff1Sjsg }
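/*
 * Illustrative note (not part of the original source): every 4-byte slot in
 * an iterated range starts out with the default vreg read/write handlers set
 * above; the MMIO_* registration macros used by the init_*_mmio_info()
 * functions are then expected to override individual entries with specific
 * handlers.
 */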
28751bb76ff1Sjsg 
28761bb76ff1Sjsg static int handle_mmio_block(struct intel_gvt_mmio_table_iter *iter,
28771bb76ff1Sjsg 			     u32 offset, u32 size)
28781bb76ff1Sjsg {
28791bb76ff1Sjsg 	struct intel_gvt *gvt = iter->data;
28801bb76ff1Sjsg 	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
28811bb76ff1Sjsg 	void *ret;
28821bb76ff1Sjsg 
28831bb76ff1Sjsg 	ret = krealloc(block,
28841bb76ff1Sjsg 			 (gvt->mmio.num_mmio_block + 1) * sizeof(*block),
28851bb76ff1Sjsg 			 GFP_KERNEL);
28861bb76ff1Sjsg 	if (!ret)
28871bb76ff1Sjsg 		return -ENOMEM;
28881bb76ff1Sjsg 
28891bb76ff1Sjsg 	gvt->mmio.mmio_block = block = ret;
28901bb76ff1Sjsg 
28911bb76ff1Sjsg 	block += gvt->mmio.num_mmio_block;
28921bb76ff1Sjsg 
28931bb76ff1Sjsg 	memset(block, 0, sizeof(*block));
28941bb76ff1Sjsg 
28951bb76ff1Sjsg 	block->offset = _MMIO(offset);
28961bb76ff1Sjsg 	block->size = size;
28971bb76ff1Sjsg 
28981bb76ff1Sjsg 	gvt->mmio.num_mmio_block++;
28991bb76ff1Sjsg 
29001bb76ff1Sjsg 	return 0;
29011bb76ff1Sjsg }
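/*
 * Illustrative note (not part of the original source): the block table grows
 * by one element per call via krealloc(), which is quadratic in the number
 * of blocks but acceptable while only a few large ranges are registered.
 */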
29021bb76ff1Sjsg 
29031bb76ff1Sjsg static int handle_mmio_cb(struct intel_gvt_mmio_table_iter *iter, u32 offset,
29041bb76ff1Sjsg 			  u32 size)
29051bb76ff1Sjsg {
29061bb76ff1Sjsg 	if (size < 1024 || offset == i915_mmio_reg_offset(GEN9_GFX_MOCS(0)))
29071bb76ff1Sjsg 		return handle_mmio(iter, offset, size);
29081bb76ff1Sjsg 	else
29091bb76ff1Sjsg 		return handle_mmio_block(iter, offset, size);
29101bb76ff1Sjsg }
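/*
 * Illustrative example (not part of the original source): ranges smaller
 * than 1KB are tracked per 4-byte register via handle_mmio(); larger ranges
 * become a single gvt_mmio_block, except the GEN9 graphics MOCS range, which
 * is forced down the per-register path despite its 0x7f8-byte size:
 *
 *	handle_mmio_cb(iter, 0x180000, 0x2000);	// hypothetical 8KB range,
 *						// takes handle_mmio_block()
 */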
29111bb76ff1Sjsg 
29121bb76ff1Sjsg static int init_mmio_info(struct intel_gvt *gvt)
29131bb76ff1Sjsg {
29141bb76ff1Sjsg 	struct intel_gvt_mmio_table_iter iter = {
29151bb76ff1Sjsg 		.i915 = gvt->gt->i915,
29161bb76ff1Sjsg 		.data = gvt,
29171bb76ff1Sjsg 		.handle_mmio_cb = handle_mmio_cb,
2918c349dbc7Sjsg 	};
2919c349dbc7Sjsg 
29201bb76ff1Sjsg 	return intel_gvt_iterate_mmio_table(&iter);
29211bb76ff1Sjsg }
29221bb76ff1Sjsg 
29231bb76ff1Sjsg static int init_mmio_block_handlers(struct intel_gvt *gvt)
29241bb76ff1Sjsg {
29251bb76ff1Sjsg 	struct gvt_mmio_block *block;
29261bb76ff1Sjsg 
29271bb76ff1Sjsg 	block = find_mmio_block(gvt, VGT_PVINFO_PAGE);
29281bb76ff1Sjsg 	if (!block) {
29291bb76ff1Sjsg 		WARN(1, "fail to assign handlers to mmio block %x\n",
29301bb76ff1Sjsg 		     i915_mmio_reg_offset(gvt->mmio.mmio_block->offset));
29311bb76ff1Sjsg 		return -ENODEV;
29321bb76ff1Sjsg 	}
29331bb76ff1Sjsg 
29341bb76ff1Sjsg 	block->read = pvinfo_mmio_read;
29351bb76ff1Sjsg 	block->write = pvinfo_mmio_write;
29361bb76ff1Sjsg 
29371bb76ff1Sjsg 	return 0;
29381bb76ff1Sjsg }
29391bb76ff1Sjsg 
2940c349dbc7Sjsg /**
2941c349dbc7Sjsg  * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
2942c349dbc7Sjsg  * @gvt: GVT device
2943c349dbc7Sjsg  *
2944c349dbc7Sjsg  * This function is called at the initialization stage to set up the MMIO
2945c349dbc7Sjsg  * information table for the GVT device.
2946c349dbc7Sjsg  *
2947c349dbc7Sjsg  * Returns:
2948c349dbc7Sjsg  * Zero on success, negative error code if failed.
2949c349dbc7Sjsg  */
2950c349dbc7Sjsg int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
2951c349dbc7Sjsg {
2952c349dbc7Sjsg 	struct intel_gvt_device_info *info = &gvt->device_info;
2953c349dbc7Sjsg 	struct drm_i915_private *i915 = gvt->gt->i915;
2954c349dbc7Sjsg 	int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
2955c349dbc7Sjsg 	int ret;
2956c349dbc7Sjsg 
2957c349dbc7Sjsg 	gvt->mmio.mmio_attribute = vzalloc(size);
2958c349dbc7Sjsg 	if (!gvt->mmio.mmio_attribute)
2959c349dbc7Sjsg 		return -ENOMEM;
2960c349dbc7Sjsg 
29611bb76ff1Sjsg 	ret = init_mmio_info(gvt);
29621bb76ff1Sjsg 	if (ret)
29631bb76ff1Sjsg 		goto err;
29641bb76ff1Sjsg 
29651bb76ff1Sjsg 	ret = init_mmio_block_handlers(gvt);
29661bb76ff1Sjsg 	if (ret)
29671bb76ff1Sjsg 		goto err;
29681bb76ff1Sjsg 
2969c349dbc7Sjsg 	ret = init_generic_mmio_info(gvt);
2970c349dbc7Sjsg 	if (ret)
2971c349dbc7Sjsg 		goto err;
2972c349dbc7Sjsg 
2973c349dbc7Sjsg 	if (IS_BROADWELL(i915)) {
2974c349dbc7Sjsg 		ret = init_bdw_mmio_info(gvt);
2975c349dbc7Sjsg 		if (ret)
2976c349dbc7Sjsg 			goto err;
2977c349dbc7Sjsg 	} else if (IS_SKYLAKE(i915) ||
2978c349dbc7Sjsg 		   IS_KABYLAKE(i915) ||
2979ad8b1aafSjsg 		   IS_COFFEELAKE(i915) ||
2980ad8b1aafSjsg 		   IS_COMETLAKE(i915)) {
2981c349dbc7Sjsg 		ret = init_bdw_mmio_info(gvt);
2982c349dbc7Sjsg 		if (ret)
2983c349dbc7Sjsg 			goto err;
2984c349dbc7Sjsg 		ret = init_skl_mmio_info(gvt);
2985c349dbc7Sjsg 		if (ret)
2986c349dbc7Sjsg 			goto err;
2987c349dbc7Sjsg 	} else if (IS_BROXTON(i915)) {
2988c349dbc7Sjsg 		ret = init_bdw_mmio_info(gvt);
2989c349dbc7Sjsg 		if (ret)
2990c349dbc7Sjsg 			goto err;
2991c349dbc7Sjsg 		ret = init_skl_mmio_info(gvt);
2992c349dbc7Sjsg 		if (ret)
2993c349dbc7Sjsg 			goto err;
2994c349dbc7Sjsg 		ret = init_bxt_mmio_info(gvt);
2995c349dbc7Sjsg 		if (ret)
2996c349dbc7Sjsg 			goto err;
2997c349dbc7Sjsg 	}
2998c349dbc7Sjsg 
2999c349dbc7Sjsg 	return 0;
3000c349dbc7Sjsg err:
3001c349dbc7Sjsg 	intel_gvt_clean_mmio_info(gvt);
3002c349dbc7Sjsg 	return ret;
3003c349dbc7Sjsg }
3004c349dbc7Sjsg 
3005c349dbc7Sjsg /**
3006c349dbc7Sjsg  * intel_gvt_for_each_tracked_mmio - iterate each tracked mmio
3007c349dbc7Sjsg  * @gvt: a GVT device
3008c349dbc7Sjsg  * @handler: the handler
3009c349dbc7Sjsg  * @data: private data given to handler
3010c349dbc7Sjsg  *
3011c349dbc7Sjsg  * Returns:
3012c349dbc7Sjsg  * Zero on success, negative error code if failed.
3013c349dbc7Sjsg  */
3014c349dbc7Sjsg int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
3015c349dbc7Sjsg 	int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
3016c349dbc7Sjsg 	void *data)
3017c349dbc7Sjsg {
3018c349dbc7Sjsg 	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
3019c349dbc7Sjsg 	struct intel_gvt_mmio_info *e;
3020c349dbc7Sjsg 	int i, j, ret;
3021c349dbc7Sjsg 
3022c349dbc7Sjsg 	hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
3023c349dbc7Sjsg 		ret = handler(gvt, e->offset, data);
3024c349dbc7Sjsg 		if (ret)
3025c349dbc7Sjsg 			return ret;
3026c349dbc7Sjsg 	}
3027c349dbc7Sjsg 
3028c349dbc7Sjsg 	for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
3029c349dbc7Sjsg 		/* pvinfo data doesn't come from hw mmio */
3030c349dbc7Sjsg 		if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE)
3031c349dbc7Sjsg 			continue;
3032c349dbc7Sjsg 
3033c349dbc7Sjsg 		for (j = 0; j < block->size; j += 4) {
30341bb76ff1Sjsg 			ret = handler(gvt, i915_mmio_reg_offset(block->offset) + j, data);
3035c349dbc7Sjsg 			if (ret)
3036c349dbc7Sjsg 				return ret;
3037c349dbc7Sjsg 		}
3038c349dbc7Sjsg 	}
3039c349dbc7Sjsg 	return 0;
3040c349dbc7Sjsg }
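/*
 * Illustrative sketch (not part of the original source): a minimal callback
 * for the iterator above that just counts tracked offsets.  The names are
 * hypothetical:
 *
 *	static int count_mmio(struct intel_gvt *gvt, u32 offset, void *data)
 *	{
 *		(*(u32 *)data)++;	// bump the caller-supplied counter
 *		return 0;		// non-zero would abort the walk
 *	}
 *
 *	u32 num = 0;
 *	intel_gvt_for_each_tracked_mmio(gvt, count_mmio, &num);
 */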
3041c349dbc7Sjsg 
3042c349dbc7Sjsg /**
3043c349dbc7Sjsg  * intel_vgpu_default_mmio_read - default MMIO read handler
3044c349dbc7Sjsg  * @vgpu: a vGPU
3045c349dbc7Sjsg  * @offset: access offset
3046c349dbc7Sjsg  * @p_data: data return buffer
3047c349dbc7Sjsg  * @bytes: access data length
3048c349dbc7Sjsg  *
3049c349dbc7Sjsg  * Returns:
3050c349dbc7Sjsg  * Zero on success, negative error code if failed.
3051c349dbc7Sjsg  */
3052c349dbc7Sjsg int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
3053c349dbc7Sjsg 		void *p_data, unsigned int bytes)
3054c349dbc7Sjsg {
3055c349dbc7Sjsg 	read_vreg(vgpu, offset, p_data, bytes);
3056c349dbc7Sjsg 	return 0;
3057c349dbc7Sjsg }
3058c349dbc7Sjsg 
3059c349dbc7Sjsg /**
30601bb76ff1Sjsg  * intel_vgpu_default_mmio_write() - default MMIO write handler
3061c349dbc7Sjsg  * @vgpu: a vGPU
3062c349dbc7Sjsg  * @offset: access offset
3063c349dbc7Sjsg  * @p_data: write data buffer
3064c349dbc7Sjsg  * @bytes: access data length
3065c349dbc7Sjsg  *
3066c349dbc7Sjsg  * Returns:
3067c349dbc7Sjsg  * Zero on success, negative error code if failed.
3068c349dbc7Sjsg  */
3069c349dbc7Sjsg int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
3070c349dbc7Sjsg 		void *p_data, unsigned int bytes)
3071c349dbc7Sjsg {
3072c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
3073c349dbc7Sjsg 	return 0;
3074c349dbc7Sjsg }
3075c349dbc7Sjsg 
3076c349dbc7Sjsg /**
3077c349dbc7Sjsg  * intel_vgpu_mask_mmio_write - write mask register
3078c349dbc7Sjsg  * @vgpu: a vGPU
3079c349dbc7Sjsg  * @offset: access offset
3080c349dbc7Sjsg  * @p_data: write data buffer
3081c349dbc7Sjsg  * @bytes: access data length
3082c349dbc7Sjsg  *
3083c349dbc7Sjsg  * Returns:
3084c349dbc7Sjsg  * Zero on success, negative error code if failed.
3085c349dbc7Sjsg  */
3086c349dbc7Sjsg int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
3087c349dbc7Sjsg 		void *p_data, unsigned int bytes)
3088c349dbc7Sjsg {
3089c349dbc7Sjsg 	u32 mask, old_vreg;
3090c349dbc7Sjsg 
3091c349dbc7Sjsg 	old_vreg = vgpu_vreg(vgpu, offset);
3092c349dbc7Sjsg 	write_vreg(vgpu, offset, p_data, bytes);
3093c349dbc7Sjsg 	mask = vgpu_vreg(vgpu, offset) >> 16;
3094c349dbc7Sjsg 	vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
3095c349dbc7Sjsg 				(vgpu_vreg(vgpu, offset) & mask);
3096c349dbc7Sjsg 
3097c349dbc7Sjsg 	return 0;
3098c349dbc7Sjsg }
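/*
 * Illustrative example (not part of the original source): for a masked
 * register the upper 16 bits of the written value select which of the
 * lower 16 bits take effect.  Assuming the vreg currently holds 0:
 *
 *	u32 data = 0x00080008;	// mask bit 3 set, value bit 3 set
 *	intel_vgpu_mask_mmio_write(vgpu, offset, &data, 4);
 *	// the vreg now reads 0x8: bit 3 was updated, and every bit outside
 *	// the 0x0008 mask kept its previous contents.
 */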
3099c349dbc7Sjsg 
3100c349dbc7Sjsg /**
3101c349dbc7Sjsg  * intel_gvt_in_force_nonpriv_whitelist - check if an MMIO is in the whitelist
3102c349dbc7Sjsg  * of force-nonpriv registers
3103c349dbc7Sjsg  *
3104c349dbc7Sjsg  * @gvt: a GVT device
3105c349dbc7Sjsg  * @offset: register offset
3106c349dbc7Sjsg  *
3107c349dbc7Sjsg  * Returns:
3108c349dbc7Sjsg  * True if the register is in the force-nonpriv whitelist;
3109c349dbc7Sjsg  * false otherwise.
3110c349dbc7Sjsg  */
3111c349dbc7Sjsg bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
3112c349dbc7Sjsg 					  unsigned int offset)
3113c349dbc7Sjsg {
3114c349dbc7Sjsg 	return in_whitelist(offset);
3115c349dbc7Sjsg }
3116c349dbc7Sjsg 
3117c349dbc7Sjsg /**
3118c349dbc7Sjsg  * intel_vgpu_mmio_reg_rw - emulate tracked mmio registers
3119c349dbc7Sjsg  * @vgpu: a vGPU
3120c349dbc7Sjsg  * @offset: register offset
3121c349dbc7Sjsg  * @pdata: data buffer
3122c349dbc7Sjsg  * @bytes: data length
3123c349dbc7Sjsg  * @is_read: read or write
3124c349dbc7Sjsg  *
3125c349dbc7Sjsg  * Returns:
3126c349dbc7Sjsg  * Zero on success, negative error code if failed.
3127c349dbc7Sjsg  */
3128c349dbc7Sjsg int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
3129c349dbc7Sjsg 			   void *pdata, unsigned int bytes, bool is_read)
3130c349dbc7Sjsg {
3131c349dbc7Sjsg 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
3132c349dbc7Sjsg 	struct intel_gvt *gvt = vgpu->gvt;
3133c349dbc7Sjsg 	struct intel_gvt_mmio_info *mmio_info;
3134c349dbc7Sjsg 	struct gvt_mmio_block *mmio_block;
3135c349dbc7Sjsg 	gvt_mmio_func func;
3136c349dbc7Sjsg 	int ret;
3137c349dbc7Sjsg 
3138c349dbc7Sjsg 	if (drm_WARN_ON(&i915->drm, bytes > 8))
3139c349dbc7Sjsg 		return -EINVAL;
3140c349dbc7Sjsg 
3141c349dbc7Sjsg 	/*
3142c349dbc7Sjsg 	 * Handle special MMIO blocks.
3143c349dbc7Sjsg 	 */
3144c349dbc7Sjsg 	mmio_block = find_mmio_block(gvt, offset);
3145c349dbc7Sjsg 	if (mmio_block) {
3146c349dbc7Sjsg 		func = is_read ? mmio_block->read : mmio_block->write;
3147c349dbc7Sjsg 		if (func)
3148c349dbc7Sjsg 			return func(vgpu, offset, pdata, bytes);
3149c349dbc7Sjsg 		goto default_rw;
3150c349dbc7Sjsg 	}
3151c349dbc7Sjsg 
3152c349dbc7Sjsg 	/*
3153c349dbc7Sjsg 	 * Normal tracked MMIOs.
3154c349dbc7Sjsg 	 */
31555ca02815Sjsg 	mmio_info = intel_gvt_find_mmio_info(gvt, offset);
3156c349dbc7Sjsg 	if (!mmio_info) {
3157c349dbc7Sjsg 		gvt_dbg_mmio("untracked MMIO %08x len %d\n", offset, bytes);
3158c349dbc7Sjsg 		goto default_rw;
3159c349dbc7Sjsg 	}
3160c349dbc7Sjsg 
3161c349dbc7Sjsg 	if (is_read)
3162c349dbc7Sjsg 		return mmio_info->read(vgpu, offset, pdata, bytes);
3163c349dbc7Sjsg 	else {
3164c349dbc7Sjsg 		u64 ro_mask = mmio_info->ro_mask;
3165c349dbc7Sjsg 		u32 old_vreg = 0;
3166c349dbc7Sjsg 		u64 data = 0;
3167c349dbc7Sjsg 
3168c349dbc7Sjsg 		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
3169c349dbc7Sjsg 			old_vreg = vgpu_vreg(vgpu, offset);
3170c349dbc7Sjsg 		}
3171c349dbc7Sjsg 
3172c349dbc7Sjsg 		if (likely(!ro_mask))
3173c349dbc7Sjsg 			ret = mmio_info->write(vgpu, offset, pdata, bytes);
3174c349dbc7Sjsg 		else if (!~ro_mask) {
3175c349dbc7Sjsg 			gvt_vgpu_err("try to write RO reg %x\n", offset);
3176c349dbc7Sjsg 			return 0;
3177c349dbc7Sjsg 		} else {
3178c349dbc7Sjsg 			/* keep the RO bits in the virtual register */
3179c349dbc7Sjsg 			memcpy(&data, pdata, bytes);
3180c349dbc7Sjsg 			data &= ~ro_mask;
3181c349dbc7Sjsg 			data |= vgpu_vreg(vgpu, offset) & ro_mask;
3182c349dbc7Sjsg 			ret = mmio_info->write(vgpu, offset, &data, bytes);
3183c349dbc7Sjsg 		}
3184c349dbc7Sjsg 
3185c349dbc7Sjsg 		/* the upper 16 bits of mode ctl regs are mask bits for change */
3186c349dbc7Sjsg 		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
3187c349dbc7Sjsg 			u32 mask = vgpu_vreg(vgpu, offset) >> 16;
3188c349dbc7Sjsg 
3189c349dbc7Sjsg 			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
3190c349dbc7Sjsg 					| (vgpu_vreg(vgpu, offset) & mask);
3191c349dbc7Sjsg 		}
3192c349dbc7Sjsg 	}
3193c349dbc7Sjsg 
3194c349dbc7Sjsg 	return ret;
3195c349dbc7Sjsg 
3196c349dbc7Sjsg default_rw:
3197c349dbc7Sjsg 	return is_read ?
3198c349dbc7Sjsg 		intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
3199c349dbc7Sjsg 		intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
3200c349dbc7Sjsg }
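/*
 * Illustrative example (not part of the original source): with a partial
 * ro_mask, read-only bits are taken from the current vreg before the write
 * handler runs.  E.g. for ro_mask == 0xffff0000 and a vreg of 0x12340000,
 * a guest write of 0xdeadbeef is rewritten to
 * (0xdeadbeef & 0x0000ffff) | (0x12340000 & 0xffff0000) == 0x1234beef
 * before mmio_info->write() sees it.
 */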
32015ca02815Sjsg 
32025ca02815Sjsg void intel_gvt_restore_fence(struct intel_gvt *gvt)
32035ca02815Sjsg {
32045ca02815Sjsg 	struct intel_vgpu *vgpu;
32055ca02815Sjsg 	int i, id;
32065ca02815Sjsg 
32075ca02815Sjsg 	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
32085ca02815Sjsg 		mmio_hw_access_pre(gvt->gt);
32095ca02815Sjsg 		for (i = 0; i < vgpu_fence_sz(vgpu); i++)
32105ca02815Sjsg 			intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i)));
32115ca02815Sjsg 		mmio_hw_access_post(gvt->gt);
32125ca02815Sjsg 	}
32135ca02815Sjsg }
32145ca02815Sjsg 
32155ca02815Sjsg static int mmio_pm_restore_handler(struct intel_gvt *gvt, u32 offset, void *data)
32165ca02815Sjsg {
32175ca02815Sjsg 	struct intel_vgpu *vgpu = data;
32185ca02815Sjsg 	struct drm_i915_private *dev_priv = gvt->gt->i915;
32195ca02815Sjsg 
32205ca02815Sjsg 	if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
32215ca02815Sjsg 		intel_uncore_write(&dev_priv->uncore, _MMIO(offset), vgpu_vreg(vgpu, offset));
32225ca02815Sjsg 
32235ca02815Sjsg 	return 0;
32245ca02815Sjsg }
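/*
 * Illustrative note (not part of the original source): mmio_attribute holds
 * one flags entry per 4-byte register, hence the "offset >> 2" index above;
 * only registers registered with F_PM_SAVE (e.g. TRTTE and 0x4dfc in
 * init_skl_mmio_info()) are written back to hardware on restore.
 */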
32255ca02815Sjsg 
32265ca02815Sjsg void intel_gvt_restore_mmio(struct intel_gvt *gvt)
32275ca02815Sjsg {
32285ca02815Sjsg 	struct intel_vgpu *vgpu;
32295ca02815Sjsg 	int id;
32305ca02815Sjsg 
32315ca02815Sjsg 	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
32325ca02815Sjsg 		mmio_hw_access_pre(gvt->gt);
32335ca02815Sjsg 		intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu);
32345ca02815Sjsg 		mmio_hw_access_post(gvt->gt);
32355ca02815Sjsg 	}
32365ca02815Sjsg }