xref: /openbsd-src/sys/dev/pci/drm/i915/gvt/gtt.c (revision 1ad61ae0a79a724d2d3ec69e69c8e1d1ff6b53a0)
1 /*
2  * GTT virtualization
3  *
4  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23  * SOFTWARE.
24  *
25  * Authors:
26  *    Zhi Wang <zhi.a.wang@intel.com>
27  *    Zhenyu Wang <zhenyuw@linux.intel.com>
28  *    Xiao Zheng <xiao.zheng@intel.com>
29  *
30  * Contributors:
31  *    Min He <min.he@intel.com>
32  *    Bing Niu <bing.niu@intel.com>
33  *
34  */
35 
36 #include "i915_drv.h"
37 #include "gvt.h"
38 #include "i915_pvinfo.h"
39 #include "trace.h"
40 
41 #include "gt/intel_gt_regs.h"
42 
43 #if defined(VERBOSE_DEBUG)
44 #define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
45 #else
46 #define gvt_vdbg_mm(fmt, args...)
47 #endif
48 
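/*
 * Tunables for the out-of-sync (OOS) shadow mechanism used further down
 * in this file: enable_out_of_sync gates whether frequently written guest
 * PTE pages may be taken out of write protection and resynchronized
 * lazily, and preallocated_oos_pages sets how many oos page buffers are
 * preallocated for that purpose (the pool itself is set up outside this
 * section).
 */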
49 static bool enable_out_of_sync = false;
50 static int preallocated_oos_pages = 8192;
51 
52 static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
53 {
54 	struct kvm *kvm = vgpu->vfio_device.kvm;
55 	int idx;
56 	bool ret;
57 
58 	if (!vgpu->attached)
59 		return false;
60 
61 	idx = srcu_read_lock(&kvm->srcu);
62 	ret = kvm_is_visible_gfn(kvm, gfn);
63 	srcu_read_unlock(&kvm->srcu, idx);
64 
65 	return ret;
66 }
67 
68 /*
69  * Validate a graphics memory address and the size of the related range
70  * against the vGPU's aperture and hidden GM ranges.
71  */
72 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
73 {
74 	if (size == 0)
75 		return vgpu_gmadr_is_valid(vgpu, addr);
76 
77 	if (vgpu_gmadr_is_aperture(vgpu, addr) &&
78 	    vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
79 		return true;
80 	else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
81 		 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
82 		return true;
83 
84 	gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
85 		     addr, size);
86 	return false;
87 }
88 
89 /* translate a guest gmadr to host gmadr */
90 int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
91 {
92 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
93 
94 	if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
95 		     "invalid guest gmadr %llx\n", g_addr))
96 		return -EACCES;
97 
98 	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
99 		*h_addr = vgpu_aperture_gmadr_base(vgpu)
100 			  + (g_addr - vgpu_aperture_offset(vgpu));
101 	else
102 		*h_addr = vgpu_hidden_gmadr_base(vgpu)
103 			  + (g_addr - vgpu_hidden_offset(vgpu));
104 	return 0;
105 }
106 
107 /* translate a host gmadr to guest gmadr */
108 int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
109 {
110 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
111 
112 	if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
113 		     "invalid host gmadr %llx\n", h_addr))
114 		return -EACCES;
115 
116 	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
117 		*g_addr = vgpu_aperture_gmadr_base(vgpu)
118 			+ (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
119 	else
120 		*g_addr = vgpu_hidden_gmadr_base(vgpu)
121 			+ (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
122 	return 0;
123 }
124 
125 int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
126 			     unsigned long *h_index)
127 {
128 	u64 h_addr;
129 	int ret;
130 
131 	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
132 				       &h_addr);
133 	if (ret)
134 		return ret;
135 
136 	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
137 	return 0;
138 }
139 
140 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
141 			     unsigned long *g_index)
142 {
143 	u64 g_addr;
144 	int ret;
145 
146 	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
147 				       &g_addr);
148 	if (ret)
149 		return ret;
150 
151 	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
152 	return 0;
153 }
154 
155 #define gtt_type_is_entry(type) \
156 	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
157 	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
158 	 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)
159 
160 #define gtt_type_is_pt(type) \
161 	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)
162 
163 #define gtt_type_is_pte_pt(type) \
164 	(type == GTT_TYPE_PPGTT_PTE_PT)
165 
166 #define gtt_type_is_root_pointer(type) \
167 	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)
168 
169 #define gtt_init_entry(e, t, p, v) do { \
170 	(e)->type = t; \
171 	(e)->pdev = p; \
172 	memcpy(&(e)->val64, &v, sizeof(v)); \
173 } while (0)
174 
175 /*
176  * Mappings between GTT_TYPE* enumerations.
177  * The following information can be looked up for a given type:
178  * - the type of the next level page table
179  * - the type of an entry inside this level of page table
180  * - the type of the entry when the PSE bit is set
181  *
182  * If the given type doesn't carry a particular piece of information,
183  * GTT_TYPE_INVALID is returned instead. For example, an L4 root entry
184  * has no PSE bit, so asking for its PSE type returns GTT_TYPE_INVALID;
185  * likewise, a PTE page table has no next level page table, so asking
186  * for its next level page table type also returns GTT_TYPE_INVALID.
187  * This is useful when traversing a page table.
188  */
190 
191 struct gtt_type_table_entry {
192 	int entry_type;
193 	int pt_type;
194 	int next_pt_type;
195 	int pse_entry_type;
196 };
197 
198 #define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
199 	[type] = { \
200 		.entry_type = e_type, \
201 		.pt_type = cpt_type, \
202 		.next_pt_type = npt_type, \
203 		.pse_entry_type = pse_type, \
204 	}
205 
206 static const struct gtt_type_table_entry gtt_type_table[] = {
207 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
208 			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
209 			GTT_TYPE_INVALID,
210 			GTT_TYPE_PPGTT_PML4_PT,
211 			GTT_TYPE_INVALID),
212 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
213 			GTT_TYPE_PPGTT_PML4_ENTRY,
214 			GTT_TYPE_PPGTT_PML4_PT,
215 			GTT_TYPE_PPGTT_PDP_PT,
216 			GTT_TYPE_INVALID),
217 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
218 			GTT_TYPE_PPGTT_PML4_ENTRY,
219 			GTT_TYPE_PPGTT_PML4_PT,
220 			GTT_TYPE_PPGTT_PDP_PT,
221 			GTT_TYPE_INVALID),
222 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
223 			GTT_TYPE_PPGTT_PDP_ENTRY,
224 			GTT_TYPE_PPGTT_PDP_PT,
225 			GTT_TYPE_PPGTT_PDE_PT,
226 			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
227 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
228 			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
229 			GTT_TYPE_INVALID,
230 			GTT_TYPE_PPGTT_PDE_PT,
231 			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
232 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
233 			GTT_TYPE_PPGTT_PDP_ENTRY,
234 			GTT_TYPE_PPGTT_PDP_PT,
235 			GTT_TYPE_PPGTT_PDE_PT,
236 			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
237 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
238 			GTT_TYPE_PPGTT_PDE_ENTRY,
239 			GTT_TYPE_PPGTT_PDE_PT,
240 			GTT_TYPE_PPGTT_PTE_PT,
241 			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
242 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
243 			GTT_TYPE_PPGTT_PDE_ENTRY,
244 			GTT_TYPE_PPGTT_PDE_PT,
245 			GTT_TYPE_PPGTT_PTE_PT,
246 			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
247 	/* We treat the IPS bit as 'PSE' at the PTE level. */
248 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
249 			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
250 			GTT_TYPE_PPGTT_PTE_PT,
251 			GTT_TYPE_INVALID,
252 			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
253 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
254 			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
255 			GTT_TYPE_PPGTT_PTE_PT,
256 			GTT_TYPE_INVALID,
257 			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
258 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
259 			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
260 			GTT_TYPE_PPGTT_PTE_PT,
261 			GTT_TYPE_INVALID,
262 			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
263 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
264 			GTT_TYPE_PPGTT_PDE_ENTRY,
265 			GTT_TYPE_PPGTT_PDE_PT,
266 			GTT_TYPE_INVALID,
267 			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
268 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
269 			GTT_TYPE_PPGTT_PDP_ENTRY,
270 			GTT_TYPE_PPGTT_PDP_PT,
271 			GTT_TYPE_INVALID,
272 			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
273 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
274 			GTT_TYPE_GGTT_PTE,
275 			GTT_TYPE_INVALID,
276 			GTT_TYPE_INVALID,
277 			GTT_TYPE_INVALID),
278 };
279 
280 static inline int get_next_pt_type(int type)
281 {
282 	return gtt_type_table[type].next_pt_type;
283 }
284 
285 static inline int get_pt_type(int type)
286 {
287 	return gtt_type_table[type].pt_type;
288 }
289 
290 static inline int get_entry_type(int type)
291 {
292 	return gtt_type_table[type].entry_type;
293 }
294 
295 static inline int get_pse_type(int type)
296 {
297 	return gtt_type_table[type].pse_entry_type;
298 }
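/*
 * Example of how the table above is used while walking page tables: for a
 * PDE page table (GTT_TYPE_PPGTT_PDE_PT), get_entry_type() yields
 * GTT_TYPE_PPGTT_PDE_ENTRY, get_next_pt_type() yields
 * GTT_TYPE_PPGTT_PTE_PT, and get_pse_type() yields
 * GTT_TYPE_PPGTT_PTE_2M_ENTRY for an entry with PSE set.  Types that lack
 * a particular mapping resolve to GTT_TYPE_INVALID, which callers check
 * for.
 */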
299 
300 static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
301 {
302 	void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
303 
304 	return readq(addr);
305 }
306 
307 static void ggtt_invalidate(struct intel_gt *gt)
308 {
309 	mmio_hw_access_pre(gt);
310 	intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
311 	mmio_hw_access_post(gt);
312 }
313 
314 static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
315 {
316 	void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
317 
318 	writeq(pte, addr);
319 }
320 
321 static inline int gtt_get_entry64(void *pt,
322 		struct intel_gvt_gtt_entry *e,
323 		unsigned long index, bool hypervisor_access, unsigned long gpa,
324 		struct intel_vgpu *vgpu)
325 {
326 	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
327 	int ret;
328 
329 	if (WARN_ON(info->gtt_entry_size != 8))
330 		return -EINVAL;
331 
332 	if (hypervisor_access) {
333 		ret = intel_gvt_read_gpa(vgpu, gpa +
334 				(index << info->gtt_entry_size_shift),
335 				&e->val64, 8);
336 		if (WARN_ON(ret))
337 			return ret;
338 	} else if (!pt) {
339 		e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
340 	} else {
341 		e->val64 = *((u64 *)pt + index);
342 	}
343 	return 0;
344 }
345 
346 static inline int gtt_set_entry64(void *pt,
347 		struct intel_gvt_gtt_entry *e,
348 		unsigned long index, bool hypervisor_access, unsigned long gpa,
349 		struct intel_vgpu *vgpu)
350 {
351 	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
352 	int ret;
353 
354 	if (WARN_ON(info->gtt_entry_size != 8))
355 		return -EINVAL;
356 
357 	if (hypervisor_access) {
358 		ret = intel_gvt_write_gpa(vgpu, gpa +
359 				(index << info->gtt_entry_size_shift),
360 				&e->val64, 8);
361 		if (WARN_ON(ret))
362 			return ret;
363 	} else if (!pt) {
364 		write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
365 	} else {
366 		*((u64 *)pt + index) = e->val64;
367 	}
368 	return 0;
369 }
370 
371 #define GTT_HAW 46
372 
373 #define ADDR_1G_MASK	GENMASK_ULL(GTT_HAW - 1, 30)
374 #define ADDR_2M_MASK	GENMASK_ULL(GTT_HAW - 1, 21)
375 #define ADDR_64K_MASK	GENMASK_ULL(GTT_HAW - 1, 16)
376 #define ADDR_4K_MASK	GENMASK_ULL(GTT_HAW - 1, 12)
377 
378 #define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52)
379 #define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* split 64K gtt entry */
380 
381 #define GTT_64K_PTE_STRIDE 16
382 
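/*
 * With GTT_HAW (host address width) of 46 bits, the masks above select the
 * physical address bits carried by each entry size: bits [45:12] for 4K
 * entries, [45:16] for 64K, [45:21] for 2M and [45:30] for 1G.  Bits
 * [62:52] (GTT_SPTE_FLAG_MASK) are used here as software flag bits; bit 52
 * marks a 64K entry that has been split into 4K shadow entries.
 */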
383 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
384 {
385 	unsigned long pfn;
386 
387 	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
388 		pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
389 	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
390 		pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
391 	else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
392 		pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
393 	else
394 		pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
395 	return pfn;
396 }
397 
398 static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
399 {
400 	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
401 		e->val64 &= ~ADDR_1G_MASK;
402 		pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
403 	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
404 		e->val64 &= ~ADDR_2M_MASK;
405 		pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
406 	} else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
407 		e->val64 &= ~ADDR_64K_MASK;
408 		pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
409 	} else {
410 		e->val64 &= ~ADDR_4K_MASK;
411 		pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
412 	}
413 
414 	e->val64 |= (pfn << PAGE_SHIFT);
415 }
416 
417 static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
418 {
419 	return !!(e->val64 & _PAGE_PSE);
420 }
421 
422 static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
423 {
424 	if (gen8_gtt_test_pse(e)) {
425 		switch (e->type) {
426 		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
427 			e->val64 &= ~_PAGE_PSE;
428 			e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
429 			break;
430 		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
431 			e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
432 			e->val64 &= ~_PAGE_PSE;
433 			break;
434 		default:
435 			WARN_ON(1);
436 		}
437 	}
438 }
439 
440 static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
441 {
442 	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
443 		return false;
444 
445 	return !!(e->val64 & GEN8_PDE_IPS_64K);
446 }
447 
448 static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
449 {
450 	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
451 		return;
452 
453 	e->val64 &= ~GEN8_PDE_IPS_64K;
454 }
455 
456 static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
457 {
458 	/*
459 	 * i915 writes the PDP root pointer registers without the present
460 	 * bit set and that still works, so root pointer entries need to be
461 	 * treated specially here.
462 	 */
463 	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
464 			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
465 		return (e->val64 != 0);
466 	else
467 		return (e->val64 & GEN8_PAGE_PRESENT);
468 }
469 
470 static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
471 {
472 	e->val64 &= ~GEN8_PAGE_PRESENT;
473 }
474 
475 static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
476 {
477 	e->val64 |= GEN8_PAGE_PRESENT;
478 }
479 
480 static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
481 {
482 	return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
483 }
484 
485 static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
486 {
487 	e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
488 }
489 
490 static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
491 {
492 	e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
493 }
494 
495 /*
496  * Per-platform GMA routines.
497  */
498 static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
499 {
500 	unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);
501 
502 	trace_gma_index(__func__, gma, x);
503 	return x;
504 }
505 
506 #define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
507 static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
508 { \
509 	unsigned long x = (exp); \
510 	trace_gma_index(__func__, gma, x); \
511 	return x; \
512 }
513 
514 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
515 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
516 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
517 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
518 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
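/*
 * Taken together, these helpers decompose a graphics memory address for a
 * gen8-style 4-level walk: bits [47:39] select the PML4 entry, [38:30] the
 * PDP entry, [29:21] the PDE entry and [20:12] the PTE, while the legacy
 * 3-level layout only uses the two PDP selector bits [31:30].  For
 * example, gma = (1ULL << 39) | (2ULL << 30) | (3ULL << 21) | (4ULL << 12)
 * yields pml4 index 1, l4_pdp index 2, pde index 3 and pte index 4.
 */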
519 
520 static const struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
521 	.get_entry = gtt_get_entry64,
522 	.set_entry = gtt_set_entry64,
523 	.clear_present = gtt_entry_clear_present,
524 	.set_present = gtt_entry_set_present,
525 	.test_present = gen8_gtt_test_present,
526 	.test_pse = gen8_gtt_test_pse,
527 	.clear_pse = gen8_gtt_clear_pse,
528 	.clear_ips = gen8_gtt_clear_ips,
529 	.test_ips = gen8_gtt_test_ips,
530 	.clear_64k_splited = gen8_gtt_clear_64k_splited,
531 	.set_64k_splited = gen8_gtt_set_64k_splited,
532 	.test_64k_splited = gen8_gtt_test_64k_splited,
533 	.get_pfn = gen8_gtt_get_pfn,
534 	.set_pfn = gen8_gtt_set_pfn,
535 };
536 
537 static const struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
538 	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
539 	.gma_to_pte_index = gen8_gma_to_pte_index,
540 	.gma_to_pde_index = gen8_gma_to_pde_index,
541 	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
542 	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
543 	.gma_to_pml4_index = gen8_gma_to_pml4_index,
544 };
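/*
 * These two ops tables act as the per-platform vtable for GTT handling;
 * the shadowing code below reaches the PTE helpers through
 * gvt->gtt.pte_ops rather than calling the gen8 functions directly, which
 * keeps it platform independent.
 */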
545 
546 /* Update the entry type according to the PSE and IPS bits. */
547 static void update_entry_type_for_real(const struct intel_gvt_gtt_pte_ops *pte_ops,
548 	struct intel_gvt_gtt_entry *entry, bool ips)
549 {
550 	switch (entry->type) {
551 	case GTT_TYPE_PPGTT_PDE_ENTRY:
552 	case GTT_TYPE_PPGTT_PDP_ENTRY:
553 		if (pte_ops->test_pse(entry))
554 			entry->type = get_pse_type(entry->type);
555 		break;
556 	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
557 		if (ips)
558 			entry->type = get_pse_type(entry->type);
559 		break;
560 	default:
561 		GEM_BUG_ON(!gtt_type_is_entry(entry->type));
562 	}
563 
564 	GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
565 }
566 
567 /*
568  * MM helpers.
569  */
570 static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
571 		struct intel_gvt_gtt_entry *entry, unsigned long index,
572 		bool guest)
573 {
574 	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
575 
576 	GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);
577 
578 	entry->type = mm->ppgtt_mm.root_entry_type;
579 	pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
580 			   mm->ppgtt_mm.shadow_pdps,
581 			   entry, index, false, 0, mm->vgpu);
582 	update_entry_type_for_real(pte_ops, entry, false);
583 }
584 
585 static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
586 		struct intel_gvt_gtt_entry *entry, unsigned long index)
587 {
588 	_ppgtt_get_root_entry(mm, entry, index, true);
589 }
590 
591 static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
592 		struct intel_gvt_gtt_entry *entry, unsigned long index)
593 {
594 	_ppgtt_get_root_entry(mm, entry, index, false);
595 }
596 
597 static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
598 		struct intel_gvt_gtt_entry *entry, unsigned long index,
599 		bool guest)
600 {
601 	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
602 
603 	pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
604 			   mm->ppgtt_mm.shadow_pdps,
605 			   entry, index, false, 0, mm->vgpu);
606 }
607 
608 static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
609 		struct intel_gvt_gtt_entry *entry, unsigned long index)
610 {
611 	_ppgtt_set_root_entry(mm, entry, index, false);
612 }
613 
614 static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
615 		struct intel_gvt_gtt_entry *entry, unsigned long index)
616 {
617 	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
618 
619 	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
620 
621 	entry->type = GTT_TYPE_GGTT_PTE;
622 	pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
623 			   false, 0, mm->vgpu);
624 }
625 
626 static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
627 		struct intel_gvt_gtt_entry *entry, unsigned long index)
628 {
629 	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
630 
631 	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
632 
633 	pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
634 			   false, 0, mm->vgpu);
635 }
636 
637 static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
638 		struct intel_gvt_gtt_entry *entry, unsigned long index)
639 {
640 	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
641 
642 	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
643 
644 	pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
645 }
646 
647 static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
648 		struct intel_gvt_gtt_entry *entry, unsigned long index)
649 {
650 	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
651 	unsigned long offset = index;
652 
653 	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
654 
655 	if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
656 		offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
657 		mm->ggtt_mm.host_ggtt_aperture[offset] = entry->val64;
658 	} else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
659 		offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
660 		mm->ggtt_mm.host_ggtt_hidden[offset] = entry->val64;
661 	}
662 
663 	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
664 }
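/*
 * Besides writing the hardware GGTT, ggtt_set_host_entry() mirrors the
 * value into host_ggtt_aperture/host_ggtt_hidden, indexed from the start
 * of the vGPU's aperture and hidden ranges, presumably so the vGPU's GGTT
 * state can be re-applied later without reading it back from hardware
 * (the consumers of these arrays live outside this section).
 */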
665 
666 /*
667  * PPGTT shadow page table helpers.
668  */
669 static inline int ppgtt_spt_get_entry(
670 		struct intel_vgpu_ppgtt_spt *spt,
671 		void *page_table, int type,
672 		struct intel_gvt_gtt_entry *e, unsigned long index,
673 		bool guest)
674 {
675 	struct intel_gvt *gvt = spt->vgpu->gvt;
676 	const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
677 	int ret;
678 
679 	e->type = get_entry_type(type);
680 
681 	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
682 		return -EINVAL;
683 
684 	ret = ops->get_entry(page_table, e, index, guest,
685 			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
686 			spt->vgpu);
687 	if (ret)
688 		return ret;
689 
690 	update_entry_type_for_real(ops, e, guest ?
691 				   spt->guest_page.pde_ips : false);
692 
693 	gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
694 		    type, e->type, index, e->val64);
695 	return 0;
696 }
697 
698 static inline int ppgtt_spt_set_entry(
699 		struct intel_vgpu_ppgtt_spt *spt,
700 		void *page_table, int type,
701 		struct intel_gvt_gtt_entry *e, unsigned long index,
702 		bool guest)
703 {
704 	struct intel_gvt *gvt = spt->vgpu->gvt;
705 	const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
706 
707 	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
708 		return -EINVAL;
709 
710 	gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
711 		    type, e->type, index, e->val64);
712 
713 	return ops->set_entry(page_table, e, index, guest,
714 			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
715 			spt->vgpu);
716 }
717 
718 #define ppgtt_get_guest_entry(spt, e, index) \
719 	ppgtt_spt_get_entry(spt, NULL, \
720 		spt->guest_page.type, e, index, true)
721 
722 #define ppgtt_set_guest_entry(spt, e, index) \
723 	ppgtt_spt_set_entry(spt, NULL, \
724 		spt->guest_page.type, e, index, true)
725 
726 #define ppgtt_get_shadow_entry(spt, e, index) \
727 	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
728 		spt->shadow_page.type, e, index, false)
729 
730 #define ppgtt_set_shadow_entry(spt, e, index) \
731 	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
732 		spt->shadow_page.type, e, index, false)
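/*
 * The four macros above select between the two views of a PPGTT page: the
 * guest variants pass a NULL page table pointer and guest == true, so the
 * entry is fetched from guest memory at guest_page.gfn via
 * intel_gvt_read_gpa()/intel_gvt_write_gpa(), while the shadow variants
 * operate directly on shadow_page.vaddr, the host-side page backing the
 * shadow page table.
 */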
733 
734 static void *alloc_spt(gfp_t gfp_mask)
735 {
736 	struct intel_vgpu_ppgtt_spt *spt;
737 
738 	spt = kzalloc(sizeof(*spt), gfp_mask);
739 	if (!spt)
740 		return NULL;
741 
742 	spt->shadow_page.page = alloc_page(gfp_mask);
743 	if (!spt->shadow_page.page) {
744 		kfree(spt);
745 		return NULL;
746 	}
747 	return spt;
748 }
749 
750 static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
751 {
752 	__free_page(spt->shadow_page.page);
753 	kfree(spt);
754 }
755 
756 static int detach_oos_page(struct intel_vgpu *vgpu,
757 		struct intel_vgpu_oos_page *oos_page);
758 
759 static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
760 {
761 	struct device *kdev = spt->vgpu->gvt->gt->i915->drm.dev;
762 
763 	trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
764 
765 	dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
766 		       DMA_BIDIRECTIONAL);
767 
768 	radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
769 
770 	if (spt->guest_page.gfn) {
771 		if (spt->guest_page.oos_page)
772 			detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
773 
774 		intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
775 	}
776 
777 	list_del_init(&spt->post_shadow_list);
778 	free_spt(spt);
779 }
780 
781 static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
782 {
783 	struct intel_vgpu_ppgtt_spt *spt, *spn;
784 	struct radix_tree_iter iter;
785 	DRM_LIST_HEAD(all_spt);
786 	void __rcu **slot;
787 
788 	rcu_read_lock();
789 	radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
790 		spt = radix_tree_deref_slot(slot);
791 		list_move(&spt->post_shadow_list, &all_spt);
792 	}
793 	rcu_read_unlock();
794 
795 	list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
796 		ppgtt_free_spt(spt);
797 }
798 
799 static int ppgtt_handle_guest_write_page_table_bytes(
800 		struct intel_vgpu_ppgtt_spt *spt,
801 		u64 pa, void *p_data, int bytes);
802 
803 static int ppgtt_write_protection_handler(
804 		struct intel_vgpu_page_track *page_track,
805 		u64 gpa, void *data, int bytes)
806 {
807 	struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;
808 
809 	int ret;
810 
811 	if (bytes != 4 && bytes != 8)
812 		return -EINVAL;
813 
814 	ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
815 	return ret;
818 }
819 
820 /* Find a spt by guest gfn. */
821 static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
822 		struct intel_vgpu *vgpu, unsigned long gfn)
823 {
824 	struct intel_vgpu_page_track *track;
825 
826 	track = intel_vgpu_find_page_track(vgpu, gfn);
827 	if (track && track->handler == ppgtt_write_protection_handler)
828 		return track->priv_data;
829 
830 	return NULL;
831 }
832 
833 /* Find the spt by shadow page mfn. */
834 static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
835 		struct intel_vgpu *vgpu, unsigned long mfn)
836 {
837 	return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
838 }
839 
840 static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
841 
842 /* Allocate shadow page table without guest page. */
843 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
844 		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
845 {
846 	struct device *kdev = vgpu->gvt->gt->i915->drm.dev;
847 	struct intel_vgpu_ppgtt_spt *spt = NULL;
848 	dma_addr_t daddr;
849 	int ret;
850 
851 retry:
852 	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
853 	if (!spt) {
854 		if (reclaim_one_ppgtt_mm(vgpu->gvt))
855 			goto retry;
856 
857 		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
858 		return ERR_PTR(-ENOMEM);
859 	}
860 
861 	spt->vgpu = vgpu;
862 	atomic_set(&spt->refcount, 1);
863 	INIT_LIST_HEAD(&spt->post_shadow_list);
864 
865 	/*
866 	 * Init shadow_page.
867 	 */
868 	spt->shadow_page.type = type;
869 	daddr = dma_map_page(kdev, spt->shadow_page.page,
870 			     0, 4096, DMA_BIDIRECTIONAL);
871 	if (dma_mapping_error(kdev, daddr)) {
872 		gvt_vgpu_err("fail to map dma addr\n");
873 		ret = -EINVAL;
874 		goto err_free_spt;
875 	}
876 	spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
877 	spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;
878 
879 	ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
880 	if (ret)
881 		goto err_unmap_dma;
882 
883 	return spt;
884 
885 err_unmap_dma:
886 	dma_unmap_page(kdev, daddr, PAGE_SIZE, DMA_BIDIRECTIONAL);
887 err_free_spt:
888 	free_spt(spt);
889 	return ERR_PTR(ret);
890 }
891 
892 /* Allocate shadow page table associated with specific gfn. */
893 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
894 		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
895 		unsigned long gfn, bool guest_pde_ips)
896 {
897 	struct intel_vgpu_ppgtt_spt *spt;
898 	int ret;
899 
900 	spt = ppgtt_alloc_spt(vgpu, type);
901 	if (IS_ERR(spt))
902 		return spt;
903 
904 	/*
905 	 * Init guest_page.
906 	 */
907 	ret = intel_vgpu_register_page_track(vgpu, gfn,
908 			ppgtt_write_protection_handler, spt);
909 	if (ret) {
910 		ppgtt_free_spt(spt);
911 		return ERR_PTR(ret);
912 	}
913 
914 	spt->guest_page.type = type;
915 	spt->guest_page.gfn = gfn;
916 	spt->guest_page.pde_ips = guest_pde_ips;
917 
918 	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
919 
920 	return spt;
921 }
922 
923 #define pt_entry_size_shift(spt) \
924 	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
925 
926 #define pt_entries(spt) \
927 	(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
928 
929 #define for_each_present_guest_entry(spt, e, i) \
930 	for (i = 0; i < pt_entries(spt); \
931 	     i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
932 		if (!ppgtt_get_guest_entry(spt, e, i) && \
933 		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
934 
935 #define for_each_present_shadow_entry(spt, e, i) \
936 	for (i = 0; i < pt_entries(spt); \
937 	     i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
938 		if (!ppgtt_get_shadow_entry(spt, e, i) && \
939 		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
940 
941 #define for_each_shadow_entry(spt, e, i) \
942 	for (i = 0; i < pt_entries(spt); \
943 	     i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
944 		if (!ppgtt_get_shadow_entry(spt, e, i))
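/*
 * The iterators above step by GTT_64K_PTE_STRIDE (16) entries when the
 * page's pde_ips flag is set, because with 64K pages only every 16th PTE
 * slot is meaningful; otherwise they visit every entry.  The "present"
 * variants additionally skip entries whose present bit is clear.
 */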
945 
946 static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
947 {
948 	int v = atomic_read(&spt->refcount);
949 
950 	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
951 	atomic_inc(&spt->refcount);
952 }
953 
954 static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
955 {
956 	int v = atomic_read(&spt->refcount);
957 
958 	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
959 	return atomic_dec_return(&spt->refcount);
960 }
961 
962 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
963 
964 static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
965 		struct intel_gvt_gtt_entry *e)
966 {
967 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
968 	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
969 	struct intel_vgpu_ppgtt_spt *s;
970 	enum intel_gvt_gtt_type cur_pt_type;
971 
972 	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));
973 
974 	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
975 		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
976 		cur_pt_type = get_next_pt_type(e->type);
977 
978 		if (!gtt_type_is_pt(cur_pt_type) ||
979 				!gtt_type_is_pt(cur_pt_type + 1)) {
980 			drm_WARN(&i915->drm, 1,
981 				 "Invalid page table type, cur_pt_type is: %d\n",
982 				 cur_pt_type);
983 			return -EINVAL;
984 		}
985 
986 		cur_pt_type += 1;
987 
988 		if (ops->get_pfn(e) ==
989 			vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
990 			return 0;
991 	}
992 	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
993 	if (!s) {
994 		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
995 				ops->get_pfn(e));
996 		return -ENXIO;
997 	}
998 	return ppgtt_invalidate_spt(s);
999 }
1000 
1001 static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
1002 		struct intel_gvt_gtt_entry *entry)
1003 {
1004 	struct intel_vgpu *vgpu = spt->vgpu;
1005 	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1006 	unsigned long pfn;
1007 	int type;
1008 
1009 	pfn = ops->get_pfn(entry);
1010 	type = spt->shadow_page.type;
1011 
1012 	/* Uninitialized spte or unshadowed spte. */
1013 	if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
1014 		return;
1015 
1016 	intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
1017 }
1018 
1019 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
1020 {
1021 	struct intel_vgpu *vgpu = spt->vgpu;
1022 	struct intel_gvt_gtt_entry e;
1023 	unsigned long index;
1024 	int ret;
1025 
1026 	trace_spt_change(spt->vgpu->id, "die", spt,
1027 			spt->guest_page.gfn, spt->shadow_page.type);
1028 
1029 	if (ppgtt_put_spt(spt) > 0)
1030 		return 0;
1031 
1032 	for_each_present_shadow_entry(spt, &e, index) {
1033 		switch (e.type) {
1034 		case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
1035 			gvt_vdbg_mm("invalidate 4K entry\n");
1036 			ppgtt_invalidate_pte(spt, &e);
1037 			break;
1038 		case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
1039 			/* We don't set up 64K shadow entries so far. */
1040 			WARN(1, "suspicious 64K gtt entry\n");
1041 			continue;
1042 		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
1043 			gvt_vdbg_mm("invalidate 2M entry\n");
1044 			continue;
1045 		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
1046 			WARN(1, "GVT doesn't support 1GB page\n");
1047 			continue;
1048 		case GTT_TYPE_PPGTT_PML4_ENTRY:
1049 		case GTT_TYPE_PPGTT_PDP_ENTRY:
1050 		case GTT_TYPE_PPGTT_PDE_ENTRY:
1051 			gvt_vdbg_mm("invalidate PML4/PDP/PDE entry\n");
1052 			ret = ppgtt_invalidate_spt_by_shadow_entry(
1053 					spt->vgpu, &e);
1054 			if (ret)
1055 				goto fail;
1056 			break;
1057 		default:
1058 			GEM_BUG_ON(1);
1059 		}
1060 	}
1061 
1062 	trace_spt_change(spt->vgpu->id, "release", spt,
1063 			 spt->guest_page.gfn, spt->shadow_page.type);
1064 	ppgtt_free_spt(spt);
1065 	return 0;
1066 fail:
1067 	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
1068 			spt, e.val64, e.type);
1069 	return ret;
1070 }
1071 
1072 static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
1073 {
1074 	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1075 
1076 	if (GRAPHICS_VER(dev_priv) == 9) {
1077 		u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
1078 			GAMW_ECO_ENABLE_64K_IPS_FIELD;
1079 
1080 		return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
1081 	} else if (GRAPHICS_VER(dev_priv) >= 11) {
1082 		/* 64K paging is now controlled only by the IPS bit in the PTE. */
1083 		return true;
1084 	} else
1085 		return false;
1086 }
1087 
1088 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
1089 
1090 static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
1091 		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
1092 {
1093 	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1094 	struct intel_vgpu_ppgtt_spt *spt = NULL;
1095 	bool ips = false;
1096 	int ret;
1097 
1098 	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));
1099 
1100 	if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
1101 		ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
1102 
1103 	spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
1104 	if (spt) {
1105 		ppgtt_get_spt(spt);
1106 
1107 		if (ips != spt->guest_page.pde_ips) {
1108 			spt->guest_page.pde_ips = ips;
1109 
1110 			gvt_dbg_mm("reshadow PDE since ips changed\n");
1111 			clear_page(spt->shadow_page.vaddr);
1112 			ret = ppgtt_populate_spt(spt);
1113 			if (ret) {
1114 				ppgtt_put_spt(spt);
1115 				goto err;
1116 			}
1117 		}
1118 	} else {
1119 		int type = get_next_pt_type(we->type);
1120 
1121 		if (!gtt_type_is_pt(type)) {
1122 			ret = -EINVAL;
1123 			goto err;
1124 		}
1125 
1126 		spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
1127 		if (IS_ERR(spt)) {
1128 			ret = PTR_ERR(spt);
1129 			goto err;
1130 		}
1131 
1132 		ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
1133 		if (ret)
1134 			goto err_free_spt;
1135 
1136 		ret = ppgtt_populate_spt(spt);
1137 		if (ret)
1138 			goto err_free_spt;
1139 
1140 		trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
1141 				 spt->shadow_page.type);
1142 	}
1143 	return spt;
1144 
1145 err_free_spt:
1146 	ppgtt_free_spt(spt);
1147 	spt = NULL;
1148 err:
1149 	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1150 		     spt, we->val64, we->type);
1151 	return ERR_PTR(ret);
1152 }
1153 
1154 static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
1155 		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
1156 {
1157 	const struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
1158 
1159 	se->type = ge->type;
1160 	se->val64 = ge->val64;
1161 
1162 	/* We always split 64KB pages, so clear IPS in the shadow PDE. */
1163 	if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
1164 		ops->clear_ips(se);
1165 
1166 	ops->set_pfn(se, s->shadow_page.mfn);
1167 }
1168 
1169 /*
1170  * Check whether a 2MB page can be used
1171  * @vgpu: target vgpu
1172  * @entry: the gtt entry holding the target pfn
1173  *
1174  * Return 1 if 2MB huge gtt shadowing is possible, 0 if the conditions are
1175  * not met, negative error code if an error was found.
1176  */
1177 static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
1178 	struct intel_gvt_gtt_entry *entry)
1179 {
1180 	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1181 	kvm_pfn_t pfn;
1182 	int ret;
1183 
1184 	if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
1185 		return 0;
1186 
1187 	if (!vgpu->attached)
1188 		return -EINVAL;
1189 	pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
1190 	if (is_error_noslot_pfn(pfn))
1191 		return -EINVAL;
1192 
1193 	if (!pfn_valid(pfn))
1194 		return -EINVAL;
1195 
1196 	ret = PageTransHuge(pfn_to_page(pfn));
1197 	kvm_release_pfn_clean(pfn);
1198 	return ret;
1199 }
1200 
1201 static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
1202 	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1203 	struct intel_gvt_gtt_entry *se)
1204 {
1205 	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1206 	struct intel_vgpu_ppgtt_spt *sub_spt;
1207 	struct intel_gvt_gtt_entry sub_se;
1208 	unsigned long start_gfn;
1209 	dma_addr_t dma_addr;
1210 	unsigned long sub_index;
1211 	int ret;
1212 
1213 	gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);
1214 
1215 	start_gfn = ops->get_pfn(se);
1216 
1217 	sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
1218 	if (IS_ERR(sub_spt))
1219 		return PTR_ERR(sub_spt);
1220 
1221 	for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
1222 		ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
1223 						   PAGE_SIZE, &dma_addr);
1224 		if (ret)
1225 			goto err;
1226 		sub_se.val64 = se->val64;
1227 
1228 		/* Copy the PAT field from PDE. */
1229 		sub_se.val64 &= ~_PAGE_PAT;
1230 		sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;
1231 
1232 		ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
1233 		ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
1234 	}
1235 
1236 	/* Clear dirty field. */
1237 	se->val64 &= ~_PAGE_DIRTY;
1238 
1239 	ops->clear_pse(se);
1240 	ops->clear_ips(se);
1241 	ops->set_pfn(se, sub_spt->shadow_page.mfn);
1242 	ppgtt_set_shadow_entry(spt, se, index);
1243 	return 0;
1244 err:
1245 	/* Cancel the DMA address mappings that were already set up. */
1246 	for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
1247 		gvt_vdbg_mm("invalidate 4K entry\n");
1248 		ppgtt_invalidate_pte(sub_spt, &sub_se);
1249 	}
1250 	/* Release the newly allocated spt. */
1251 	trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
1252 		sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
1253 	ppgtt_free_spt(sub_spt);
1254 	return ret;
1255 }
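/*
 * When 2MB host shadowing is not possible, split_2MB_gtt_entry() above
 * shadows a guest 2MB page as a full PTE page: a sub-SPT is allocated,
 * each of its slots is pointed at the DMA mapping of one 4K guest page,
 * and the shadow PDE is rewritten to reference the sub-SPT with the PSE
 * and IPS bits cleared, so the guest keeps its 2MB mapping while the
 * shadow side uses 4K pages.
 */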
1256 
1257 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
1258 	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1259 	struct intel_gvt_gtt_entry *se)
1260 {
1261 	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1262 	struct intel_gvt_gtt_entry entry = *se;
1263 	unsigned long start_gfn;
1264 	dma_addr_t dma_addr;
1265 	int i, ret;
1266 
1267 	gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);
1268 
1269 	GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);
1270 
1271 	start_gfn = ops->get_pfn(se);
1272 
1273 	entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
1274 	ops->set_64k_splited(&entry);
1275 
1276 	for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
1277 		ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + i,
1278 						   PAGE_SIZE, &dma_addr);
1279 		if (ret)
1280 			return ret;
1281 
1282 		ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
1283 		ppgtt_set_shadow_entry(spt, &entry, index + i);
1284 	}
1285 	return 0;
1286 }
1287 
1288 static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
1289 	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1290 	struct intel_gvt_gtt_entry *ge)
1291 {
1292 	const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
1293 	struct intel_gvt_gtt_entry se = *ge;
1294 	unsigned long gfn, page_size = PAGE_SIZE;
1295 	dma_addr_t dma_addr;
1296 	int ret;
1297 
1298 	if (!pte_ops->test_present(ge))
1299 		return 0;
1300 
1301 	gfn = pte_ops->get_pfn(ge);
1302 
1303 	switch (ge->type) {
1304 	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
1305 		gvt_vdbg_mm("shadow 4K gtt entry\n");
1306 		break;
1307 	case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
1308 		gvt_vdbg_mm("shadow 64K gtt entry\n");
1309 		/*
1310 		 * The layout of a 64K page is special: the page size is
1311 		 * controlled by the upper PDE. To keep it simple, we always
1312 		 * split a 64K page into smaller 4K pages in the shadow PT.
1313 		 */
1314 		return split_64KB_gtt_entry(vgpu, spt, index, &se);
1315 	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
1316 		gvt_vdbg_mm("shadow 2M gtt entry\n");
1317 		ret = is_2MB_gtt_possible(vgpu, ge);
1318 		if (ret == 0)
1319 			return split_2MB_gtt_entry(vgpu, spt, index, &se);
1320 		else if (ret < 0)
1321 			return ret;
1322 		page_size = I915_GTT_PAGE_SIZE_2M;
1323 		break;
1324 	case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
1325 		gvt_vgpu_err("GVT doesn't support 1GB entry\n");
1326 		return -EINVAL;
1327 	default:
1328 		GEM_BUG_ON(1);
1329 	}
1330 
1331 	/* direct shadow */
1332 	ret = intel_gvt_dma_map_guest_page(vgpu, gfn, page_size, &dma_addr);
1333 	if (ret)
1334 		return -ENXIO;
1335 
1336 	pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
1337 	ppgtt_set_shadow_entry(spt, &se, index);
1338 	return 0;
1339 }
1340 
1341 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
1342 {
1343 	struct intel_vgpu *vgpu = spt->vgpu;
1344 	struct intel_gvt *gvt = vgpu->gvt;
1345 	const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1346 	struct intel_vgpu_ppgtt_spt *s;
1347 	struct intel_gvt_gtt_entry se, ge;
1348 	unsigned long gfn, i;
1349 	int ret;
1350 
1351 	trace_spt_change(spt->vgpu->id, "born", spt,
1352 			 spt->guest_page.gfn, spt->shadow_page.type);
1353 
1354 	for_each_present_guest_entry(spt, &ge, i) {
1355 		if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
1356 			s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1357 			if (IS_ERR(s)) {
1358 				ret = PTR_ERR(s);
1359 				goto fail;
1360 			}
1361 			ppgtt_get_shadow_entry(spt, &se, i);
1362 			ppgtt_generate_shadow_entry(&se, s, &ge);
1363 			ppgtt_set_shadow_entry(spt, &se, i);
1364 		} else {
1365 			gfn = ops->get_pfn(&ge);
1366 			if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
1367 				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
1368 				ppgtt_set_shadow_entry(spt, &se, i);
1369 				continue;
1370 			}
1371 
1372 			ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
1373 			if (ret)
1374 				goto fail;
1375 		}
1376 	}
1377 	return 0;
1378 fail:
1379 	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1380 			spt, ge.val64, ge.type);
1381 	return ret;
1382 }
1383 
1384 static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
1385 		struct intel_gvt_gtt_entry *se, unsigned long index)
1386 {
1387 	struct intel_vgpu *vgpu = spt->vgpu;
1388 	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1389 	int ret;
1390 
1391 	trace_spt_guest_change(spt->vgpu->id, "remove", spt,
1392 			       spt->shadow_page.type, se->val64, index);
1393 
1394 	gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
1395 		    se->type, index, se->val64);
1396 
1397 	if (!ops->test_present(se))
1398 		return 0;
1399 
1400 	if (ops->get_pfn(se) ==
1401 	    vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
1402 		return 0;
1403 
1404 	if (gtt_type_is_pt(get_next_pt_type(se->type))) {
1405 		struct intel_vgpu_ppgtt_spt *s =
1406 			intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
1407 		if (!s) {
1408 			gvt_vgpu_err("fail to find guest page\n");
1409 			ret = -ENXIO;
1410 			goto fail;
1411 		}
1412 		ret = ppgtt_invalidate_spt(s);
1413 		if (ret)
1414 			goto fail;
1415 	} else {
1416 		/* We don't set up 64K shadow entries so far. */
1417 		WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
1418 		     "suspicious 64K entry\n");
1419 		ppgtt_invalidate_pte(spt, se);
1420 	}
1421 
1422 	return 0;
1423 fail:
1424 	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1425 			spt, se->val64, se->type);
1426 	return ret;
1427 }
1428 
1429 static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
1430 		struct intel_gvt_gtt_entry *we, unsigned long index)
1431 {
1432 	struct intel_vgpu *vgpu = spt->vgpu;
1433 	struct intel_gvt_gtt_entry m;
1434 	struct intel_vgpu_ppgtt_spt *s;
1435 	int ret;
1436 
1437 	trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
1438 			       we->val64, index);
1439 
1440 	gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
1441 		    we->type, index, we->val64);
1442 
1443 	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
1444 		s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
1445 		if (IS_ERR(s)) {
1446 			ret = PTR_ERR(s);
1447 			goto fail;
1448 		}
1449 		ppgtt_get_shadow_entry(spt, &m, index);
1450 		ppgtt_generate_shadow_entry(&m, s, we);
1451 		ppgtt_set_shadow_entry(spt, &m, index);
1452 	} else {
1453 		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
1454 		if (ret)
1455 			goto fail;
1456 	}
1457 	return 0;
1458 fail:
1459 	gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
1460 		spt, we->val64, we->type);
1461 	return ret;
1462 }
1463 
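/*
 * Out-of-sync (OOS) page handling.  A guest PTE page that is written
 * repeatedly can be taken out of write protection: its current contents
 * are cached in an oos_page buffer, subsequent guest writes go straight to
 * guest memory, and before the next workload is submitted sync_oos_page()
 * compares the cached copy against the live guest page and reshadows only
 * the entries that changed, avoiding a write-protection fault on every
 * guest PTE update.
 */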
1464 static int sync_oos_page(struct intel_vgpu *vgpu,
1465 		struct intel_vgpu_oos_page *oos_page)
1466 {
1467 	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1468 	struct intel_gvt *gvt = vgpu->gvt;
1469 	const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1470 	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
1471 	struct intel_gvt_gtt_entry old, new;
1472 	int index;
1473 	int ret;
1474 
1475 	trace_oos_change(vgpu->id, "sync", oos_page->id,
1476 			 spt, spt->guest_page.type);
1477 
1478 	old.type = new.type = get_entry_type(spt->guest_page.type);
1479 	old.val64 = new.val64 = 0;
1480 
1481 	for (index = 0; index < (I915_GTT_PAGE_SIZE >>
1482 				info->gtt_entry_size_shift); index++) {
1483 		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
1484 		ops->get_entry(NULL, &new, index, true,
1485 			       spt->guest_page.gfn << PAGE_SHIFT, vgpu);
1486 
1487 		if (old.val64 == new.val64
1488 			&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
1489 			continue;
1490 
1491 		trace_oos_sync(vgpu->id, oos_page->id,
1492 				spt, spt->guest_page.type,
1493 				new.val64, index);
1494 
1495 		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
1496 		if (ret)
1497 			return ret;
1498 
1499 		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
1500 	}
1501 
1502 	spt->guest_page.write_cnt = 0;
1503 	list_del_init(&spt->post_shadow_list);
1504 	return 0;
1505 }
1506 
1507 static int detach_oos_page(struct intel_vgpu *vgpu,
1508 		struct intel_vgpu_oos_page *oos_page)
1509 {
1510 	struct intel_gvt *gvt = vgpu->gvt;
1511 	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
1512 
1513 	trace_oos_change(vgpu->id, "detach", oos_page->id,
1514 			 spt, spt->guest_page.type);
1515 
1516 	spt->guest_page.write_cnt = 0;
1517 	spt->guest_page.oos_page = NULL;
1518 	oos_page->spt = NULL;
1519 
1520 	list_del_init(&oos_page->vm_list);
1521 	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
1522 
1523 	return 0;
1524 }
1525 
1526 static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
1527 		struct intel_vgpu_ppgtt_spt *spt)
1528 {
1529 	struct intel_gvt *gvt = spt->vgpu->gvt;
1530 	int ret;
1531 
1532 	ret = intel_gvt_read_gpa(spt->vgpu,
1533 			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
1534 			oos_page->mem, I915_GTT_PAGE_SIZE);
1535 	if (ret)
1536 		return ret;
1537 
1538 	oos_page->spt = spt;
1539 	spt->guest_page.oos_page = oos_page;
1540 
1541 	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
1542 
1543 	trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
1544 			 spt, spt->guest_page.type);
1545 	return 0;
1546 }
1547 
1548 static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
1549 {
1550 	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
1551 	int ret;
1552 
1553 	ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
1554 	if (ret)
1555 		return ret;
1556 
1557 	trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
1558 			 spt, spt->guest_page.type);
1559 
1560 	list_del_init(&oos_page->vm_list);
1561 	return sync_oos_page(spt->vgpu, oos_page);
1562 }
1563 
1564 static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
1565 {
1566 	struct intel_gvt *gvt = spt->vgpu->gvt;
1567 	struct intel_gvt_gtt *gtt = &gvt->gtt;
1568 	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
1569 	int ret;
1570 
1571 	WARN(oos_page, "shadow PPGTT page already has an oos page\n");
1572 
1573 	if (list_empty(&gtt->oos_page_free_list_head)) {
1574 		oos_page = container_of(gtt->oos_page_use_list_head.next,
1575 			struct intel_vgpu_oos_page, list);
1576 		ret = ppgtt_set_guest_page_sync(oos_page->spt);
1577 		if (ret)
1578 			return ret;
1579 		ret = detach_oos_page(spt->vgpu, oos_page);
1580 		if (ret)
1581 			return ret;
1582 	} else
1583 		oos_page = container_of(gtt->oos_page_free_list_head.next,
1584 			struct intel_vgpu_oos_page, list);
1585 	return attach_oos_page(oos_page, spt);
1586 }
1587 
1588 static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
1589 {
1590 	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
1591 
1592 	if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
1593 		return -EINVAL;
1594 
1595 	trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
1596 			 spt, spt->guest_page.type);
1597 
1598 	list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
1599 	return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
1600 }
1601 
1602 /**
1603  * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for a vGPU
1604  * @vgpu: a vGPU
1605  *
1606  * This function is called before submitting a guest workload to the host,
1607  * to sync all the out-of-sync shadow pages for the vGPU.
1608  *
1609  * Returns:
1610  * Zero on success, negative error code if failed.
1611  */
1612 int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
1613 {
1614 	struct list_head *pos, *n;
1615 	struct intel_vgpu_oos_page *oos_page;
1616 	int ret;
1617 
1618 	if (!enable_out_of_sync)
1619 		return 0;
1620 
1621 	list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
1622 		oos_page = container_of(pos,
1623 				struct intel_vgpu_oos_page, vm_list);
1624 		ret = ppgtt_set_guest_page_sync(oos_page->spt);
1625 		if (ret)
1626 			return ret;
1627 	}
1628 	return 0;
1629 }
1630 
1631 /*
1632  * The heart of PPGTT shadow page table.
1633  */
1634 static int ppgtt_handle_guest_write_page_table(
1635 		struct intel_vgpu_ppgtt_spt *spt,
1636 		struct intel_gvt_gtt_entry *we, unsigned long index)
1637 {
1638 	struct intel_vgpu *vgpu = spt->vgpu;
1639 	int type = spt->shadow_page.type;
1640 	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1641 	struct intel_gvt_gtt_entry old_se;
1642 	int new_present;
1643 	int i, ret;
1644 
1645 	new_present = ops->test_present(we);
1646 
1647 	/*
1648 	 * Add the new entry first and then remove the old one, so that the
1649 	 * ppgtt table remains valid during the window between the addition
1650 	 * and the removal.
1651 	 */
1652 	ppgtt_get_shadow_entry(spt, &old_se, index);
1653 
1654 	if (new_present) {
1655 		ret = ppgtt_handle_guest_entry_add(spt, we, index);
1656 		if (ret)
1657 			goto fail;
1658 	}
1659 
1660 	ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index);
1661 	if (ret)
1662 		goto fail;
1663 
1664 	if (!new_present) {
1665 		/* For 64KB split entries, we need to clear them all. */
1666 		if (ops->test_64k_splited(&old_se) &&
1667 		    !(index % GTT_64K_PTE_STRIDE)) {
1668 			gvt_vdbg_mm("remove split 64K shadow entries\n");
1669 			for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
1670 				ops->clear_64k_splited(&old_se);
1671 				ops->set_pfn(&old_se,
1672 					vgpu->gtt.scratch_pt[type].page_mfn);
1673 				ppgtt_set_shadow_entry(spt, &old_se, index + i);
1674 			}
1675 		} else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
1676 			   old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
1677 			ops->clear_pse(&old_se);
1678 			ops->set_pfn(&old_se,
1679 				     vgpu->gtt.scratch_pt[type].page_mfn);
1680 			ppgtt_set_shadow_entry(spt, &old_se, index);
1681 		} else {
1682 			ops->set_pfn(&old_se,
1683 				     vgpu->gtt.scratch_pt[type].page_mfn);
1684 			ppgtt_set_shadow_entry(spt, &old_se, index);
1685 		}
1686 	}
1687 
1688 	return 0;
1689 fail:
1690 	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
1691 			spt, we->val64, we->type);
1692 	return ret;
1693 }
1694 
1695 
1696 
1697 static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
1698 {
1699 	return enable_out_of_sync
1700 		&& gtt_type_is_pte_pt(spt->guest_page.type)
1701 		&& spt->guest_page.write_cnt >= 2;
1702 }
1703 
1704 static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
1705 		unsigned long index)
1706 {
1707 	set_bit(index, spt->post_shadow_bitmap);
1708 	if (!list_empty(&spt->post_shadow_list))
1709 		return;
1710 
1711 	list_add_tail(&spt->post_shadow_list,
1712 			&spt->vgpu->gtt.post_shadow_list_head);
1713 }
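/*
 * Partial writes (smaller than one GTT entry) cannot be shadowed
 * immediately, so ppgtt_set_post_shadow() records the entry index in
 * post_shadow_bitmap and queues the spt on the vGPU's post shadow list;
 * intel_vgpu_flush_post_shadow() below replays those entries before a
 * workload is submitted.
 */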
1714 
1715 /**
1716  * intel_vgpu_flush_post_shadow - flush the post shadow transactions
1717  * @vgpu: a vGPU
1718  *
1719  * This function is called before submitting a guest workload to the host,
1720  * to flush all the post shadow transactions for the vGPU.
1721  *
1722  * Returns:
1723  * Zero on success, negative error code if failed.
1724  */
1725 int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
1726 {
1727 	struct list_head *pos, *n;
1728 	struct intel_vgpu_ppgtt_spt *spt;
1729 	struct intel_gvt_gtt_entry ge;
1730 	unsigned long index;
1731 	int ret;
1732 
1733 	list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
1734 		spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
1735 				post_shadow_list);
1736 
1737 		for_each_set_bit(index, spt->post_shadow_bitmap,
1738 				GTT_ENTRY_NUM_IN_ONE_PAGE) {
1739 			ppgtt_get_guest_entry(spt, &ge, index);
1740 
1741 			ret = ppgtt_handle_guest_write_page_table(spt,
1742 							&ge, index);
1743 			if (ret)
1744 				return ret;
1745 			clear_bit(index, spt->post_shadow_bitmap);
1746 		}
1747 		list_del_init(&spt->post_shadow_list);
1748 	}
1749 	return 0;
1750 }
1751 
1752 static int ppgtt_handle_guest_write_page_table_bytes(
1753 		struct intel_vgpu_ppgtt_spt *spt,
1754 		u64 pa, void *p_data, int bytes)
1755 {
1756 	struct intel_vgpu *vgpu = spt->vgpu;
1757 	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1758 	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1759 	struct intel_gvt_gtt_entry we, se;
1760 	unsigned long index;
1761 	int ret;
1762 
1763 	index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
1764 
1765 	ppgtt_get_guest_entry(spt, &we, index);
1766 
1767 	/*
1768 	 * For a page table that holds 64K gtt entries, only PTE#0, PTE#16,
1769 	 * PTE#32, ... PTE#496 are used. Updates to the unused PTEs should be
1770 	 * ignored.
1771 	 */
1772 	if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
1773 	    (index % GTT_64K_PTE_STRIDE)) {
1774 		gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
1775 			    index);
1776 		return 0;
1777 	}
1778 
1779 	if (bytes == info->gtt_entry_size) {
1780 		ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
1781 		if (ret)
1782 			return ret;
1783 	} else {
1784 		if (!test_bit(index, spt->post_shadow_bitmap)) {
1785 			int type = spt->shadow_page.type;
1786 
1787 			ppgtt_get_shadow_entry(spt, &se, index);
1788 			ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
1789 			if (ret)
1790 				return ret;
1791 			ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
1792 			ppgtt_set_shadow_entry(spt, &se, index);
1793 		}
1794 		ppgtt_set_post_shadow(spt, index);
1795 	}
1796 
1797 	if (!enable_out_of_sync)
1798 		return 0;
1799 
1800 	spt->guest_page.write_cnt++;
1801 
1802 	if (spt->guest_page.oos_page)
1803 		ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
1804 				false, 0, vgpu);
1805 
1806 	if (can_do_out_of_sync(spt)) {
1807 		if (!spt->guest_page.oos_page)
1808 			ppgtt_allocate_oos_page(spt);
1809 
1810 		ret = ppgtt_set_guest_page_oos(spt);
1811 		if (ret < 0)
1812 			return ret;
1813 	}
1814 	return 0;
1815 }
1816 
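/*
 * Tear down the shadow of a PPGTT mm: walk its shadow root entries,
 * invalidate every present shadow page table beneath them, clear the root
 * pointers and mark the mm as no longer shadowed.
 */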
1817 static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
1818 {
1819 	struct intel_vgpu *vgpu = mm->vgpu;
1820 	struct intel_gvt *gvt = vgpu->gvt;
1821 	struct intel_gvt_gtt *gtt = &gvt->gtt;
1822 	const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1823 	struct intel_gvt_gtt_entry se;
1824 	int index;
1825 
1826 	if (!mm->ppgtt_mm.shadowed)
1827 		return;
1828 
1829 	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
1830 		ppgtt_get_shadow_root_entry(mm, &se, index);
1831 
1832 		if (!ops->test_present(&se))
1833 			continue;
1834 
1835 		ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
1836 		se.val64 = 0;
1837 		ppgtt_set_shadow_root_entry(mm, &se, index);
1838 
1839 		trace_spt_guest_change(vgpu->id, "destroy root pointer",
1840 				       NULL, se.type, se.val64, index);
1841 	}
1842 
1843 	mm->ppgtt_mm.shadowed = false;
1844 }
1846 
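/*
 * Populate the shadow of a PPGTT mm: for every present guest root entry,
 * build the corresponding shadow page table tree and install its shadow
 * root pointer.  On failure, the partially built shadow is torn down again.
 */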
1847 static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
1848 {
1849 	struct intel_vgpu *vgpu = mm->vgpu;
1850 	struct intel_gvt *gvt = vgpu->gvt;
1851 	struct intel_gvt_gtt *gtt = &gvt->gtt;
1852 	const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1853 	struct intel_vgpu_ppgtt_spt *spt;
1854 	struct intel_gvt_gtt_entry ge, se;
1855 	int index, ret;
1856 
1857 	if (mm->ppgtt_mm.shadowed)
1858 		return 0;
1859 
1860 	mm->ppgtt_mm.shadowed = true;
1861 
1862 	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
1863 		ppgtt_get_guest_root_entry(mm, &ge, index);
1864 
1865 		if (!ops->test_present(&ge))
1866 			continue;
1867 
1868 		trace_spt_guest_change(vgpu->id, __func__, NULL,
1869 				       ge.type, ge.val64, index);
1870 
1871 		spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1872 		if (IS_ERR(spt)) {
1873 			gvt_vgpu_err("fail to populate guest root pointer\n");
1874 			ret = PTR_ERR(spt);
1875 			goto fail;
1876 		}
1877 		ppgtt_generate_shadow_entry(&se, spt, &ge);
1878 		ppgtt_set_shadow_root_entry(mm, &se, index);
1879 
1880 		trace_spt_guest_change(vgpu->id, "populate root pointer",
1881 				       NULL, se.type, se.val64, index);
1882 	}
1883 
1884 	return 0;
1885 fail:
1886 	invalidate_ppgtt_mm(mm);
1887 	return ret;
1888 }
1889 
1890 static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
1891 {
1892 	struct intel_vgpu_mm *mm;
1893 
1894 	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
1895 	if (!mm)
1896 		return NULL;
1897 
1898 	mm->vgpu = vgpu;
1899 	kref_init(&mm->ref);
1900 	atomic_set(&mm->pincount, 0);
1901 
1902 	return mm;
1903 }
1904 
1905 static void vgpu_free_mm(struct intel_vgpu_mm *mm)
1906 {
1907 	kfree(mm);
1908 }
1909 
1910 /**
1911  * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
1912  * @vgpu: a vGPU
1913  * @root_entry_type: ppgtt root entry type
1914  * @pdps: guest pdps.
1915  *
1916  * This function is used to create a ppgtt mm object for a vGPU.
1917  *
1918  * Returns:
1919  * Pointer to the new mm object on success, ERR_PTR-encoded error code if failed.
1920  */
1921 struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
1922 		enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
1923 {
1924 	struct intel_gvt *gvt = vgpu->gvt;
1925 	struct intel_vgpu_mm *mm;
1926 	int ret;
1927 
1928 	mm = vgpu_alloc_mm(vgpu);
1929 	if (!mm)
1930 		return ERR_PTR(-ENOMEM);
1931 
1932 	mm->type = INTEL_GVT_MM_PPGTT;
1933 
1934 	GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
1935 		   root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
1936 	mm->ppgtt_mm.root_entry_type = root_entry_type;
1937 
1938 	INIT_LIST_HEAD(&mm->ppgtt_mm.list);
1939 	INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
1940 	INIT_LIST_HEAD(&mm->ppgtt_mm.link);
1941 
1942 	if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
1943 		mm->ppgtt_mm.guest_pdps[0] = pdps[0];
1944 	else
1945 		memcpy(mm->ppgtt_mm.guest_pdps, pdps,
1946 		       sizeof(mm->ppgtt_mm.guest_pdps));
1947 
1948 	ret = shadow_ppgtt_mm(mm);
1949 	if (ret) {
1950 		gvt_vgpu_err("failed to shadow ppgtt mm\n");
1951 		vgpu_free_mm(mm);
1952 		return ERR_PTR(ret);
1953 	}
1954 
1955 	list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
1956 
1957 	mutex_lock(&gvt->gtt.ppgtt_mm_lock);
1958 	list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
1959 	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
1960 
1961 	return mm;
1962 }
1963 
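/*
 * Create the single GGTT mm object of a vGPU.  virtual_ggtt holds the
 * guest's view of the GGTT entries, while host_ggtt_aperture and
 * host_ggtt_hidden cache the host PTEs so they can be rewritten after
 * suspend/resume (see intel_gvt_restore_ggtt()).
 */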
1964 static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
1965 {
1966 	struct intel_vgpu_mm *mm;
1967 	unsigned long nr_entries;
1968 
1969 	mm = vgpu_alloc_mm(vgpu);
1970 	if (!mm)
1971 		return ERR_PTR(-ENOMEM);
1972 
1973 	mm->type = INTEL_GVT_MM_GGTT;
1974 
1975 	nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
1976 	mm->ggtt_mm.virtual_ggtt =
1977 		vzalloc(array_size(nr_entries,
1978 				   vgpu->gvt->device_info.gtt_entry_size));
1979 	if (!mm->ggtt_mm.virtual_ggtt) {
1980 		vgpu_free_mm(mm);
1981 		return ERR_PTR(-ENOMEM);
1982 	}
1983 
1984 	mm->ggtt_mm.host_ggtt_aperture = vzalloc((vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
1985 	if (!mm->ggtt_mm.host_ggtt_aperture) {
1986 		vfree(mm->ggtt_mm.virtual_ggtt);
1987 		vgpu_free_mm(mm);
1988 		return ERR_PTR(-ENOMEM);
1989 	}
1990 
1991 	mm->ggtt_mm.host_ggtt_hidden = vzalloc((vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
1992 	if (!mm->ggtt_mm.host_ggtt_hidden) {
1993 		vfree(mm->ggtt_mm.host_ggtt_aperture);
1994 		vfree(mm->ggtt_mm.virtual_ggtt);
1995 		vgpu_free_mm(mm);
1996 		return ERR_PTR(-ENOMEM);
1997 	}
1998 
1999 	return mm;
2000 }
2001 
2002 /**
2003  * _intel_vgpu_mm_release - destroy a mm object
2004  * @mm_ref: a kref object
2005  *
2006  * This function is used to destroy a mm object for a vGPU.
2007  *
2008  */
2009 void _intel_vgpu_mm_release(struct kref *mm_ref)
2010 {
2011 	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
2012 
2013 	if (GEM_WARN_ON(atomic_read(&mm->pincount)))
2014 		gvt_err("vgpu mm pin count bug detected\n");
2015 
2016 	if (mm->type == INTEL_GVT_MM_PPGTT) {
2017 		list_del(&mm->ppgtt_mm.list);
2018 
2019 		mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2020 		list_del(&mm->ppgtt_mm.lru_list);
2021 		mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2022 
2023 		invalidate_ppgtt_mm(mm);
2024 	} else {
2025 		vfree(mm->ggtt_mm.virtual_ggtt);
2026 		vfree(mm->ggtt_mm.host_ggtt_aperture);
2027 		vfree(mm->ggtt_mm.host_ggtt_hidden);
2028 	}
2029 
2030 	vgpu_free_mm(mm);
2031 }
2032 
2033 /**
2034  * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
2035  * @mm: a vGPU mm object
2036  *
2037  * This function is called when the user no longer needs a vGPU mm object.
2038  */
2039 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
2040 {
2041 	atomic_dec_if_positive(&mm->pincount);
2042 }
2043 
2044 /**
2045  * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
2046  * @mm: target vgpu mm
2047  *
2048  * This function is called when the user wants to use a vGPU mm object. If the
2049  * mm object hasn't been shadowed yet, the shadow is populated at this
2050  * time.
2051  *
2052  * Returns:
2053  * Zero on success, negative error code if failed.
2054  */
2055 int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
2056 {
2057 	int ret;
2058 
2059 	atomic_inc(&mm->pincount);
2060 
2061 	if (mm->type == INTEL_GVT_MM_PPGTT) {
2062 		ret = shadow_ppgtt_mm(mm);
2063 		if (ret)
2064 			return ret;
2065 
2066 		mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2067 		list_move_tail(&mm->ppgtt_mm.lru_list,
2068 			       &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
2069 		mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2070 	}
2071 
2072 	return 0;
2073 }
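/*
 * Illustrative usage only (a minimal sketch, not code from this file): a
 * caller such as the workload scheduler is expected to pair the calls,
 * roughly like
 *
 *	if (intel_vgpu_pin_mm(mm) == 0) {
 *		... use the shadowed mm ...
 *		intel_vgpu_unpin_mm(mm);
 *	}
 */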
2074 
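/*
 * Reclaim shadow page table memory by invalidating the least recently used
 * PPGTT mm that is not currently pinned.  Returns 1 if one was reclaimed,
 * 0 if nothing could be freed.
 */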
2075 static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
2076 {
2077 	struct intel_vgpu_mm *mm;
2078 	struct list_head *pos, *n;
2079 
2080 	mutex_lock(&gvt->gtt.ppgtt_mm_lock);
2081 
2082 	list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
2083 		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
2084 
2085 		if (atomic_read(&mm->pincount))
2086 			continue;
2087 
2088 		list_del_init(&mm->ppgtt_mm.lru_list);
2089 		mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
2090 		invalidate_ppgtt_mm(mm);
2091 		return 1;
2092 	}
2093 	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
2094 	return 0;
2095 }
2096 
2097 /*
2098  * GMA translation APIs.
2099  */
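/*
 * Descend one level of the page table walk: look up the SPT whose shadow
 * page the current entry points to, then read either its shadow or its
 * guest entry at 'index' back into *e.
 */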
2100 static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
2101 		struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
2102 {
2103 	struct intel_vgpu *vgpu = mm->vgpu;
2104 	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2105 	struct intel_vgpu_ppgtt_spt *s;
2106 
2107 	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
2108 	if (!s)
2109 		return -ENXIO;
2110 
2111 	if (!guest)
2112 		ppgtt_get_shadow_entry(s, e, index);
2113 	else
2114 		ppgtt_get_guest_entry(s, e, index);
2115 	return 0;
2116 }
2117 
2118 /**
2119  * intel_vgpu_gma_to_gpa - translate a gma to GPA
2120  * @mm: mm object. could be a PPGTT or GGTT mm object
2121  * @gma: graphics memory address in this mm object
2122  *
2123  * This function is used to translate a graphics memory address in a specific
2124  * graphics memory space to a guest physical address.
2125  *
2126  * Returns:
2127  * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
2128  */
2129 unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
2130 {
2131 	struct intel_vgpu *vgpu = mm->vgpu;
2132 	struct intel_gvt *gvt = vgpu->gvt;
2133 	const struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
2134 	const struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
2135 	unsigned long gpa = INTEL_GVT_INVALID_ADDR;
2136 	unsigned long gma_index[4];
2137 	struct intel_gvt_gtt_entry e;
2138 	int i, levels = 0;
2139 	int ret;
2140 
2141 	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
2142 		   mm->type != INTEL_GVT_MM_PPGTT);
2143 
2144 	if (mm->type == INTEL_GVT_MM_GGTT) {
2145 		if (!vgpu_gmadr_is_valid(vgpu, gma))
2146 			goto err;
2147 
2148 		ggtt_get_guest_entry(mm, &e,
2149 			gma_ops->gma_to_ggtt_pte_index(gma));
2150 
2151 		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
2152 			+ (gma & ~I915_GTT_PAGE_MASK);
2153 
2154 		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
2155 	} else {
2156 		switch (mm->ppgtt_mm.root_entry_type) {
2157 		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
2158 			ppgtt_get_shadow_root_entry(mm, &e, 0);
2159 
2160 			gma_index[0] = gma_ops->gma_to_pml4_index(gma);
2161 			gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
2162 			gma_index[2] = gma_ops->gma_to_pde_index(gma);
2163 			gma_index[3] = gma_ops->gma_to_pte_index(gma);
2164 			levels = 4;
2165 			break;
2166 		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
2167 			ppgtt_get_shadow_root_entry(mm, &e,
2168 					gma_ops->gma_to_l3_pdp_index(gma));
2169 
2170 			gma_index[0] = gma_ops->gma_to_pde_index(gma);
2171 			gma_index[1] = gma_ops->gma_to_pte_index(gma);
2172 			levels = 2;
2173 			break;
2174 		default:
2175 			GEM_BUG_ON(1);
2176 		}
2177 
2178 		/* walk the shadow page table and get gpa from guest entry */
2179 		for (i = 0; i < levels; i++) {
2180 			ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
2181 				(i == levels - 1));
2182 			if (ret)
2183 				goto err;
2184 
2185 			if (!pte_ops->test_present(&e)) {
2186 				gvt_dbg_core("GMA 0x%lx is not present\n", gma);
2187 				goto err;
2188 			}
2189 		}
2190 
2191 		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
2192 					(gma & ~I915_GTT_PAGE_MASK);
2193 		trace_gma_translate(vgpu->id, "ppgtt", 0,
2194 				    mm->ppgtt_mm.root_entry_type, gma, gpa);
2195 	}
2196 
2197 	return gpa;
2198 err:
2199 	gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
2200 	return INTEL_GVT_INVALID_ADDR;
2201 }
2202 
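/*
 * Read back a guest GGTT entry (4 or 8 bytes) at the given offset within
 * the GTT MMIO window.  Reads outside the vGPU's graphics memory range
 * return zeroes.
 */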
2203 static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
2204 	unsigned int off, void *p_data, unsigned int bytes)
2205 {
2206 	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2207 	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2208 	unsigned long index = off >> info->gtt_entry_size_shift;
2209 	unsigned long gma;
2210 	struct intel_gvt_gtt_entry e;
2211 
2212 	if (bytes != 4 && bytes != 8)
2213 		return -EINVAL;
2214 
2215 	gma = index << I915_GTT_PAGE_SHIFT;
2216 	if (!intel_gvt_ggtt_validate_range(vgpu,
2217 					   gma, 1 << I915_GTT_PAGE_SHIFT)) {
2218 		gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma);
2219 		memset(p_data, 0, bytes);
2220 		return 0;
2221 	}
2222 
2223 	ggtt_get_guest_entry(ggtt_mm, &e, index);
2224 	memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
2225 			bytes);
2226 	return 0;
2227 }
2228 
2229 /**
2230  * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
2231  * @vgpu: a vGPU
2232  * @off: register offset
2233  * @p_data: data will be returned to guest
2234  * @bytes: data length
2235  *
2236  * This function is used to emulate the GTT MMIO register read
2237  *
2238  * Returns:
2239  * Zero on success, error code if failed.
2240  */
2241 int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
2242 	void *p_data, unsigned int bytes)
2243 {
2244 	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2245 	int ret;
2246 
2247 	if (bytes != 4 && bytes != 8)
2248 		return -EINVAL;
2249 
2250 	off -= info->gtt_start_offset;
2251 	ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
2252 	return ret;
2253 }
2254 
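/*
 * Drop the DMA mapping of the guest page referenced by a host GGTT entry,
 * unless the entry points at the shared scratch page.
 */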
2255 static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
2256 		struct intel_gvt_gtt_entry *entry)
2257 {
2258 	const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2259 	unsigned long pfn;
2260 
2261 	pfn = pte_ops->get_pfn(entry);
2262 	if (pfn != vgpu->gvt->gtt.scratch_mfn)
2263 		intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
2264 }
2265 
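/*
 * Apply a guest write to a GGTT entry: assemble partial (split 4-byte)
 * writes, map the guest page to get a DMA address for the shadow entry
 * (falling back to the scratch page when the gfn is invalid or the mapping
 * fails), invalidate the old host entry and flush the hardware GGTT.
 * Writes outside the vGPU's graphics memory range are silently ignored.
 */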
2266 static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
2267 	void *p_data, unsigned int bytes)
2268 {
2269 	struct intel_gvt *gvt = vgpu->gvt;
2270 	const struct intel_gvt_device_info *info = &gvt->device_info;
2271 	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2272 	const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
2273 	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
2274 	unsigned long gma, gfn;
2275 	struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
2276 	struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
2277 	dma_addr_t dma_addr;
2278 	int ret;
2279 	struct intel_gvt_partial_pte *partial_pte, *pos, *n;
2280 	bool partial_update = false;
2281 
2282 	if (bytes != 4 && bytes != 8)
2283 		return -EINVAL;
2284 
2285 	gma = g_gtt_index << I915_GTT_PAGE_SHIFT;
2286 
2287 	/* the VM may configure the whole GM space when ballooning is used */
2288 	if (!vgpu_gmadr_is_valid(vgpu, gma))
2289 		return 0;
2290 
2291 	e.type = GTT_TYPE_GGTT_PTE;
2292 	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
2293 			bytes);
2294 
2295 	/* If the GGTT entry size is 8 bytes and the write is split into two
2296 	 * 4-byte writes, save the first 4 bytes in a list and update the virtual
2297 	 * PTE. Only update the shadow PTE when the second 4 bytes arrive.
2298 	 */
2299 	if (bytes < info->gtt_entry_size) {
2300 		bool found = false;
2301 
2302 		list_for_each_entry_safe(pos, n,
2303 				&ggtt_mm->ggtt_mm.partial_pte_list, list) {
2304 			if (g_gtt_index == pos->offset >>
2305 					info->gtt_entry_size_shift) {
2306 				if (off != pos->offset) {
2307 					/* the second partial part */
2308 					int last_off = pos->offset &
2309 						(info->gtt_entry_size - 1);
2310 
2311 					memcpy((void *)&e.val64 + last_off,
2312 						(void *)&pos->data + last_off,
2313 						bytes);
2314 
2315 					list_del(&pos->list);
2316 					kfree(pos);
2317 					found = true;
2318 					break;
2319 				}
2320 
2321 				/* update of the first partial part */
2322 				pos->data = e.val64;
2323 				ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
2324 				return 0;
2325 			}
2326 		}
2327 
2328 		if (!found) {
2329 			/* the first partial part */
2330 			partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
2331 			if (!partial_pte)
2332 				return -ENOMEM;
2333 			partial_pte->offset = off;
2334 			partial_pte->data = e.val64;
2335 			list_add_tail(&partial_pte->list,
2336 				&ggtt_mm->ggtt_mm.partial_pte_list);
2337 			partial_update = true;
2338 		}
2339 	}
2340 
2341 	if (!partial_update && (ops->test_present(&e))) {
2342 		gfn = ops->get_pfn(&e);
2343 		m.val64 = e.val64;
2344 		m.type = e.type;
2345 
2346 		/* One PTE update may be issued in multiple writes, and the
2347 		 * first write may not yet contain a valid gfn.
2348 		 */
2349 		if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
2350 			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2351 			goto out;
2352 		}
2353 
2354 		ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
2355 						   &dma_addr);
2356 		if (ret) {
2357 			gvt_vgpu_err("fail to populate guest ggtt entry\n");
2358 			/* The guest driver may read/write the entry while a
2359 			 * partial update is in flight; the p2m translation can
2360 			 * fail here, so point the shadow entry at a scratch page.
2361 			 */
2362 			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2363 		} else
2364 			ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
2365 	} else {
2366 		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2367 		ops->clear_present(&m);
2368 	}
2369 
2370 out:
2371 	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
2372 
2373 	ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
2374 	ggtt_invalidate_pte(vgpu, &e);
2375 
2376 	ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
2377 	ggtt_invalidate(gvt->gt);
2378 	return 0;
2379 }
2380 
2381 /**
2382  * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write
2383  * @vgpu: a vGPU
2384  * @off: register offset
2385  * @p_data: data from guest write
2386  * @bytes: data length
2387  *
2388  * This function is used to emulate the GTT MMIO register write
2389  *
2390  * Returns:
2391  * Zero on success, error code if failed.
2392  */
2393 int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
2394 		unsigned int off, void *p_data, unsigned int bytes)
2395 {
2396 	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2397 	int ret;
2398 	struct intel_vgpu_submission *s = &vgpu->submission;
2399 	struct intel_engine_cs *engine;
2400 	int i;
2401 
2402 	if (bytes != 4 && bytes != 8)
2403 		return -EINVAL;
2404 
2405 	off -= info->gtt_start_offset;
2406 	ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
2407 
2408 	/* If the GGTT entry of the last submitted context is written,
2409 	 * that context has probably been unpinned.
2410 	 * Mark the last shadowed context as invalid.
2411 	 */
2412 	for_each_engine(engine, vgpu->gvt->gt, i) {
2413 		if (!s->last_ctx[i].valid)
2414 			continue;
2415 
2416 		if (s->last_ctx[i].lrca == (off >> info->gtt_entry_size_shift))
2417 			s->last_ctx[i].valid = false;
2418 	}
2419 	return ret;
2420 }
2421 
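/*
 * Allocate and DMA-map one scratch page table page for the given level and
 * record its mfn in vgpu->gtt.scratch_pt[type].  For levels above the leaf
 * PTE page table, the page is filled with entries pointing at the scratch
 * page of the level below, forming a scratch page table tree.
 */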
2422 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
2423 		enum intel_gvt_gtt_type type)
2424 {
2425 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
2426 	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2427 	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2428 	int page_entry_num = I915_GTT_PAGE_SIZE >>
2429 				vgpu->gvt->device_info.gtt_entry_size_shift;
2430 	void *scratch_pt;
2431 	int i;
2432 	struct device *dev = vgpu->gvt->gt->i915->drm.dev;
2433 	dma_addr_t daddr;
2434 
2435 	if (drm_WARN_ON(&i915->drm,
2436 			type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
2437 		return -EINVAL;
2438 
2439 	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
2440 	if (!scratch_pt) {
2441 		gvt_vgpu_err("fail to allocate scratch page\n");
2442 		return -ENOMEM;
2443 	}
2444 
2445 	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 4096, DMA_BIDIRECTIONAL);
2446 	if (dma_mapping_error(dev, daddr)) {
2447 		gvt_vgpu_err("fail to dmamap scratch_pt\n");
2448 		__free_page(virt_to_page(scratch_pt));
2449 		return -ENOMEM;
2450 	}
2451 	gtt->scratch_pt[type].page_mfn =
2452 		(unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
2453 	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
2454 	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
2455 			vgpu->id, type, gtt->scratch_pt[type].page_mfn);
2456 
2457 	/* Build the tree by filling the scratch page table with entries that
2458 	 * point to the next-level scratch page table or scratch page.
2459 	 * scratch_pt[type] indicates the scratch pt/scratch page used by a
2460 	 * page table of the given 'type'.
2461 	 * E.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
2462 	 * GTT_TYPE_PPGTT_PDE_PT level page table; that scratch pt itself is of
2463 	 * type GTT_TYPE_PPGTT_PTE_PT and is filled with the scratch page mfn.
2464 	 */
2465 	if (type > GTT_TYPE_PPGTT_PTE_PT) {
2466 		struct intel_gvt_gtt_entry se;
2467 
2468 		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
2469 		se.type = get_entry_type(type - 1);
2470 		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
2471 
2472 		/* The entry parameters (present/writable/cache type) are set
2473 		 * to match i915's scratch page tree.
2474 		 */
2475 		se.val64 |= GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
2476 		if (type == GTT_TYPE_PPGTT_PDE_PT)
2477 			se.val64 |= PPAT_CACHED;
2478 
2479 		for (i = 0; i < page_entry_num; i++)
2480 			ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
2481 	}
2482 
2483 	return 0;
2484 }
2485 
2486 static int release_scratch_page_tree(struct intel_vgpu *vgpu)
2487 {
2488 	int i;
2489 	struct device *dev = vgpu->gvt->gt->i915->drm.dev;
2490 	dma_addr_t daddr;
2491 
2492 	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2493 		if (vgpu->gtt.scratch_pt[i].page != NULL) {
2494 			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
2495 					I915_GTT_PAGE_SHIFT);
2496 			dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
2497 			__free_page(vgpu->gtt.scratch_pt[i].page);
2498 			vgpu->gtt.scratch_pt[i].page = NULL;
2499 			vgpu->gtt.scratch_pt[i].page_mfn = 0;
2500 		}
2501 	}
2502 
2503 	return 0;
2504 }
2505 
2506 static int create_scratch_page_tree(struct intel_vgpu *vgpu)
2507 {
2508 	int i, ret;
2509 
2510 	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2511 		ret = alloc_scratch_pages(vgpu, i);
2512 		if (ret)
2513 			goto err;
2514 	}
2515 
2516 	return 0;
2517 
2518 err:
2519 	release_scratch_page_tree(vgpu);
2520 	return ret;
2521 }
2522 
2523 /**
2524  * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
2525  * @vgpu: a vGPU
2526  *
2527  * This function is used to initialize per-vGPU graphics memory virtualization
2528  * components.
2529  *
2530  * Returns:
2531  * Zero on success, error code if failed.
2532  */
2533 int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2534 {
2535 	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2536 
2537 	INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL);
2538 
2539 	INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
2540 	INIT_LIST_HEAD(&gtt->oos_page_list_head);
2541 	INIT_LIST_HEAD(&gtt->post_shadow_list_head);
2542 
2543 	gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
2544 	if (IS_ERR(gtt->ggtt_mm)) {
2545 		gvt_vgpu_err("fail to create mm for ggtt.\n");
2546 		return PTR_ERR(gtt->ggtt_mm);
2547 	}
2548 
2549 	intel_vgpu_reset_ggtt(vgpu, false);
2550 
2551 	INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);
2552 
2553 	return create_scratch_page_tree(vgpu);
2554 }
2555 
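/*
 * Destroy every PPGTT mm object of a vGPU and warn if any shadow page
 * tables are still left behind afterwards.
 */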
2556 void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
2557 {
2558 	struct list_head *pos, *n;
2559 	struct intel_vgpu_mm *mm;
2560 
2561 	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2562 		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2563 		intel_vgpu_destroy_mm(mm);
2564 	}
2565 
2566 	if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
2567 		gvt_err("vgpu ppgtt mm is not fully destroyed\n");
2568 
2569 	if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
2570 		gvt_err("Why are there SPTs still not freed?\n");
2571 		ppgtt_free_all_spt(vgpu);
2572 	}
2573 }
2574 
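/*
 * Destroy the vGPU's GGTT mm object, discarding any partial PTE updates
 * that are still on hold.
 */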
2575 static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
2576 {
2577 	struct intel_gvt_partial_pte *pos, *next;
2578 
2579 	list_for_each_entry_safe(pos, next,
2580 				 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
2581 				 list) {
2582 		gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
2583 			pos->offset, pos->data);
2584 		kfree(pos);
2585 	}
2586 	intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
2587 	vgpu->gtt.ggtt_mm = NULL;
2588 }
2589 
2590 /**
2591  * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
2592  * @vgpu: a vGPU
2593  *
2594  * This function is used to clean up per-vGPU graphics memory virtualization
2595  * components.
2599  */
2600 void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
2601 {
2602 	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
2603 	intel_vgpu_destroy_ggtt_mm(vgpu);
2604 	release_scratch_page_tree(vgpu);
2605 }
2606 
2607 static void clean_spt_oos(struct intel_gvt *gvt)
2608 {
2609 	struct intel_gvt_gtt *gtt = &gvt->gtt;
2610 	struct list_head *pos, *n;
2611 	struct intel_vgpu_oos_page *oos_page;
2612 
2613 	WARN(!list_empty(&gtt->oos_page_use_list_head),
2614 		"someone is still using oos page\n");
2615 
2616 	list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
2617 		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
2618 		list_del(&oos_page->list);
2619 		free_page((unsigned long)oos_page->mem);
2620 		kfree(oos_page);
2621 	}
2622 }
2623 
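/*
 * Preallocate the pool of out-of-sync pages.  Each oos page holds a private
 * copy of one guest page table page, used to track guest writes while that
 * page is out of sync with its shadow.
 */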
2624 static int setup_spt_oos(struct intel_gvt *gvt)
2625 {
2626 	struct intel_gvt_gtt *gtt = &gvt->gtt;
2627 	struct intel_vgpu_oos_page *oos_page;
2628 	int i;
2629 	int ret;
2630 
2631 	INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
2632 	INIT_LIST_HEAD(&gtt->oos_page_use_list_head);
2633 
2634 	for (i = 0; i < preallocated_oos_pages; i++) {
2635 		oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
2636 		if (!oos_page) {
2637 			ret = -ENOMEM;
2638 			goto fail;
2639 		}
2640 		oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
2641 		if (!oos_page->mem) {
2642 			ret = -ENOMEM;
2643 			kfree(oos_page);
2644 			goto fail;
2645 		}
2646 
2647 		INIT_LIST_HEAD(&oos_page->list);
2648 		INIT_LIST_HEAD(&oos_page->vm_list);
2649 		oos_page->id = i;
2650 		list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
2651 	}
2652 
2653 	gvt_dbg_mm("%d oos pages preallocated\n", i);
2654 
2655 	return 0;
2656 fail:
2657 	clean_spt_oos(gvt);
2658 	return ret;
2659 }
2660 
2661 /**
2662  * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
2663  * @vgpu: a vGPU
2664  * @pdps: pdp root array
2665  *
2666  * This function is used to find a PPGTT mm object from the mm object pool.
2667  *
2668  * Returns:
2669  * pointer to mm object on success, NULL if failed.
2670  */
2671 struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
2672 		u64 pdps[])
2673 {
2674 	struct intel_vgpu_mm *mm;
2675 	struct list_head *pos;
2676 
2677 	list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
2678 		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2679 
2680 		switch (mm->ppgtt_mm.root_entry_type) {
2681 		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
2682 			if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
2683 				return mm;
2684 			break;
2685 		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
2686 			if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
2687 				    sizeof(mm->ppgtt_mm.guest_pdps)))
2688 				return mm;
2689 			break;
2690 		default:
2691 			GEM_BUG_ON(1);
2692 		}
2693 	}
2694 	return NULL;
2695 }
2696 
2697 /**
2698  * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
2699  * @vgpu: a vGPU
2700  * @root_entry_type: ppgtt root entry type
2701  * @pdps: guest pdps
2702  *
2703  * This function is used to find or create a PPGTT mm object from a guest.
2704  *
2705  * Returns:
2706  * Pointer to the mm object on success, ERR_PTR-encoded error code if failed.
2707  */
2708 struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
2709 		enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
2710 {
2711 	struct intel_vgpu_mm *mm;
2712 
2713 	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
2714 	if (mm) {
2715 		intel_vgpu_mm_get(mm);
2716 	} else {
2717 		mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
2718 		if (IS_ERR(mm))
2719 			gvt_vgpu_err("fail to create mm\n");
2720 	}
2721 	return mm;
2722 }
2723 
2724 /**
2725  * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
2726  * @vgpu: a vGPU
2727  * @pdps: guest pdps
2728  *
2729  * This function is used to find a PPGTT mm object from a guest and drop its reference.
2730  *
2731  * Returns:
2732  * Zero on success, negative error code if failed.
2733  */
2734 int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
2735 {
2736 	struct intel_vgpu_mm *mm;
2737 
2738 	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
2739 	if (!mm) {
2740 		gvt_vgpu_err("fail to find ppgtt instance.\n");
2741 		return -EINVAL;
2742 	}
2743 	intel_vgpu_mm_put(mm);
2744 	return 0;
2745 }
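/*
 * Illustrative usage only (a minimal sketch, not code from this file): the
 * PV notification handlers are expected to bracket a guest PPGTT's lifetime
 * roughly like
 *
 *	mm = intel_vgpu_get_ppgtt_mm(vgpu, GTT_TYPE_PPGTT_ROOT_L4_ENTRY, pdps);
 *	if (IS_ERR(mm))
 *		return PTR_ERR(mm);
 *	...
 *	intel_vgpu_put_ppgtt_mm(vgpu, pdps);
 */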
2746 
2747 /**
2748  * intel_gvt_init_gtt - initialize mm components of a GVT device
2749  * @gvt: GVT device
2750  *
2751  * This function is called at the initialization stage, to initialize
2752  * the mm components of a GVT device.
2753  *
2754  * Returns:
2755  * zero on success, negative error code if failed.
2756  */
2757 int intel_gvt_init_gtt(struct intel_gvt *gvt)
2758 {
2759 	int ret;
2760 	void *page;
2761 	struct device *dev = gvt->gt->i915->drm.dev;
2762 	dma_addr_t daddr;
2763 
2764 	gvt_dbg_core("init gtt\n");
2765 
2766 	gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
2767 	gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
2768 
2769 	page = (void *)get_zeroed_page(GFP_KERNEL);
2770 	if (!page) {
2771 		gvt_err("fail to allocate scratch ggtt page\n");
2772 		return -ENOMEM;
2773 	}
2774 
2775 	daddr = dma_map_page(dev, virt_to_page(page), 0,
2776 			4096, DMA_BIDIRECTIONAL);
2777 	if (dma_mapping_error(dev, daddr)) {
2778 		gvt_err("fail to dmamap scratch ggtt page\n");
2779 		__free_page(virt_to_page(page));
2780 		return -ENOMEM;
2781 	}
2782 
2783 	gvt->gtt.scratch_page = virt_to_page(page);
2784 	gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
2785 
2786 	if (enable_out_of_sync) {
2787 		ret = setup_spt_oos(gvt);
2788 		if (ret) {
2789 			gvt_err("fail to initialize SPT oos\n");
2790 			dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
2791 			__free_page(gvt->gtt.scratch_page);
2792 			return ret;
2793 		}
2794 	}
2795 	INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
2796 	rw_init(&gvt->gtt.ppgtt_mm_lock, "gvtmm");
2797 	return 0;
2798 }
2799 
2800 /**
2801  * intel_gvt_clean_gtt - clean up mm components of a GVT device
2802  * @gvt: GVT device
2803  *
2804  * This function is called at the driver unloading stage, to clean up
2805  * the mm components of a GVT device.
2806  *
2807  */
2808 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
2809 {
2810 	struct device *dev = gvt->gt->i915->drm.dev;
2811 	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
2812 					I915_GTT_PAGE_SHIFT);
2813 
2814 	dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
2815 
2816 	__free_page(gvt->gtt.scratch_page);
2817 
2818 	if (enable_out_of_sync)
2819 		clean_spt_oos(gvt);
2820 }
2821 
2822 /**
2823  * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
2824  * @vgpu: a vGPU
2825  *
2826  * This function is called to invalidate all PPGTT instances of a vGPU.
2827  *
2828  */
2829 void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
2830 {
2831 	struct list_head *pos, *n;
2832 	struct intel_vgpu_mm *mm;
2833 
2834 	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2835 		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2836 		if (mm->type == INTEL_GVT_MM_PPGTT) {
2837 			mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2838 			list_del_init(&mm->ppgtt_mm.lru_list);
2839 			mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2840 			if (mm->ppgtt_mm.shadowed)
2841 				invalidate_ppgtt_mm(mm);
2842 		}
2843 	}
2844 }
2845 
2846 /**
2847  * intel_vgpu_reset_ggtt - reset the GGTT entry
2848  * @vgpu: a vGPU
2849  * @invalidate_old: invalidate old entries
2850  *
2851  * This function is called at the vGPU create stage
2852  * to reset all the GGTT entries.
2853  *
2854  */
2855 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
2856 {
2857 	struct intel_gvt *gvt = vgpu->gvt;
2858 	const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2859 	struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
2860 	struct intel_gvt_gtt_entry old_entry;
2861 	u32 index;
2862 	u32 num_entries;
2863 
2864 	pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
2865 	pte_ops->set_present(&entry);
2866 
2867 	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
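	/* Point every aperture and hidden GGTT entry of this vGPU at the
	 * scratch page, optionally unmapping whatever the old entries
	 * referenced first.
	 */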
2868 	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2869 	while (num_entries--) {
2870 		if (invalidate_old) {
2871 			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2872 			ggtt_invalidate_pte(vgpu, &old_entry);
2873 		}
2874 		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2875 	}
2876 
2877 	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2878 	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2879 	while (num_entries--) {
2880 		if (invalidate_old) {
2881 			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2882 			ggtt_invalidate_pte(vgpu, &old_entry);
2883 		}
2884 		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2885 	}
2886 
2887 	ggtt_invalidate(gvt->gt);
2888 }
2889 
2890 /**
2891  * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
2892  * @gvt: intel gvt device
2893  *
2894  * This function is called at driver resume stage to restore
2895  * GGTT entries of every vGPU.
2896  *
2897  */
2898 void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
2899 {
2900 	struct intel_vgpu *vgpu;
2901 	struct intel_vgpu_mm *mm;
2902 	int id;
2903 	gen8_pte_t pte;
2904 	u32 idx, num_low, num_hi, offset;
2905 
2906 	/* Restore dirty host ggtt for all vGPUs */
2907 	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
2908 		mm = vgpu->gtt.ggtt_mm;
2909 
2910 		num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2911 		offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2912 		for (idx = 0; idx < num_low; idx++) {
2913 			pte = mm->ggtt_mm.host_ggtt_aperture[idx];
2914 			if (pte & GEN8_PAGE_PRESENT)
2915 				write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
2916 		}
2917 
2918 		num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2919 		offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2920 		for (idx = 0; idx < num_hi; idx++) {
2921 			pte = mm->ggtt_mm.host_ggtt_hidden[idx];
2922 			if (pte & GEN8_PAGE_PRESENT)
2923 				write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
2924 		}
2925 	}
2926 }
2927