1 /*	$NetBSD: gen8_ppgtt.c,v 1.10 2021/12/19 12:13:01 riastradh Exp $	*/
2 
3 // SPDX-License-Identifier: MIT
4 /*
5  * Copyright © 2020 Intel Corporation
6  */
7 
8 #include <sys/cdefs.h>
9 __KERNEL_RCSID(0, "$NetBSD: gen8_ppgtt.c,v 1.10 2021/12/19 12:13:01 riastradh Exp $");
10 
11 #include <linux/log2.h>
12 
13 #include "gen8_ppgtt.h"
14 #include "i915_scatterlist.h"
15 #include "i915_trace.h"
16 #include "i915_vgpu.h"
17 #include "intel_gt.h"
18 #include "intel_gtt.h"
19 
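/*
 * Encode a page-directory entry: the physical address of the next-level
 * page plus present/RW bits and a PPAT cacheability index (uncached only
 * for I915_CACHE_NONE).
 */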
20 static u64 gen8_pde_encode(const dma_addr_t addr,
21 			   const enum i915_cache_level level)
22 {
23 	u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;
24 
25 	if (level != I915_CACHE_NONE)
26 		pde |= PPAT_CACHED_PDE;
27 	else
28 		pde |= PPAT_UNCACHED;
29 
30 	return pde;
31 }
32 
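/*
 * When running as a GVT-g guest, tell the host about creation/teardown of
 * this ppgtt: publish the page-directory DMA address(es) through the vgtif
 * registers and post a g2v_notify message.
 */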
33 static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
34 {
35 	struct drm_i915_private *i915 = ppgtt->vm.i915;
36 	struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
37 	enum vgt_g2v_type msg;
38 	int i;
39 
40 	if (create)
41 		atomic_inc(px_used(ppgtt->pd)); /* never remove */
42 	else
43 		atomic_dec(px_used(ppgtt->pd));
44 
45 	mutex_lock(&i915->vgpu.lock);
46 
47 	if (i915_vm_is_4lvl(&ppgtt->vm)) {
48 		const u64 daddr = px_dma(ppgtt->pd);
49 
50 		intel_uncore_write(uncore,
51 				   vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
52 		intel_uncore_write(uncore,
53 				   vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
54 
55 		msg = create ?
56 			VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
57 			VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
58 	} else {
59 		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
60 			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
61 
62 			intel_uncore_write(uncore,
63 					   vgtif_reg(pdp[i].lo),
64 					   lower_32_bits(daddr));
65 			intel_uncore_write(uncore,
66 					   vgtif_reg(pdp[i].hi),
67 					   upper_32_bits(daddr));
68 		}
69 
70 		msg = create ?
71 			VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
72 			VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
73 	}
74 
75 	/* g2v_notify atomically (via hv trap) consumes the message packet. */
76 	intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);
77 
78 	mutex_unlock(&i915->vgpu.lock);
79 }
80 
81 /* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
82 #define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
83 #define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
84 #define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
85 #define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
86 #define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
87 #define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
88 #define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
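/*
 * Each level adds another 9 bits of index above the 4KiB page offset, so
 * for a 4-level (48-bit) vm an address decomposes as
 *	__gen8_pte_index(addr, 0) = bits 12..20 (PTE within a page table)
 *	__gen8_pte_index(addr, 1) = bits 21..29 (PDE)
 *	__gen8_pte_index(addr, 2) = bits 30..38 (PDPE)
 *	__gen8_pte_index(addr, 3) = bits 39..47 (PML4E)
 * gen8_pd_index() performs the same split on page indices (addr >> 12).
 */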
89 
90 #define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
91 
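/*
 * For the page-index range [start, end), return how many entries of the
 * level-@lvl directory are covered, clamped to the end of that directory;
 * the index of the first entry is stored in *idx.
 */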
92 static inline unsigned int
93 gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
94 {
95 	const int shift = gen8_pd_shift(lvl);
96 	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
97 
98 	GEM_BUG_ON(start >= end);
99 	end += ~mask >> gen8_pd_shift(1);
100 
101 	*idx = i915_pde_index(start, shift);
102 	if ((start ^ end) & mask)
103 		return GEN8_PDES - *idx;
104 	else
105 		return i915_pde_index(end, shift) - *idx;
106 }
107 
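/*
 * True when [start, end) covers every page mapped below the level-@lvl
 * directory beginning at @start, i.e. the whole subtree can be dropped.
 */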
108 static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
109 {
110 	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
111 
112 	GEM_BUG_ON(start >= end);
113 	return (start ^ end) & mask && (start & ~mask) == 0;
114 }
115 
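/*
 * Number of PTEs touched within a single page table: the remainder of the
 * table starting at @start, or end - start if the range ends inside it.
 */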
116 static inline unsigned int gen8_pt_count(u64 start, u64 end)
117 {
118 	GEM_BUG_ON(start >= end);
119 	if ((start ^ end) >> gen8_pd_shift(1))
120 		return GEN8_PDES - (start & (GEN8_PDES - 1));
121 	else
122 		return end - start;
123 }
124 
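/*
 * Number of entries needed in the top-level directory to map vm->total:
 * 512 for a full 4-level vm, GEN8_3LVL_PDPES (4) for a 3-level vm.
 */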
125 static inline unsigned int
126 gen8_pd_top_count(const struct i915_address_space *vm)
127 {
128 	unsigned int shift = __gen8_pte_shift(vm->top);
129 	return (vm->total + (1ull << shift) - 1) >> shift;
130 }
131 
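/*
 * Find the page directory covering a given page index: the top-level pd
 * itself for a 3-level vm, otherwise the pdp looked up in the top level.
 */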
132 static inline struct i915_page_directory *
133 gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
134 {
135 	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
136 
137 	if (vm->top == 2)
138 		return ppgtt->pd;
139 	else
140 		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
141 }
142 
143 static inline struct i915_page_directory *
144 gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
145 {
146 	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
147 }
148 
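/*
 * Recursively free a page directory and every table beneath it; @count is
 * the number of entries at level @lvl (GEN8_PDES except for the top level).
 */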
149 static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
150 				 struct i915_page_directory *pd,
151 				 int count, int lvl)
152 {
153 	if (lvl) {
154 		void **pde = pd->entry;
155 
156 		do {
157 			if (!*pde)
158 				continue;
159 
160 			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
161 		} while (pde++, --count);
162 		spin_lock_destroy(&pd->lock);
163 	}
164 
165 	free_px(vm, pd);
166 }
167 
168 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
169 {
170 	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
171 
172 	if (intel_vgpu_active(vm->i915))
173 		gen8_ppgtt_notify_vgt(ppgtt, false);
174 
175 	__gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
176 	free_scratch(vm);
177 }
178 
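/*
 * Point the PTEs for the page-index range [start, end) back at scratch,
 * freeing any page table or directory that the range covers completely.
 * Returns the index at which clearing stopped.
 */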
179 static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
180 			      struct i915_page_directory * const pd,
181 			      u64 start, const u64 end, int lvl)
182 {
183 	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
184 	unsigned int idx, len;
185 
186 	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
187 
188 	len = gen8_pd_range(start, end, lvl--, &idx);
189 	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
190 	    __func__, vm, lvl + 1, start, end,
191 	    idx, len, atomic_read(px_used(pd)));
192 	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));
193 
194 	do {
195 		struct i915_page_table *pt = pd->entry[idx];
196 
197 		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
198 		    gen8_pd_contains(start, end, lvl)) {
199 			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
200 			    __func__, vm, lvl + 1, idx, start, end);
201 			clear_pd_entry(pd, idx, scratch);
202 			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
203 			start += (u64)I915_PDES << gen8_pd_shift(lvl);
204 			continue;
205 		}
206 
207 		if (lvl) {
208 			start = __gen8_ppgtt_clear(vm, as_pd(pt),
209 						   start, end, lvl);
210 		} else {
211 			unsigned int count;
212 			u64 *vaddr;
213 
214 			count = gen8_pt_count(start, end);
215 			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
216 			    __func__, vm, lvl, start, end,
217 			    gen8_pd_index(start, 0), count,
218 			    atomic_read(&pt->used));
219 			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
220 
221 			vaddr = kmap_atomic_px(pt);
222 			memset64(vaddr + gen8_pd_index(start, 0),
223 				 vm->scratch[0].encode,
224 				 count);
225 			kunmap_atomic(vaddr);
226 
227 			atomic_sub(count, &pt->used);
228 			start += count;
229 		}
230 
231 		if (release_pd_entry(pd, idx, pt, scratch)) {
232 			if (lvl)
233 				spin_lock_destroy(&as_pd(pt)->lock);
234 			free_px(vm, pt);
235 		}
236 	} while (idx++, --len);
237 
238 	return start;
239 }
240 
241 static void gen8_ppgtt_clear(struct i915_address_space *vm,
242 			     u64 start, u64 length)
243 {
244 	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
245 	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
246 	GEM_BUG_ON(range_overflows(start, length, vm->total));
247 
248 	start >>= GEN8_PTE_SHIFT;
249 	length >>= GEN8_PTE_SHIFT;
250 	GEM_BUG_ON(length == 0);
251 
252 	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
253 			   start, start + length, vm->top);
254 }
255 
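/*
 * Populate the directory tree for the page-index range [*start, end):
 * allocate any missing directories/tables (initialised to scratch) and bump
 * pt->used on the leaf tables for the PTEs about to be written.  *start is
 * advanced as leaves are claimed, so on error the caller knows how far to
 * unwind with __gen8_ppgtt_clear().
 */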
256 static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
257 			      struct i915_page_directory * const pd,
258 			      u64 * const start, const u64 end, int lvl)
259 {
260 	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
261 	struct i915_page_table *alloc = NULL;
262 	unsigned int idx, len;
263 	int ret = 0;
264 
265 	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
266 
267 	len = gen8_pd_range(*start, end, lvl--, &idx);
268 	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
269 	    __func__, vm, lvl + 1, *start, end,
270 	    idx, len, atomic_read(px_used(pd)));
271 	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));
272 
273 	spin_lock(&pd->lock);
274 	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
275 	do {
276 		struct i915_page_table *pt = pd->entry[idx];
277 
278 		if (!pt) {
279 			spin_unlock(&pd->lock);
280 
281 			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
282 			    __func__, vm, lvl + 1, idx);
283 
284 			pt = fetch_and_zero(&alloc);
285 			if (lvl) {
286 				if (!pt) {
287 					pt = &alloc_pd(vm)->pt;
288 					if (IS_ERR(pt)) {
289 						ret = PTR_ERR(pt);
290 						goto out;
291 					}
292 				}
293 
294 				fill_px(pt, vm->scratch[lvl].encode);
295 			} else {
296 				if (!pt) {
297 					pt = alloc_pt(vm);
298 					if (IS_ERR(pt)) {
299 						ret = PTR_ERR(pt);
300 						goto out;
301 					}
302 				}
303 
304 				if (intel_vgpu_active(vm->i915) ||
305 				    gen8_pt_count(*start, end) < I915_PDES)
306 					fill_px(pt, vm->scratch[lvl].encode);
307 			}
308 
309 			spin_lock(&pd->lock);
310 			if (likely(!pd->entry[idx]))
311 				set_pd_entry(pd, idx, pt);
312 			else
313 				alloc = pt, pt = pd->entry[idx];
314 		}
315 
316 		if (lvl) {
317 			atomic_inc(&pt->used);
318 			spin_unlock(&pd->lock);
319 
320 			ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
321 						 start, end, lvl);
322 			if (unlikely(ret)) {
323 				if (release_pd_entry(pd, idx, pt, scratch))
324 					free_px(vm, pt);
325 				goto out;
326 			}
327 
328 			spin_lock(&pd->lock);
329 			atomic_dec(&pt->used);
330 			GEM_BUG_ON(!atomic_read(&pt->used));
331 		} else {
332 			unsigned int count = gen8_pt_count(*start, end);
333 
334 			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
335 			    __func__, vm, lvl, *start, end,
336 			    gen8_pd_index(*start, 0), count,
337 			    atomic_read(&pt->used));
338 
339 			atomic_add(count, &pt->used);
340 			/* All other pdes may be simultaneously removed */
341 			GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
342 			*start += count;
343 		}
344 	} while (idx++, --len);
345 	spin_unlock(&pd->lock);
346 out:
347 	if (alloc) {
348 		if (lvl) {
349 			struct i915_page_directory *allocpd =
350 			    container_of(alloc, struct i915_page_directory,
351 				pt);
352 			spin_lock_destroy(&allocpd->lock);
353 		}
354 		free_px(vm, alloc);
355 	}
356 	return ret;
357 }
358 
359 static int gen8_ppgtt_alloc(struct i915_address_space *vm,
360 			    u64 start, u64 length)
361 {
362 	u64 from;
363 	int err;
364 
365 	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
366 	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
367 	GEM_BUG_ON(range_overflows(start, length, vm->total));
368 
369 	start >>= GEN8_PTE_SHIFT;
370 	length >>= GEN8_PTE_SHIFT;
371 	GEM_BUG_ON(length == 0);
372 	from = start;
373 
374 	err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
375 				 &start, start + length, vm->top);
376 	if (unlikely(err && from != start))
377 		__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
378 				   from, start, vm->top);
379 
380 	return err;
381 }
382 
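/*
 * Write 4KiB PTEs starting at page index @idx for the pages described by
 * @iter (a bus_dma segment walk on NetBSD, a scatterlist walk upstream),
 * crossing page tables and directories as @idx advances.  Returns the next
 * index once the current pdp is exhausted, or 0 when @iter runs out.
 */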
383 static __always_inline u64
384 gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
385 		      struct i915_page_directory *pdp,
386 		      struct sgt_dma *iter,
387 		      u64 idx,
388 		      enum i915_cache_level cache_level,
389 		      u32 flags)
390 {
391 	struct i915_page_directory *pd;
392 	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
393 	gen8_pte_t *vaddr;
394 
395 	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
396 	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
397 	do {
398 #ifdef __NetBSD__
399 		KASSERT(iter->seg < iter->map->dm_nsegs);
400 		KASSERT((iter->off & (I915_GTT_PAGE_SIZE - 1)) == 0);
401 		const bus_dma_segment_t *seg = &iter->map->dm_segs[iter->seg];
402 		KASSERT((seg->ds_addr & (I915_GTT_PAGE_SIZE - 1)) == 0);
403 		KASSERT((seg->ds_len & (I915_GTT_PAGE_SIZE - 1)) == 0);
404 		KASSERT(iter->off <= seg->ds_len - I915_GTT_PAGE_SIZE);
405 		vaddr[gen8_pd_index(idx, 0)] =
406 		    pte_encode | (seg->ds_addr + iter->off);
407 		iter->off += I915_GTT_PAGE_SIZE;
408 		if (iter->off >= seg->ds_len) {
409 			GEM_BUG_ON(iter->off > seg->ds_len);
410 			iter->off = 0;
411 			if (++iter->seg >= iter->map->dm_nsegs) {
412 				GEM_BUG_ON(iter->seg > iter->map->dm_nsegs);
413 				idx = 0;
414 				break;
415 			}
416 		}
417 #else
418 		GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
419 		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
420 
421 		iter->dma += I915_GTT_PAGE_SIZE;
422 		if (iter->dma >= iter->max) {
423 			iter->sg = __sg_next(iter->sg);
424 			if (!iter->sg) {
425 				idx = 0;
426 				break;
427 			}
428 
429 			iter->dma = sg_dma_address(iter->sg);
430 			iter->max = iter->dma + iter->sg->length;
431 		}
432 #endif
433 
434 		if (gen8_pd_index(++idx, 0) == 0) {
435 			if (gen8_pd_index(idx, 1) == 0) {
436 				/* Limited by sg length for 3lvl */
437 				if (gen8_pd_index(idx, 2) == 0)
438 					break;
439 
440 				pd = pdp->entry[gen8_pd_index(idx, 2)];
441 			}
442 
443 			kunmap_atomic(vaddr);
444 			vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
445 		}
446 	} while (1);
447 	kunmap_atomic(vaddr);
448 
449 	return idx;
450 }
451 
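/*
 * Like gen8_ppgtt_insert_pte(), but opportunistically uses 2MiB PDEs
 * (GEN8_PDE_PS_2M) or marks fully-populated page tables as 64KiB
 * (GEN8_PDE_IPS_64K) when the backing segments are suitably sized and
 * aligned.  Only used on 4-level vms.
 */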
452 static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
453 				   struct sgt_dma *iter,
454 				   enum i915_cache_level cache_level,
455 				   u32 flags)
456 {
457 	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
458 	u64 start = vma->node.start;
459 #ifdef __NetBSD__
460 	bus_size_t rem = iter->map->dm_segs[iter->seg].ds_len - iter->off;
461 #else
462 	dma_addr_t rem = iter->sg->length;
463 #endif
464 
465 	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));
466 
467 	do {
468 		struct i915_page_directory * const pdp =
469 			gen8_pdp_for_page_address(vma->vm, start);
470 		struct i915_page_directory * const pd =
471 			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
472 		gen8_pte_t encode = pte_encode;
473 		unsigned int maybe_64K = -1;
474 		unsigned int page_size;
475 		gen8_pte_t *vaddr;
476 		u16 index;
477 
478 		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
479 #ifdef __NetBSD__
480 		    IS_ALIGNED((iter->map->dm_segs[iter->seg].ds_addr +
481 			    iter->off),
482 			I915_GTT_PAGE_SIZE_2M) &&
483 #else
484 		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
485 #endif
486 		    rem >= I915_GTT_PAGE_SIZE_2M &&
487 		    !__gen8_pte_index(start, 0)) {
488 			index = __gen8_pte_index(start, 1);
489 			encode |= GEN8_PDE_PS_2M;
490 			page_size = I915_GTT_PAGE_SIZE_2M;
491 
492 			vaddr = kmap_atomic_px(pd);
493 		} else {
494 			struct i915_page_table *pt =
495 				i915_pt_entry(pd, __gen8_pte_index(start, 1));
496 
497 			index = __gen8_pte_index(start, 0);
498 			page_size = I915_GTT_PAGE_SIZE;
499 
500 			if (!index &&
501 			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
502 #ifdef __NetBSD__
503 			    IS_ALIGNED((iter->map->dm_segs[iter->seg].ds_addr
504 				    + iter->off),
505 				I915_GTT_PAGE_SIZE_64K) &&
506 #else
507 			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
508 #endif
509 			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
510 			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
511 				maybe_64K = __gen8_pte_index(start, 1);
512 
513 			vaddr = kmap_atomic_px(pt);
514 		}
515 
516 		do {
517 #ifdef __NetBSD__
518 			GEM_BUG_ON((iter->map->dm_segs[iter->seg].ds_len -
519 				iter->off) < page_size);
520 			vaddr[index++] = encode |
521 			    (iter->map->dm_segs[iter->seg].ds_addr
522 				+ iter->off);
523 #else
524 			GEM_BUG_ON(iter->sg->length < page_size);
525 			vaddr[index++] = encode | iter->dma;
526 #endif
527 
528 			start += page_size;
529 #ifdef __NetBSD__
530 			iter->off += page_size;
531 			rem -= page_size;
532 			if (iter->off >= iter->map->dm_segs[iter->seg].ds_len) {
533 				GEM_BUG_ON(iter->off >
534 				    iter->map->dm_segs[iter->seg].ds_len);
535 				iter->off = 0;
536 				if (++iter->seg >= iter->map->dm_nsegs) {
537 					GEM_BUG_ON(iter->seg >
538 					    iter->map->dm_nsegs);
539 					break;
540 				}
541 				const bus_dma_segment_t *seg =
542 				    &iter->map->dm_segs[iter->seg];
543 				if (maybe_64K != -1 && index < I915_PDES &&
544 				    !(IS_ALIGNED((seg->ds_addr + iter->off),
545 					    I915_GTT_PAGE_SIZE_64K) &&
546 					(IS_ALIGNED(rem,
547 					    I915_GTT_PAGE_SIZE_64K) ||
548 					    rem >= ((I915_PDES - index) * I915_GTT_PAGE_SIZE))))
549 					maybe_64K = -1;
550 				if (unlikely(!IS_ALIGNED((seg->ds_addr +
551 						iter->off), page_size)))
552 					break;
553 			}
554 #else
555 			iter->dma += page_size;
556 			rem -= page_size;
557 			if (iter->dma >= iter->max) {
558 				iter->sg = __sg_next(iter->sg);
559 				if (!iter->sg)
560 					break;
561 
562 				rem = iter->sg->length;
563 				iter->dma = sg_dma_address(iter->sg);
564 				iter->max = iter->dma + rem;
565 
566 				if (maybe_64K != -1 && index < I915_PDES &&
567 				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
568 				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
569 				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
570 					maybe_64K = -1;
571 
572 				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
573 					break;
574 			}
575 #endif
576 		} while (rem >= page_size && index < I915_PDES);
577 
578 		kunmap_atomic(vaddr);
579 
580 		/*
581 		 * Is it safe to mark the 2M block as 64K? -- Either we have
582 		 * filled the whole page-table with 64K entries, or filled part of
583 		 * it and have reached the end of the sg table and we have
584 		 * enough padding.
585 		 */
586 		if (maybe_64K != -1 &&
587 		    (index == I915_PDES ||
588 		     (i915_vm_has_scratch_64K(vma->vm) &&
589 #ifdef __NetBSD__
590 		      iter->seg == iter->map->dm_nsegs &&
591 #else
592 		      !iter->sg &&
593 #endif
594 		      IS_ALIGNED(vma->node.start +
595 					      vma->node.size,
596 					      I915_GTT_PAGE_SIZE_2M)))) {
597 			vaddr = kmap_atomic_px(pd);
598 			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
599 			kunmap_atomic(vaddr);
600 			page_size = I915_GTT_PAGE_SIZE_64K;
601 
602 			/*
603 			 * We write all 4K page entries, even when using 64K
604 			 * pages. In order to verify that the HW isn't cheating
605 			 * by using the 4K PTE instead of the 64K PTE, we want
606 			 * to remove all the surplus entries. If the HW skipped
607 			 * the 64K PTE, it will read/write into the scratch page
608 			 * instead - which we detect as missing results during
609 			 * selftests.
610 			 */
611 			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
612 				u16 i;
613 
614 				encode = vma->vm->scratch[0].encode;
615 				vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
616 
617 				for (i = 1; i < index; i += 16)
618 					memset64(vaddr + i, encode, 15);
619 
620 				kunmap_atomic(vaddr);
621 			}
622 		}
623 
624 		vma->page_sizes.gtt |= page_size;
625 	}
626 #ifdef __NetBSD__
627 	while (iter->seg < iter->map->dm_nsegs);
628 #else
629 	while (iter->sg);
630 #endif
631 }
632 
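/*
 * vm->insert_entries hook: bind a vma's backing pages into the ppgtt,
 * taking the huge-page path when the object was mapped with page sizes
 * larger than 4KiB.
 */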
633 static void gen8_ppgtt_insert(struct i915_address_space *vm,
634 			      struct i915_vma *vma,
635 			      enum i915_cache_level cache_level,
636 			      u32 flags)
637 {
638 	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
639 	struct sgt_dma iter = sgt_dma(vma);
640 
641 	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
642 		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
643 	} else  {
644 		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;
645 
646 		do {
647 			struct i915_page_directory * const pdp =
648 				gen8_pdp_for_page_index(vm, idx);
649 
650 			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
651 						    cache_level, flags);
652 		} while (idx);
653 
654 		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
655 	}
656 }
657 
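/*
 * Set up the scratch page and one scratch table/directory per level, each
 * filled with entries pointing at the level below, so that unpopulated
 * parts of the address space always resolve to the scratch page.
 */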
658 static int gen8_init_scratch(struct i915_address_space *vm)
659 {
660 	int ret;
661 	int i;
662 
663 	/*
664 	 * If everybody agrees not to write into the scratch page,
665 	 * we can reuse it for all vms, keeping contexts and processes separate.
666 	 */
667 	if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
668 		struct i915_address_space *clone = vm->gt->vm;
669 
670 		GEM_BUG_ON(!clone->has_read_only);
671 
672 		vm->scratch_order = clone->scratch_order;
673 		memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
674 		px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
675 		return 0;
676 	}
677 
678 	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
679 	if (ret)
680 		return ret;
681 
682 	vm->scratch[0].encode =
683 		gen8_pte_encode(px_dma(&vm->scratch[0]),
684 				I915_CACHE_LLC, vm->has_read_only);
685 
686 	for (i = 1; i <= vm->top; i++) {
687 		if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
688 			goto free_scratch;
689 
690 		fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
691 		vm->scratch[i].encode =
692 			gen8_pde_encode(px_dma(&vm->scratch[i]),
693 					I915_CACHE_LLC);
694 	}
695 
696 	return 0;
697 
698 free_scratch:
699 	free_scratch(vm);
700 	return -ENOMEM;
701 }
702 
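/*
 * For 3-level (32-bit) vms the four PDP entries are programmed into
 * registers, so allocate all GEN8_3LVL_PDPES page directories up front and
 * keep them pinned for the lifetime of the ppgtt.
 */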
703 static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
704 {
705 	struct i915_address_space *vm = &ppgtt->vm;
706 	struct i915_page_directory *pd = ppgtt->pd;
707 	unsigned int idx;
708 
709 	GEM_BUG_ON(vm->top != 2);
710 	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);
711 
712 	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
713 		struct i915_page_directory *pde;
714 
715 		pde = alloc_pd(vm);
716 		if (IS_ERR(pde))
717 			return PTR_ERR(pde);
718 
719 		fill_px(pde, vm->scratch[1].encode);
720 		set_pd_entry(pd, idx, pde);
721 		atomic_inc(px_used(pde)); /* keep pinned */
722 	}
723 	wmb();
724 
725 	return 0;
726 }
727 
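/*
 * Allocate the top-level directory, sized by gen8_pd_top_count(), fill it
 * with scratch entries and pin it so it is never reaped.
 */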
728 static struct i915_page_directory *
729 gen8_alloc_top_pd(struct i915_address_space *vm)
730 {
731 	const unsigned int count = gen8_pd_top_count(vm);
732 	struct i915_page_directory *pd;
733 
734 	GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));
735 
736 	pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
737 	if (unlikely(!pd))
738 		return ERR_PTR(-ENOMEM);
739 
740 	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
741 		spin_lock_destroy(&pd->lock);
742 		kfree(pd);
743 		return ERR_PTR(-ENOMEM);
744 	}
745 
746 	fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
747 	atomic_inc(px_used(pd)); /* mark as pinned */
748 	return pd;
749 }
750 
751 /*
752  * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
753  * with a net effect resembling a 2-level page table in normal x86 terms. Each
754  * PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of legacy 32b address
755  * space.
756  *
757  */
758 struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
759 {
760 	struct i915_ppgtt *ppgtt;
761 	int err;
762 
763 	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
764 	if (!ppgtt)
765 		return ERR_PTR(-ENOMEM);
766 
767 	ppgtt_init(ppgtt, gt);
768 	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
769 
770 	/*
771 	 * From bdw, there is hw support for read-only pages in the PPGTT.
772 	 *
773 	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
774 	 * for now.
775 	 *
776 	 * Gen12 has inherited the same read-only fault issue from gen11.
777 	 */
778 	ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);
779 
780 	/*
781 	 * There are only a few exceptions for gen >= 6: chv and bxt.
782 	 * And we are not sure about the latter so play safe for now.
783 	 */
784 	if (IS_CHERRYVIEW(gt->i915) || IS_BROXTON(gt->i915))
785 		ppgtt->vm.pt_kmap_wc = true;
786 
787 	err = gen8_init_scratch(&ppgtt->vm);
788 	if (err)
789 		goto err_free;
790 
791 	ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
792 	if (IS_ERR(ppgtt->pd)) {
793 		err = PTR_ERR(ppgtt->pd);
794 		goto err_free_scratch;
795 	}
796 
797 	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
798 		err = gen8_preallocate_top_level_pdp(ppgtt);
799 		if (err)
800 			goto err_free_pd;
801 	}
802 
803 	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
804 	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
805 	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
806 	ppgtt->vm.clear_range = gen8_ppgtt_clear;
807 
808 	if (intel_vgpu_active(gt->i915))
809 		gen8_ppgtt_notify_vgt(ppgtt, true);
810 
811 	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
812 
813 	return ppgtt;
814 
815 err_free_pd:
816 	__gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
817 			     gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
818 err_free_scratch:
819 	free_scratch(&ppgtt->vm);
820 err_free:
821 	kfree(ppgtt);
822 	return ERR_PTR(err);
823 }
824