xref: /netbsd-src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/subdev/mmu/nouveau_nvkm_subdev_mmu_vmm.c (revision 798b8d11ecd8257a8e35c3396210f98abf3d9ade)
1 /*	$NetBSD: nouveau_nvkm_subdev_mmu_vmm.c,v 1.4 2021/12/19 11:34:46 riastradh Exp $	*/
2 
3 /*
4  * Copyright 2017 Red Hat Inc.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  */
24 #include <sys/cdefs.h>
25 __KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_mmu_vmm.c,v 1.4 2021/12/19 11:34:46 riastradh Exp $");
26 
27 #define NVKM_VMM_LEVELS_MAX 5
28 #include "vmm.h"
29 
30 #include <subdev/fb.h>
31 
32 #include <linux/nbsd-namespace.h>
33 
34 static void
35 nvkm_vmm_pt_del(struct nvkm_vmm_pt **ppgt)
36 {
37 	struct nvkm_vmm_pt *pgt = *ppgt;
38 	if (pgt) {
39 		kvfree(pgt->pde);
40 		kfree(pgt);
41 		*ppgt = NULL;
42 	}
43 }
44 
45 
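/* Allocate the software structure shadowing one hardware page table.
 * For dual-PT levels (SPT/LPT) a per-LPTE byte array used to track
 * SPTE counts/state follows the structure itself; PGD levels also get
 * an array of child page-table pointers (pde[]).
 */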
46 static struct nvkm_vmm_pt *
47 nvkm_vmm_pt_new(const struct nvkm_vmm_desc *desc, bool sparse,
48 		const struct nvkm_vmm_page *page)
49 {
50 	const u32 pten = 1 << desc->bits;
51 	struct nvkm_vmm_pt *pgt;
52 	u32 lpte = 0;
53 
54 	if (desc->type > PGT) {
55 		if (desc->type == SPT) {
56 			const struct nvkm_vmm_desc *pair = page[-1].desc;
57 			lpte = pten >> (desc->bits - pair->bits);
58 		} else {
59 			lpte = pten;
60 		}
61 	}
62 
63 	if (!(pgt = kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL)))
64 		return NULL;
65 	pgt->page = page ? page->shift : 0;
66 	pgt->sparse = sparse;
67 
68 	if (desc->type == PGD) {
69 		pgt->pde = kvcalloc(pten, sizeof(*pgt->pde), GFP_KERNEL);
70 		if (!pgt->pde) {
71 			kfree(pgt);
72 			return NULL;
73 		}
74 	}
75 
76 	return pgt;
77 }
78 
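/* Walk state for nvkm_vmm_iter(): the page size and descriptor array in
 * use, how many PTEs remain, the current and top levels, and the PTE
 * index plus software PT at each level.  'flush' records how much of
 * the tree has been modified since the last MMU flush
 * (NVKM_VMM_LEVELS_MAX means nothing is pending).
 */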
79 struct nvkm_vmm_iter {
80 	const struct nvkm_vmm_page *page;
81 	const struct nvkm_vmm_desc *desc;
82 	struct nvkm_vmm *vmm;
83 	u64 cnt;
84 	u16 max, lvl;
85 	u32 pte[NVKM_VMM_LEVELS_MAX];
86 	struct nvkm_vmm_pt *pt[NVKM_VMM_LEVELS_MAX];
87 	int flush;
88 };
89 
90 #ifdef CONFIG_NOUVEAU_DEBUG_MMU
91 static const char *
92 nvkm_vmm_desc_type(const struct nvkm_vmm_desc *desc)
93 {
94 	switch (desc->type) {
95 	case PGD: return "PGD";
96 	case PGT: return "PGT";
97 	case SPT: return "SPT";
98 	case LPT: return "LPT";
99 	default:
100 		return "UNKNOWN";
101 	}
102 }
103 
104 static void
105 nvkm_vmm_trace(struct nvkm_vmm_iter *it, char *buf)
106 {
107 	int lvl;
108 	for (lvl = it->max; lvl >= 0; lvl--) {
109 		if (lvl >= it->lvl)
110 			buf += sprintf(buf,  "%05x:", it->pte[lvl]);
111 		else
112 			buf += sprintf(buf, "xxxxx:");
113 	}
114 }
115 
116 #define TRA(i,f,a...) do {                                                     \
117 	char _buf[NVKM_VMM_LEVELS_MAX * 7];                                    \
118 	struct nvkm_vmm_iter *_it = (i);                                       \
119 	nvkm_vmm_trace(_it, _buf);                                             \
120 	VMM_TRACE(_it->vmm, "%s "f, _buf, ##a);                                \
121 } while(0)
122 #else
123 #define TRA(i,f,a...)
124 #endif
125 
126 static inline void
127 nvkm_vmm_flush_mark(struct nvkm_vmm_iter *it)
128 {
129 	it->flush = min(it->flush, it->max - it->lvl);
130 }
131 
132 static inline void
133 nvkm_vmm_flush(struct nvkm_vmm_iter *it)
134 {
135 	if (it->flush != NVKM_VMM_LEVELS_MAX) {
136 		if (it->vmm->func->flush) {
137 			TRA(it, "flush: %d", it->flush);
138 			it->vmm->func->flush(it->vmm, it->flush);
139 		}
140 		it->flush = NVKM_VMM_LEVELS_MAX;
141 	}
142 }
143 
144 static void
145 nvkm_vmm_unref_pdes(struct nvkm_vmm_iter *it)
146 {
147 	const struct nvkm_vmm_desc *desc = it->desc;
148 	const int type = desc[it->lvl].type == SPT;
149 	struct nvkm_vmm_pt *pgd = it->pt[it->lvl + 1];
150 	struct nvkm_vmm_pt *pgt = it->pt[it->lvl];
151 	struct nvkm_mmu_pt *pt = pgt->pt[type];
152 	struct nvkm_vmm *vmm = it->vmm;
153 	u32 pdei = it->pte[it->lvl + 1];
154 
155 	/* Recurse up the tree, unreferencing/destroying unneeded PDs. */
156 	it->lvl++;
157 	if (--pgd->refs[0]) {
158 		const struct nvkm_vmm_desc_func *func = desc[it->lvl].func;
159 		/* PD has other valid PDEs, so we need a proper update. */
160 		TRA(it, "PDE unmap %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
161 		pgt->pt[type] = NULL;
162 		if (!pgt->refs[!type]) {
163 			/* PDE no longer required. */
164 			if (pgd->pt[0]) {
165 				if (pgt->sparse) {
166 					func->sparse(vmm, pgd->pt[0], pdei, 1);
167 					pgd->pde[pdei] = NVKM_VMM_PDE_SPARSE;
168 				} else {
169 					func->unmap(vmm, pgd->pt[0], pdei, 1);
170 					pgd->pde[pdei] = NULL;
171 				}
172 			} else {
173 				/* Special handling for Tesla-class GPUs,
174 				 * where there's no central PD, but each
175 				 * instance has its own embedded PD.
176 				 */
177 				func->pde(vmm, pgd, pdei);
178 				pgd->pde[pdei] = NULL;
179 			}
180 		} else {
181 			/* PDE was pointing at dual-PTs and we're removing
182 			 * one of them, leaving the other in place.
183 			 */
184 			func->pde(vmm, pgd, pdei);
185 		}
186 
187 		/* GPU may have cached the PTs, flush before freeing. */
188 		nvkm_vmm_flush_mark(it);
189 		nvkm_vmm_flush(it);
190 	} else {
191 		/* PD has no valid PDEs left, so we can just destroy it. */
192 		nvkm_vmm_unref_pdes(it);
193 	}
194 
195 	/* Destroy PD/PT. */
196 	TRA(it, "PDE free %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
197 	nvkm_mmu_ptc_put(vmm->mmu, vmm->bootstrapped, &pt);
198 	if (!pgt->refs[!type])
199 		nvkm_vmm_pt_del(&pgt);
200 	it->lvl--;
201 }
202 
203 static void
204 nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
205 		     const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
206 {
207 	const struct nvkm_vmm_desc *pair = it->page[-1].desc;
208 	const u32 sptb = desc->bits - pair->bits;
209 	const u32 sptn = 1 << sptb;
210 	struct nvkm_vmm *vmm = it->vmm;
211 	u32 spti = ptei & (sptn - 1), lpti, pteb;
212 
213 	/* Determine how many SPTEs are being touched under each LPTE,
214 	 * and drop reference counts.
215 	 */
216 	for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
217 		const u32 pten = min(sptn - spti, ptes);
218 		pgt->pte[lpti] -= pten;
219 		ptes -= pten;
220 	}
221 
222 	/* We're done here if there's no corresponding LPT. */
223 	if (!pgt->refs[0])
224 		return;
225 
226 	for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
227 		/* Skip over any LPTEs that still have valid SPTEs. */
228 		if (pgt->pte[pteb] & NVKM_VMM_PTE_SPTES) {
229 			for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
230 				if (!(pgt->pte[ptei] & NVKM_VMM_PTE_SPTES))
231 					break;
232 			}
233 			continue;
234 		}
235 
236 		/* As there are no more non-UNMAPPED SPTEs left in the range
237 		 * covered by a number of LPTEs, the LPTEs once again take
238 		 * control over their address range.
239 		 *
240 		 * Determine how many LPTEs need to transition state.
241 		 */
242 		pgt->pte[ptei] &= ~NVKM_VMM_PTE_VALID;
243 		for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
244 			if (pgt->pte[ptei] & NVKM_VMM_PTE_SPTES)
245 				break;
246 			pgt->pte[ptei] &= ~NVKM_VMM_PTE_VALID;
247 		}
248 
249 		if (pgt->pte[pteb] & NVKM_VMM_PTE_SPARSE) {
250 			TRA(it, "LPTE %05x: U -> S %d PTEs", pteb, ptes);
251 			pair->func->sparse(vmm, pgt->pt[0], pteb, ptes);
252 		} else
253 		if (pair->func->invalid) {
254 			/* If the MMU supports it, restore the LPTE to the
255 			 * INVALID state to tell the MMU there is no point
256 			 * trying to fetch the corresponding SPTEs.
257 			 */
258 			TRA(it, "LPTE %05x: U -> I %d PTEs", pteb, ptes);
259 			pair->func->invalid(vmm, pgt->pt[0], pteb, ptes);
260 		}
261 	}
262 }
263 
264 static bool
265 nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
266 {
267 	const struct nvkm_vmm_desc *desc = it->desc;
268 	const int type = desc->type == SPT;
269 	struct nvkm_vmm_pt *pgt = it->pt[0];
270 	bool dma;
271 
272 	if (pfn) {
273 		/* Need to clear PTE valid bits before we dma_unmap_page(). */
274 		dma = desc->func->pfn_clear(it->vmm, pgt->pt[type], ptei, ptes);
275 		if (dma) {
276 			/* GPU may have cached the PT, flush before unmap. */
277 			nvkm_vmm_flush_mark(it);
278 			nvkm_vmm_flush(it);
279 			desc->func->pfn_unmap(it->vmm, pgt->pt[type], ptei, ptes);
280 		}
281 	}
282 
283 	/* Drop PTE references. */
284 	pgt->refs[type] -= ptes;
285 
286 	/* Dual-PTs need special handling, unless the PDE is becoming invalid. */
287 	if (desc->type == SPT && (pgt->refs[0] || pgt->refs[1]))
288 		nvkm_vmm_unref_sptes(it, pgt, desc, ptei, ptes);
289 
290 	/* PT no longer needed?  Destroy it. */
291 	if (!pgt->refs[type]) {
292 		it->lvl++;
293 		TRA(it, "%s empty", nvkm_vmm_desc_type(desc));
294 		it->lvl--;
295 		nvkm_vmm_unref_pdes(it);
296 		return false; /* PTE writes for unmap() not necessary. */
297 	}
298 
299 	return true;
300 }
301 
302 static void
303 nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
304 		   const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
305 {
306 	const struct nvkm_vmm_desc *pair = it->page[-1].desc;
307 	const u32 sptb = desc->bits - pair->bits;
308 	const u32 sptn = 1 << sptb;
309 	struct nvkm_vmm *vmm = it->vmm;
310 	u32 spti = ptei & (sptn - 1), lpti, pteb;
311 
312 	/* Determine how many SPTEs are being touched under each LPTE,
313 	 * and increase reference counts.
314 	 */
315 	for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
316 		const u32 pten = min(sptn - spti, ptes);
317 		pgt->pte[lpti] += pten;
318 		ptes -= pten;
319 	}
320 
321 	/* We're done here if there's no corresponding LPT. */
322 	if (!pgt->refs[0])
323 		return;
324 
325 	for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
326 		/* Skip over any LPTEs that already have valid SPTEs. */
327 		if (pgt->pte[pteb] & NVKM_VMM_PTE_VALID) {
328 			for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
329 				if (!(pgt->pte[ptei] & NVKM_VMM_PTE_VALID))
330 					break;
331 			}
332 			continue;
333 		}
334 
335 		/* As there are now non-UNMAPPED SPTEs in the range covered
336 		 * by a number of LPTEs, we need to transfer control of the
337 		 * address range to the SPTEs.
338 		 *
339 		 * Determine how many LPTEs need to transition state.
340 		 */
341 		pgt->pte[ptei] |= NVKM_VMM_PTE_VALID;
342 		for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
343 			if (pgt->pte[ptei] & NVKM_VMM_PTE_VALID)
344 				break;
345 			pgt->pte[ptei] |= NVKM_VMM_PTE_VALID;
346 		}
347 
348 		if (pgt->pte[pteb] & NVKM_VMM_PTE_SPARSE) {
349 			const u32 spti = pteb * sptn;
350 			const u32 sptc = ptes * sptn;
351 			/* The entire LPTE is marked as sparse, we need
352 			 * to make sure that the SPTEs are too.
353 			 */
354 			TRA(it, "SPTE %05x: U -> S %d PTEs", spti, sptc);
355 			desc->func->sparse(vmm, pgt->pt[1], spti, sptc);
356 			/* Sparse LPTEs prevent SPTEs from being accessed. */
357 			TRA(it, "LPTE %05x: S -> U %d PTEs", pteb, ptes);
358 			pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
359 		} else
360 		if (pair->func->invalid) {
361 			/* MMU supports blocking SPTEs by marking an LPTE
362 			 * as INVALID.  We need to reverse that here.
363 			 */
364 			TRA(it, "LPTE %05x: I -> U %d PTEs", pteb, ptes);
365 			pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
366 		}
367 	}
368 }
369 
370 static bool
371 nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
372 {
373 	const struct nvkm_vmm_desc *desc = it->desc;
374 	const int type = desc->type == SPT;
375 	struct nvkm_vmm_pt *pgt = it->pt[0];
376 
377 	/* Take PTE references. */
378 	pgt->refs[type] += ptes;
379 
380 	/* Dual-PTs need special handling. */
381 	if (desc->type == SPT)
382 		nvkm_vmm_ref_sptes(it, pgt, desc, ptei, ptes);
383 
384 	return true;
385 }
386 
387 static void
388 nvkm_vmm_sparse_ptes(const struct nvkm_vmm_desc *desc,
389 		     struct nvkm_vmm_pt *pgt, u32 ptei, u32 ptes)
390 {
391 	if (desc->type == PGD) {
392 		while (ptes--)
393 			pgt->pde[ptei++] = NVKM_VMM_PDE_SPARSE;
394 	} else
395 	if (desc->type == LPT) {
396 		memset(&pgt->pte[ptei], NVKM_VMM_PTE_SPARSE, ptes);
397 	}
398 }
399 
400 static bool
401 nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
402 {
403 	struct nvkm_vmm_pt *pt = it->pt[0];
404 	if (it->desc->type == PGD)
405 		memset(&pt->pde[ptei], 0x00, sizeof(pt->pde[0]) * ptes);
406 	else
407 	if (it->desc->type == LPT)
408 		memset(&pt->pte[ptei], 0x00, sizeof(pt->pte[0]) * ptes);
409 	return nvkm_vmm_unref_ptes(it, pfn, ptei, ptes);
410 }
411 
412 static bool
413 nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
414 {
415 	nvkm_vmm_sparse_ptes(it->desc, it->pt[0], ptei, ptes);
416 	return nvkm_vmm_ref_ptes(it, pfn, ptei, ptes);
417 }
418 
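/* Allocate the hardware page table backing pgd->pde[pdei] and point the
 * parent PDE at it.  When a zero-filled PT is not the correct initial
 * state, every entry is written out as sparse/invalid/unmapped instead,
 * taking care not to hide SPTEs that already cover the same range.
 */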
419 static bool
420 nvkm_vmm_ref_hwpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
421 {
422 	const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
423 	const int type = desc->type == SPT;
424 	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
425 	const bool zero = !pgt->sparse && !desc->func->invalid;
426 	struct nvkm_vmm *vmm = it->vmm;
427 	struct nvkm_mmu *mmu = vmm->mmu;
428 	struct nvkm_mmu_pt *pt;
429 	u32 pten = 1 << desc->bits;
430 	u32 pteb, ptei, ptes;
431 	u32 size = desc->size * pten;
432 
433 	pgd->refs[0]++;
434 
435 	pgt->pt[type] = nvkm_mmu_ptc_get(mmu, size, desc->align, zero);
436 	if (!pgt->pt[type]) {
437 		it->lvl--;
438 		nvkm_vmm_unref_pdes(it);
439 		return false;
440 	}
441 
442 	if (zero)
443 		goto done;
444 
445 	pt = pgt->pt[type];
446 
447 	if (desc->type == LPT && pgt->refs[1]) {
448 		/* SPT already exists covering the same range as this LPT,
449 		 * which means we need to be careful that any LPTEs which
450 		 * overlap valid SPTEs are unmapped as opposed to invalid
451 		 * or sparse, which would prevent the MMU from looking at
452 		 * the SPTEs on some GPUs.
453 		 */
454 		for (ptei = pteb = 0; ptei < pten; pteb = ptei) {
455 			bool spte = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;
456 			for (ptes = 1, ptei++; ptei < pten; ptes++, ptei++) {
457 				bool next = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;
458 				if (spte != next)
459 					break;
460 			}
461 
462 			if (!spte) {
463 				if (pgt->sparse)
464 					desc->func->sparse(vmm, pt, pteb, ptes);
465 				else
466 					desc->func->invalid(vmm, pt, pteb, ptes);
467 				memset(&pgt->pte[pteb], 0x00, ptes);
468 			} else {
469 				desc->func->unmap(vmm, pt, pteb, ptes);
470 				while (ptes--)
471 					pgt->pte[pteb++] |= NVKM_VMM_PTE_VALID;
472 			}
473 		}
474 	} else {
475 		if (pgt->sparse) {
476 			nvkm_vmm_sparse_ptes(desc, pgt, 0, pten);
477 			desc->func->sparse(vmm, pt, 0, pten);
478 		} else {
479 			desc->func->invalid(vmm, pt, 0, pten);
480 		}
481 	}
482 
483 done:
484 	TRA(it, "PDE write %s", nvkm_vmm_desc_type(desc));
485 	it->desc[it->lvl].func->pde(it->vmm, pgd, pdei);
486 	nvkm_vmm_flush_mark(it);
487 	return true;
488 }
489 
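/* Allocate the software page-table tracking structure for
 * pgd->pde[pdei], preserving any sparse state recorded by the previous
 * placeholder value.
 */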
490 static bool
491 nvkm_vmm_ref_swpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
492 {
493 	const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
494 	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
495 
496 	pgt = nvkm_vmm_pt_new(desc, NVKM_VMM_PDE_SPARSED(pgt), it->page);
497 	if (!pgt) {
498 		if (!pgd->refs[0])
499 			nvkm_vmm_unref_pdes(it);
500 		return false;
501 	}
502 
503 	pgd->pde[pdei] = pgt;
504 	return true;
505 }
506 
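/* Core page-table walker.  Breaks the VA range into runs of PTEs at the
 * requested page size, allocating/releasing page tables along the way
 * via REF_PTES, and applying MAP_PTES/CLR_PTES to each run.  Returns
 * ~0ULL on success, or the address at which a page-table allocation
 * failed so that the caller can unwind partial work.
 */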
507 static inline u64
508 nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
509 	      u64 addr, u64 size, const char *name, bool ref, bool pfn,
510 	      bool (*REF_PTES)(struct nvkm_vmm_iter *, bool pfn, u32, u32),
511 	      nvkm_vmm_pte_func MAP_PTES, struct nvkm_vmm_map *map,
512 	      nvkm_vmm_pxe_func CLR_PTES)
513 {
514 	const struct nvkm_vmm_desc *desc = page->desc;
515 	struct nvkm_vmm_iter it;
516 	u64 bits = addr >> page->shift;
517 
518 	it.page = page;
519 	it.desc = desc;
520 	it.vmm = vmm;
521 	it.cnt = size >> page->shift;
522 	it.flush = NVKM_VMM_LEVELS_MAX;
523 
524 	/* Deconstruct address into PTE indices for each mapping level. */
525 	for (it.lvl = 0; desc[it.lvl].bits; it.lvl++) {
526 		it.pte[it.lvl] = bits & ((1 << desc[it.lvl].bits) - 1);
527 		bits >>= desc[it.lvl].bits;
528 	}
529 	it.max = --it.lvl;
530 	it.pt[it.max] = vmm->pd;
531 
532 	it.lvl = 0;
533 	TRA(&it, "%s: %016"PRIx64" %016"PRIx64" %d %lld PTEs", name,
534 	         addr, size, page->shift, it.cnt);
535 	it.lvl = it.max;
536 
537 	/* Depth-first traversal of page tables. */
538 	while (it.cnt) {
539 		struct nvkm_vmm_pt *pgt = it.pt[it.lvl];
540 		const int type = desc->type == SPT;
541 		const u32 pten = 1 << desc->bits;
542 		const u32 ptei = it.pte[0];
543 		const u32 ptes = min_t(u64, it.cnt, pten - ptei);
544 
545 		/* Walk down the tree, finding page tables for each level. */
546 		for (; it.lvl; it.lvl--) {
547 			const u32 pdei = it.pte[it.lvl];
548 			struct nvkm_vmm_pt *pgd = pgt;
549 
550 			/* Software PT. */
551 			if (ref && NVKM_VMM_PDE_INVALID(pgd->pde[pdei])) {
552 				if (!nvkm_vmm_ref_swpt(&it, pgd, pdei))
553 					goto fail;
554 			}
555 			it.pt[it.lvl - 1] = pgt = pgd->pde[pdei];
556 
557 			/* Hardware PT.
558 			 *
559 			 * This is a separate step from above due to GF100 and
560 			 * newer having dual page tables at some levels, which
561 			 * are refcounted independently.
562 			 */
563 			if (ref && !pgt->refs[desc[it.lvl - 1].type == SPT]) {
564 				if (!nvkm_vmm_ref_hwpt(&it, pgd, pdei))
565 					goto fail;
566 			}
567 		}
568 
569 		/* Handle PTE updates. */
570 		if (!REF_PTES || REF_PTES(&it, pfn, ptei, ptes)) {
571 			struct nvkm_mmu_pt *pt = pgt->pt[type];
572 			if (MAP_PTES || CLR_PTES) {
573 				if (MAP_PTES)
574 					MAP_PTES(vmm, pt, ptei, ptes, map);
575 				else
576 					CLR_PTES(vmm, pt, ptei, ptes);
577 				nvkm_vmm_flush_mark(&it);
578 			}
579 		}
580 
581 		/* Walk back up the tree to the next position. */
582 		it.pte[it.lvl] += ptes;
583 		it.cnt -= ptes;
584 		if (it.cnt) {
585 			while (it.pte[it.lvl] == (1 << desc[it.lvl].bits)) {
586 				it.pte[it.lvl++] = 0;
587 				it.pte[it.lvl]++;
588 			}
589 		}
590 	};
591 
592 	nvkm_vmm_flush(&it);
593 	return ~0ULL;
594 
595 fail:
596 	/* Reconstruct the failure address so the caller is able to
597 	 * reverse any partially completed operations.
598 	 */
599 	addr = it.pte[it.max--];
600 	do {
601 		addr  = addr << desc[it.max].bits;
602 		addr |= it.pte[it.max];
603 	} while (it.max--);
604 
605 	return addr << page->shift;
606 }
607 
608 static void
609 nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
610 			 u64 addr, u64 size)
611 {
612 	nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false, false,
613 		      nvkm_vmm_sparse_unref_ptes, NULL, NULL,
614 		      page->desc->func->invalid ?
615 		      page->desc->func->invalid : page->desc->func->unmap);
616 }
617 
618 static int
619 nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
620 			 u64 addr, u64 size)
621 {
622 	if ((page->type & NVKM_VMM_PAGE_SPARSE)) {
623 		u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "sparse ref",
624 					 true, false, nvkm_vmm_sparse_ref_ptes,
625 					 NULL, NULL, page->desc->func->sparse);
626 		if (fail != ~0ULL) {
627 			if ((size = fail - addr))
628 				nvkm_vmm_ptes_sparse_put(vmm, page, addr, size);
629 			return -ENOMEM;
630 		}
631 		return 0;
632 	}
633 	return -EINVAL;
634 }
635 
636 static int
637 nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
638 {
639 	const struct nvkm_vmm_page *page = vmm->func->page;
640 	int m = 0, i;
641 	u64 start = addr;
642 	u64 block;
643 
644 	while (size) {
645 		/* Limit maximum page size based on remaining size. */
646 		while (size < (1ULL << page[m].shift))
647 			m++;
648 		i = m;
649 
650 		/* Find largest page size suitable for alignment. */
651 		while (!IS_ALIGNED(addr, 1ULL << page[i].shift))
652 			i++;
653 
654 		/* Determine number of PTEs at this page size. */
655 		if (i != m) {
656 			/* Limited to alignment boundary of next page size. */
657 			u64 next = 1ULL << page[i - 1].shift;
658 			u64 part = ALIGN(addr, next) - addr;
659 			if (size - part >= next)
660 				block = (part >> page[i].shift) << page[i].shift;
661 			else
662 				block = (size >> page[i].shift) << page[i].shift;
663 		} else {
664 			block = (size >> page[i].shift) << page[i].shift;
665 		}
666 
667 		/* Perform operation. */
668 		if (ref) {
669 			int ret = nvkm_vmm_ptes_sparse_get(vmm, &page[i], addr, block);
670 			if (ret) {
671 				if ((size = addr - start))
672 					nvkm_vmm_ptes_sparse(vmm, start, size, false);
673 				return ret;
674 			}
675 		} else {
676 			nvkm_vmm_ptes_sparse_put(vmm, &page[i], addr, block);
677 		}
678 
679 		size -= block;
680 		addr += block;
681 	}
682 
683 	return 0;
684 }
685 
686 static void
687 nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
688 			u64 addr, u64 size, bool sparse, bool pfn)
689 {
690 	const struct nvkm_vmm_desc_func *func = page->desc->func;
691 	nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
692 		      false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
693 		      sparse ? func->sparse : func->invalid ? func->invalid :
694 							      func->unmap);
695 }
696 
697 static int
698 nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
699 		      u64 addr, u64 size, struct nvkm_vmm_map *map,
700 		      nvkm_vmm_pte_func func)
701 {
702 	u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
703 				 false, nvkm_vmm_ref_ptes, func, map, NULL);
704 	if (fail != ~0ULL) {
705 		if ((size = fail - addr))
706 			nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
707 		return -ENOMEM;
708 	}
709 	return 0;
710 }
711 
712 static void
713 nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
714 		    u64 addr, u64 size, bool sparse, bool pfn)
715 {
716 	const struct nvkm_vmm_desc_func *func = page->desc->func;
717 	nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
718 		      NULL, NULL, NULL,
719 		      sparse ? func->sparse : func->invalid ? func->invalid :
720 							      func->unmap);
721 }
722 
723 static void
724 nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
725 		  u64 addr, u64 size, struct nvkm_vmm_map *map,
726 		  nvkm_vmm_pte_func func)
727 {
728 	nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
729 		      NULL, func, map, NULL);
730 }
731 
732 static void
733 nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
734 		  u64 addr, u64 size)
735 {
736 	nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
737 		      nvkm_vmm_unref_ptes, NULL, NULL, NULL);
738 }
739 
740 static int
741 nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
742 		  u64 addr, u64 size)
743 {
744 	u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
745 				 nvkm_vmm_ref_ptes, NULL, NULL, NULL);
746 	if (fail != ~0ULL) {
747 		if (fail != addr)
748 			nvkm_vmm_ptes_put(vmm, page, addr, fail - addr);
749 		return -ENOMEM;
750 	}
751 	return 0;
752 }
753 
754 static inline struct nvkm_vma *
755 nvkm_vma_new(u64 addr, u64 size)
756 {
757 	struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
758 	if (vma) {
759 		vma->addr = addr;
760 		vma->size = size;
761 		vma->page = NVKM_VMA_PAGE_NONE;
762 		vma->refd = NVKM_VMA_PAGE_NONE;
763 	}
764 	return vma;
765 }
766 
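/* Split 'tail' bytes off the end of a VMA into a new node that inherits
 * the original's state flags, and link it immediately after the
 * original in the VMA list.
 */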
767 struct nvkm_vma *
768 nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
769 {
770 	struct nvkm_vma *new;
771 
772 	BUG_ON(vma->size == tail);
773 
774 	if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail)))
775 		return NULL;
776 	vma->size -= tail;
777 
778 	new->mapref = vma->mapref;
779 	new->sparse = vma->sparse;
780 	new->page = vma->page;
781 	new->refd = vma->refd;
782 	new->used = vma->used;
783 	new->part = vma->part;
784 	new->user = vma->user;
785 	new->busy = vma->busy;
786 	new->mapped = vma->mapped;
787 	list_add(&new->head, &vma->head);
788 	return new;
789 }
790 
791 #ifdef __NetBSD__
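/* NetBSD uses rb_tree(9) in place of the Linux rbtree.  The free tree
 * is keyed by (size, addr); the tree of allocated nodes (vmm->root,
 * whose ops are defined further below) is keyed by address alone.
 */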
792 struct nvkm_vma_key {
793 	u64 size;
794 	u64 addr;
795 } __packed;
796 
797 static int
798 compare_vma_free_nodes(void *cookie, const void *va, const void *vb)
799 {
800 	const struct nvkm_vma *a = va, *b = vb;
801 
802 	if (a->size < b->size)
803 		return -1;
804 	if (a->size > b->size)
805 		return +1;
806 	if (a->addr < b->addr)
807 		return -1;
808 	if (a->addr > b->addr)
809 		return +1;
810 	return 0;
811 }
812 
813 static int
814 compare_vma_free_key(void *cookie, const void *vn, const void *vk)
815 {
816 	const struct nvkm_vma *n = vn;
817 	const struct nvkm_vma_key *k = vk;
818 
819 	if (n->size < k->size)
820 		return -1;
821 	if (n->size > k->size)
822 		return +1;
823 	if (n->addr < k->addr)
824 		return -1;
825 	if (n->addr > k->addr)
826 		return +1;
827 	return 0;
828 }
829 
830 static const rb_tree_ops_t vmm_free_rb_ops = {
831 	.rbto_compare_nodes = compare_vma_free_nodes,
832 	.rbto_compare_key = compare_vma_free_key,
833 	.rbto_node_offset = offsetof(struct nvkm_vma, tree),
834 };
835 #endif
836 
837 static inline void
838 nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
839 {
840 #ifdef __NetBSD__
841 	rb_tree_remove_node(&vmm->free, vma);
842 #else
843 	rb_erase(&vma->tree, &vmm->free);
844 #endif
845 }
846 
847 static inline void
848 nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
849 {
850 	nvkm_vmm_free_remove(vmm, vma);
851 	list_del(&vma->head);
852 	kfree(vma);
853 }
854 
855 static void
856 nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
857 {
858 #ifdef __NetBSD__
859 	struct nvkm_vma *collision __diagused =
860 	    rb_tree_insert_node(&vmm->free, vma);
861 	KASSERT(collision == vma);
862 #else
863 	struct rb_node **ptr = &vmm->free.rb_node;
864 	struct rb_node *parent = NULL;
865 
866 	while (*ptr) {
867 		struct nvkm_vma *this = rb_entry(*ptr, typeof(*this), tree);
868 		parent = *ptr;
869 		if (vma->size < this->size)
870 			ptr = &parent->rb_left;
871 		else
872 		if (vma->size > this->size)
873 			ptr = &parent->rb_right;
874 		else
875 		if (vma->addr < this->addr)
876 			ptr = &parent->rb_left;
877 		else
878 		if (vma->addr > this->addr)
879 			ptr = &parent->rb_right;
880 		else
881 			BUG();
882 	}
883 
884 	rb_link_node(&vma->tree, parent, ptr);
885 	rb_insert_color(&vma->tree, &vmm->free);
886 #endif
887 }
888 
889 #ifdef __NetBSD__
890 static int
891 compare_vma_nodes(void *cookie, const void *va, const void *vb)
892 {
893 	const struct nvkm_vma *a = va, *b = vb;
894 
895 	if (a->addr < b->addr)
896 		return -1;
897 	if (a->addr > b->addr)
898 		return +1;
899 	return 0;
900 }
901 
902 static int
903 compare_vma_key(void *cookie, const void *vn, const void *vk)
904 {
905 	const struct nvkm_vma *n = vn;
906 	const u64 *k = vk;
907 
908 	if (n->addr < *k)
909 		return -1;
910 	if (n->addr > *k)
911 		return +1;
912 	return 0;
913 }
914 
915 static const rb_tree_ops_t vmm_rb_ops = {
916 	.rbto_compare_nodes = compare_vma_nodes,
917 	.rbto_compare_key = compare_vma_key,
918 	.rbto_node_offset = offsetof(struct nvkm_vma, tree),
919 };
920 #endif
921 
922 static inline void
923 nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
924 {
925 #ifdef __NetBSD__
926 	rb_tree_remove_node(&vmm->root, vma);
927 #else
928 	rb_erase(&vma->tree, &vmm->root);
929 #endif
930 }
931 
932 static inline void
933 nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
934 {
935 	nvkm_vmm_node_remove(vmm, vma);
936 	list_del(&vma->head);
937 	kfree(vma);
938 }
939 
940 static void
941 nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
942 {
943 #ifdef __NetBSD__
944 	struct nvkm_vma *collision __diagused =
945 	    rb_tree_insert_node(&vmm->root, vma);
946 	KASSERT(collision == vma);
947 #else
948 	struct rb_node **ptr = &vmm->root.rb_node;
949 	struct rb_node *parent = NULL;
950 
951 	while (*ptr) {
952 		struct nvkm_vma *this = rb_entry(*ptr, typeof(*this), tree);
953 		parent = *ptr;
954 		if (vma->addr < this->addr)
955 			ptr = &parent->rb_left;
956 		else
957 		if (vma->addr > this->addr)
958 			ptr = &parent->rb_right;
959 		else
960 			BUG();
961 	}
962 
963 	rb_link_node(&vma->tree, parent, ptr);
964 	rb_insert_color(&vma->tree, &vmm->root);
965 #endif
966 }
967 
968 struct nvkm_vma *
969 nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr)
970 {
971 #ifdef __NetBSD__
972 	return rb_tree_find_node(&vmm->root, &addr);
973 #else
974 	struct rb_node *node = vmm->root.rb_node;
975 	while (node) {
976 		struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
977 		if (addr < vma->addr)
978 			node = node->rb_left;
979 		else
980 		if (addr >= vma->addr + vma->size)
981 			node = node->rb_right;
982 		else
983 			return vma;
984 	}
985 	return NULL;
986 #endif
987 }
988 
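/* Fetch the previous/next VMA in the address-ordered list, or NULL at
 * either end.  'dir' is a list_head member name; relies on a 'vmm'
 * variable being in scope at the call site.
 */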
989 #define node(root, dir) (((root)->head.dir == &vmm->list) ? NULL :             \
990 	list_entry((root)->head.dir, struct nvkm_vma, head))
991 
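/* Merge 'size' bytes of a VMA with whichever compatible neighbours were
 * passed in.  Depending on whether the whole VMA changed state, the
 * region is absorbed into 'prev'/'next' or the boundary is simply
 * shifted; the node now covering the range is returned.
 */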
992 static struct nvkm_vma *
993 nvkm_vmm_node_merge(struct nvkm_vmm *vmm, struct nvkm_vma *prev,
994 		    struct nvkm_vma *vma, struct nvkm_vma *next, u64 size)
995 {
996 	if (next) {
997 		if (vma->size == size) {
998 			vma->size += next->size;
999 			nvkm_vmm_node_delete(vmm, next);
1000 			if (prev) {
1001 				prev->size += vma->size;
1002 				nvkm_vmm_node_delete(vmm, vma);
1003 				return prev;
1004 			}
1005 			return vma;
1006 		}
1007 		BUG_ON(prev);
1008 
1009 		nvkm_vmm_node_remove(vmm, next);
1010 		vma->size -= size;
1011 		next->addr -= size;
1012 		next->size += size;
1013 		nvkm_vmm_node_insert(vmm, next);
1014 		return next;
1015 	}
1016 
1017 	if (prev) {
1018 		if (vma->size != size) {
1019 			nvkm_vmm_node_remove(vmm, vma);
1020 			prev->size += size;
1021 			vma->addr += size;
1022 			vma->size -= size;
1023 			nvkm_vmm_node_insert(vmm, vma);
1024 		} else {
1025 			prev->size += vma->size;
1026 			nvkm_vmm_node_delete(vmm, vma);
1027 		}
1028 		return prev;
1029 	}
1030 
1031 	return vma;
1032 }
1033 
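/* Carve [addr, addr + size) out of a VMA, splitting off a head and/or
 * tail as needed.  Newly created fragments are flagged as 'part' and
 * inserted into the allocated-node tree; returns the node covering the
 * requested range, or NULL if a split allocation fails.
 */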
1034 struct nvkm_vma *
1035 nvkm_vmm_node_split(struct nvkm_vmm *vmm,
1036 		    struct nvkm_vma *vma, u64 addr, u64 size)
1037 {
1038 	struct nvkm_vma *prev = NULL;
1039 
1040 	if (vma->addr != addr) {
1041 		prev = vma;
1042 		if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))
1043 			return NULL;
1044 		vma->part = true;
1045 		nvkm_vmm_node_insert(vmm, vma);
1046 	}
1047 
1048 	if (vma->size != size) {
1049 		struct nvkm_vma *tmp;
1050 		if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
1051 			nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size);
1052 			return NULL;
1053 		}
1054 		tmp->part = true;
1055 		nvkm_vmm_node_insert(vmm, tmp);
1056 	}
1057 
1058 	return vma;
1059 }
1060 
1061 static void
1062 nvkm_vma_dump(struct nvkm_vma *vma)
1063 {
1064 	printk(KERN_ERR "%016"PRIx64" %016"PRIx64" %c%c%c%c%c%c%c%c%c %p\n",
1065 	       vma->addr, (u64)vma->size,
1066 	       vma->used ? '-' : 'F',
1067 	       vma->mapref ? 'R' : '-',
1068 	       vma->sparse ? 'S' : '-',
1069 	       vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
1070 	       vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
1071 	       vma->part ? 'P' : '-',
1072 	       vma->user ? 'U' : '-',
1073 	       vma->busy ? 'B' : '-',
1074 	       vma->mapped ? 'M' : '-',
1075 	       vma->memory);
1076 }
1077 
1078 static void
1079 nvkm_vmm_dump(struct nvkm_vmm *vmm)
1080 {
1081 	struct nvkm_vma *vma;
1082 	list_for_each_entry(vma, &vmm->list, head) {
1083 		nvkm_vma_dump(vma);
1084 	}
1085 }
1086 
1087 static void
1088 nvkm_vmm_dtor(struct nvkm_vmm *vmm)
1089 {
1090 	struct nvkm_vma *vma;
1091 	struct rb_node *node;
1092 
1093 	if (0)
1094 		nvkm_vmm_dump(vmm);
1095 
1096 #ifdef __NetBSD__
1097 	__USE(node);
1098 	while ((vma = RB_TREE_MIN(&vmm->root)) != NULL)
1099 		nvkm_vmm_put(vmm, &vma);
1100 #else
1101 	while ((node = rb_first(&vmm->root))) {
1102 		struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
1103 		nvkm_vmm_put(vmm, &vma);
1104 	}
1105 #endif
1106 
1107 	if (vmm->bootstrapped) {
1108 		const struct nvkm_vmm_page *page = vmm->func->page;
1109 		const u64 limit = vmm->limit - vmm->start;
1110 
1111 		while (page[1].shift)
1112 			page++;
1113 
1114 		nvkm_mmu_ptc_dump(vmm->mmu);
1115 		nvkm_vmm_ptes_put(vmm, page, vmm->start, limit);
1116 	}
1117 
1118 	vma = list_first_entry(&vmm->list, typeof(*vma), head);
1119 	list_del(&vma->head);
1120 	kfree(vma);
1121 	WARN_ON(!list_empty(&vmm->list));
1122 
1123 	if (vmm->nullp) {
1124 #ifdef __NetBSD__
1125 		struct nvkm_device *device = vmm->mmu->subdev.device;
1126 		const bus_dma_tag_t dmat = device->func->dma_tag(device);
1127 		bus_dmamap_unload(dmat, vmm->nullmap);
1128 		bus_dmamem_unmap(dmat, vmm->nullp, 16 * 1024);
1129 		bus_dmamap_destroy(dmat, vmm->nullmap);
1130 		bus_dmamem_free(dmat, &vmm->nullseg, 1);
1131 #else
1132 		dma_free_coherent(vmm->mmu->subdev.device->dev, 16 * 1024,
1133 				  vmm->nullp, vmm->null);
1134 #endif
1135 	}
1136 
1137 	if (vmm->pd) {
1138 		nvkm_mmu_ptc_put(vmm->mmu, true, &vmm->pd->pt[0]);
1139 		nvkm_vmm_pt_del(&vmm->pd);
1140 	}
1141 
1142 	mutex_destroy(&vmm->mutex);
1143 }
1144 
1145 static int
1146 nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
1147 {
1148 	struct nvkm_vma *vma;
1149 	if (!(vma = nvkm_vma_new(addr, size)))
1150 		return -ENOMEM;
1151 	vma->mapref = true;
1152 	vma->sparse = false;
1153 	vma->used = true;
1154 	vma->user = true;
1155 	nvkm_vmm_node_insert(vmm, vma);
1156 	list_add_tail(&vma->head, &vmm->list);
1157 	return 0;
1158 }
1159 
1160 int
1161 nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
1162 	      u32 pd_header, bool managed, u64 addr, u64 size,
1163 	      struct lock_class_key *key, const char *name,
1164 	      struct nvkm_vmm *vmm)
1165 {
1166 	static struct lock_class_key _key;
1167 	const struct nvkm_vmm_page *page = func->page;
1168 	const struct nvkm_vmm_desc *desc;
1169 	struct nvkm_vma *vma;
1170 	int levels, bits = 0, ret;
1171 
1172 	vmm->func = func;
1173 	vmm->mmu = mmu;
1174 	vmm->name = name;
1175 	vmm->debug = mmu->subdev.debug;
1176 	kref_init(&vmm->kref);
1177 
1178 	__mutex_init(&vmm->mutex, "&vmm->mutex", key ? key : &_key);
1179 
1180 	/* Locate the smallest page size supported by the backend; it will
1181 	 * have the deepest nesting of page tables.
1182 	 */
1183 	while (page[1].shift)
1184 		page++;
1185 
1186 	/* Locate the structure that describes the layout of the top-level
1187 	 * page table, and determine the number of valid bits in a virtual
1188 	 * address.
1189 	 */
1190 	for (levels = 0, desc = page->desc; desc->bits; desc++, levels++)
1191 		bits += desc->bits;
1192 	bits += page->shift;
1193 	desc--;
1194 
1195 	if (WARN_ON(levels > NVKM_VMM_LEVELS_MAX))
1196 		return -EINVAL;
1197 
1198 	/* Allocate top-level page table. */
1199 	vmm->pd = nvkm_vmm_pt_new(desc, false, NULL);
1200 	if (!vmm->pd)
1201 		return -ENOMEM;
1202 	vmm->pd->refs[0] = 1;
1203 	INIT_LIST_HEAD(&vmm->join);
1204 
1205 	/* ... and the GPU storage for it, except on Tesla-class GPUs that
1206 	 * have the PD embedded in the instance structure.
1207 	 */
1208 	if (desc->size) {
1209 		const u32 size = pd_header + desc->size * (1 << desc->bits);
1210 		vmm->pd->pt[0] = nvkm_mmu_ptc_get(mmu, size, desc->align, true);
1211 		if (!vmm->pd->pt[0])
1212 			return -ENOMEM;
1213 	}
1214 
1215 	/* Initialise address-space MM. */
1216 	INIT_LIST_HEAD(&vmm->list);
1217 #ifdef __NetBSD__
1218 	rb_tree_init(&vmm->free, &vmm_free_rb_ops);
1219 	rb_tree_init(&vmm->root, &vmm_rb_ops);
1220 #else
1221 	vmm->free = RB_ROOT;
1222 	vmm->root = RB_ROOT;
1223 #endif
1224 
1225 	if (managed) {
1226 		/* Address-space will be managed by the client for the most
1227 		 * part, except for a specified area where NVKM allocations
1228 		 * are allowed to be placed.
1229 		 */
1230 		vmm->start = 0;
1231 		vmm->limit = 1ULL << bits;
1232 		if (addr + size < addr || addr + size > vmm->limit)
1233 			return -EINVAL;
1234 
1235 		/* Client-managed area before the NVKM-managed area. */
1236 		if (addr && (ret = nvkm_vmm_ctor_managed(vmm, 0, addr)))
1237 			return ret;
1238 
1239 		/* NVKM-managed area. */
1240 		if (size) {
1241 			if (!(vma = nvkm_vma_new(addr, size)))
1242 				return -ENOMEM;
1243 			nvkm_vmm_free_insert(vmm, vma);
1244 			list_add_tail(&vma->head, &vmm->list);
1245 		}
1246 
1247 		/* Client-managed area after the NVKM-managed area. */
1248 		addr = addr + size;
1249 		size = vmm->limit - addr;
1250 		if (size && (ret = nvkm_vmm_ctor_managed(vmm, addr, size)))
1251 			return ret;
1252 	} else {
1253 		/* Address-space fully managed by NVKM, requiring calls to
1254 		 * nvkm_vmm_get()/nvkm_vmm_put() to allocate address-space.
1255 		 */
1256 		vmm->start = addr;
1257 		vmm->limit = size ? (addr + size) : (1ULL << bits);
1258 		if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits))
1259 			return -EINVAL;
1260 
1261 		if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))
1262 			return -ENOMEM;
1263 
1264 		nvkm_vmm_free_insert(vmm, vma);
1265 		list_add(&vma->head, &vmm->list);
1266 	}
1267 
1268 	return 0;
1269 }
1270 
1271 int
1272 nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
1273 	      u32 hdr, bool managed, u64 addr, u64 size,
1274 	      struct lock_class_key *key, const char *name,
1275 	      struct nvkm_vmm **pvmm)
1276 {
1277 	if (!(*pvmm = kzalloc(sizeof(**pvmm), GFP_KERNEL)))
1278 		return -ENOMEM;
1279 	return nvkm_vmm_ctor(func, mmu, hdr, managed, addr, size, key, name, *pvmm);
1280 }
1281 
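/* PFN-map helper: reshape the VMA covering [addr, addr + size) so the
 * range can be flagged as mapped or unmapped, either by merging with
 * adjacent compatible VMAs or by splitting the current one.
 */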
1282 static struct nvkm_vma *
1283 nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1284 			 u64 addr, u64 size, u8 page, bool map)
1285 {
1286 	struct nvkm_vma *prev = NULL;
1287 	struct nvkm_vma *next = NULL;
1288 
1289 	if (vma->addr == addr && vma->part && (prev = node(vma, prev))) {
1290 		if (prev->memory || prev->mapped != map)
1291 			prev = NULL;
1292 	}
1293 
1294 	if (vma->addr + vma->size == addr + size && (next = node(vma, next))) {
1295 		if (!next->part ||
1296 		    next->memory || next->mapped != map)
1297 			next = NULL;
1298 	}
1299 
1300 	if (prev || next)
1301 		return nvkm_vmm_node_merge(vmm, prev, vma, next, size);
1302 	return nvkm_vmm_node_split(vmm, vma, addr, size);
1303 }
1304 
1305 int
1306 nvkm_vmm_pfn_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size)
1307 {
1308 	struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr);
1309 	struct nvkm_vma *next;
1310 	u64 limit = addr + size;
1311 	u64 start = addr;
1312 
1313 	if (!vma)
1314 		return -EINVAL;
1315 
1316 	do {
1317 		if (!vma->mapped || vma->memory)
1318 			continue;
1319 
1320 		size = min(limit - start, vma->size - (start - vma->addr));
1321 
1322 		nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd],
1323 					start, size, false, true);
1324 
1325 		next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false);
1326 		if (!WARN_ON(!next)) {
1327 			vma = next;
1328 			vma->refd = NVKM_VMA_PAGE_NONE;
1329 			vma->mapped = false;
1330 		}
1331 	} while ((vma = node(vma, next)) && (start = vma->addr) < limit);
1332 
1333 	return 0;
1334 }
1335 
1336 /*TODO:
1337  * - Avoid PT readback (for dma_unmap etc), this might end up being dealt
1338  *   with inside HMM, which would be a lot nicer for us to deal with.
1339  * - Multiple page sizes (particularly for huge page support).
1340  * - Support for systems without a 4KiB page size.
1341  */
1342 int
1343 nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn)
1344 {
1345 	const struct nvkm_vmm_page *page = vmm->func->page;
1346 	struct nvkm_vma *vma, *tmp;
1347 	u64 limit = addr + size;
1348 	u64 start = addr;
1349 	int pm = size >> shift;
1350 	int pi = 0;
1351 
1352 	/* Only support mapping where the page size of the incoming page
1353 	 * array matches a page size available for direct mapping.
1354 	 */
1355 	while (page->shift && page->shift != shift &&
1356 	       page->desc->func->pfn == NULL)
1357 		page++;
1358 
1359 	if (!page->shift || !IS_ALIGNED(addr, 1ULL << shift) ||
1360 			    !IS_ALIGNED(size, 1ULL << shift) ||
1361 	    addr + size < addr || addr + size > vmm->limit) {
1362 		VMM_DEBUG(vmm, "paged map %d %d %016"PRIx64" %016"PRIx64"\n",
1363 			  shift, page->shift, addr, size);
1364 		return -EINVAL;
1365 	}
1366 
1367 	if (!(vma = nvkm_vmm_node_search(vmm, addr)))
1368 		return -ENOENT;
1369 
1370 	do {
1371 		bool map = !!(pfn[pi] & NVKM_VMM_PFN_V);
1372 		bool mapped = vma->mapped;
1373 		u64 size = limit - start;
1374 		u64 addr = start;
1375 		int pn, ret = 0;
1376 
1377 		/* Narrow the operation window to cover a single action (page
1378 		 * should be mapped or not) within a single VMA.
1379 		 */
1380 		for (pn = 0; pi + pn < pm; pn++) {
1381 			if (map != !!(pfn[pi + pn] & NVKM_VMM_PFN_V))
1382 				break;
1383 		}
1384 		size = min_t(u64, size, pn << page->shift);
1385 		size = min_t(u64, size, vma->size + vma->addr - addr);
1386 
1387 		/* Reject any operation to unmanaged regions, and areas that
1388 		 * have nvkm_memory objects mapped in them already.
1389 		 */
1390 		if (!vma->mapref || vma->memory) {
1391 			ret = -EINVAL;
1392 			goto next;
1393 		}
1394 
1395 		/* In order to both properly refcount GPU page tables, and
1396 		 * prevent "normal" mappings and these direct mappings from
1397 		 * interfering with each other, we need to track contiguous
1398 		 * ranges that have been mapped with this interface.
1399 		 *
1400 		 * Here we attempt to either split an existing VMA so we're
1401 		 * able to flag the region as either unmapped/mapped, or to
1402 		 * merge with adjacent VMAs that are already compatible.
1403 		 *
1404 		 * If the region is already compatible, nothing is required.
1405 		 */
1406 		if (map != mapped) {
1407 			tmp = nvkm_vmm_pfn_split_merge(vmm, vma, addr, size,
1408 						       page -
1409 						       vmm->func->page, map);
1410 			if (WARN_ON(!tmp)) {
1411 				ret = -ENOMEM;
1412 				goto next;
1413 			}
1414 
1415 			if ((tmp->mapped = map))
1416 				tmp->refd = page - vmm->func->page;
1417 			else
1418 				tmp->refd = NVKM_VMA_PAGE_NONE;
1419 			vma = tmp;
1420 		}
1421 
1422 		/* Update HW page tables. */
1423 		if (map) {
1424 			struct nvkm_vmm_map args;
1425 			args.page = page;
1426 			args.pfn = &pfn[pi];
1427 
1428 			if (!mapped) {
1429 				ret = nvkm_vmm_ptes_get_map(vmm, page, addr,
1430 							    size, &args, page->
1431 							    desc->func->pfn);
1432 			} else {
1433 				nvkm_vmm_ptes_map(vmm, page, addr, size, &args,
1434 						  page->desc->func->pfn);
1435 			}
1436 		} else {
1437 			if (mapped) {
1438 				nvkm_vmm_ptes_unmap_put(vmm, page, addr, size,
1439 							false, true);
1440 			}
1441 		}
1442 
1443 next:
1444 		/* Iterate to next operation. */
1445 		if (vma->addr + vma->size == addr + size)
1446 			vma = node(vma, next);
1447 		start += size;
1448 
1449 		if (ret) {
1450 			/* Failure is signalled by clearing the valid bit on
1451 			 * any PFN that couldn't be modified as requested.
1452 			 */
1453 			while (size) {
1454 				pfn[pi++] = NVKM_VMM_PFN_NONE;
1455 				size -= 1 << page->shift;
1456 			}
1457 		} else {
1458 			pi += size >> page->shift;
1459 		}
1460 	} while (vma && start < limit);
1461 
1462 	return 0;
1463 }
1464 
1465 void
1466 nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1467 {
1468 	struct nvkm_vma *prev = NULL;
1469 	struct nvkm_vma *next;
1470 
1471 	nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
1472 	nvkm_memory_unref(&vma->memory);
1473 	vma->mapped = false;
1474 
1475 	if (vma->part && (prev = node(vma, prev)) && prev->mapped)
1476 		prev = NULL;
1477 	if ((next = node(vma, next)) && (!next->part || next->mapped))
1478 		next = NULL;
1479 	nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
1480 }
1481 
1482 void
1483 nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn)
1484 {
1485 	const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd];
1486 
1487 	if (vma->mapref) {
1488 		nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
1489 		vma->refd = NVKM_VMA_PAGE_NONE;
1490 	} else {
1491 		nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
1492 	}
1493 
1494 	nvkm_vmm_unmap_region(vmm, vma);
1495 }
1496 
1497 void
1498 nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1499 {
1500 	if (vma->memory) {
1501 		mutex_lock(&vmm->mutex);
1502 		nvkm_vmm_unmap_locked(vmm, vma, false);
1503 		mutex_unlock(&vmm->mutex);
1504 	}
1505 }
1506 
1507 static int
1508 nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1509 		   void *argv, u32 argc, struct nvkm_vmm_map *map)
1510 {
1511 	switch (nvkm_memory_target(map->memory)) {
1512 	case NVKM_MEM_TARGET_VRAM:
1513 		if (!(map->page->type & NVKM_VMM_PAGE_VRAM)) {
1514 			VMM_DEBUG(vmm, "%d !VRAM", map->page->shift);
1515 			return -EINVAL;
1516 		}
1517 		break;
1518 	case NVKM_MEM_TARGET_HOST:
1519 	case NVKM_MEM_TARGET_NCOH:
1520 		if (!(map->page->type & NVKM_VMM_PAGE_HOST)) {
1521 			VMM_DEBUG(vmm, "%d !HOST", map->page->shift);
1522 			return -EINVAL;
1523 		}
1524 		break;
1525 	default:
1526 		WARN_ON(1);
1527 		return -ENOSYS;
1528 	}
1529 
1530 	if (!IS_ALIGNED(     vma->addr, 1ULL << map->page->shift) ||
1531 	    !IS_ALIGNED((u64)vma->size, 1ULL << map->page->shift) ||
1532 	    !IS_ALIGNED(   map->offset, 1ULL << map->page->shift) ||
1533 	    nvkm_memory_page(map->memory) < map->page->shift) {
1534 		VMM_DEBUG(vmm, "alignment %016"PRIx64" %016"PRIx64" %016"PRIx64" %d %d",
1535 		    vma->addr, (u64)vma->size, map->offset, map->page->shift,
1536 		    nvkm_memory_page(map->memory));
1537 		return -EINVAL;
1538 	}
1539 
1540 	return vmm->func->valid(vmm, argv, argc, map);
1541 }
1542 
1543 static int
1544 nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1545 		    void *argv, u32 argc, struct nvkm_vmm_map *map)
1546 {
1547 	for (map->page = vmm->func->page; map->page->shift; map->page++) {
1548 		VMM_DEBUG(vmm, "trying %d", map->page->shift);
1549 		if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map))
1550 			return 0;
1551 	}
1552 	return -EINVAL;
1553 }
1554 
1555 static int
1556 nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1557 		    void *argv, u32 argc, struct nvkm_vmm_map *map)
1558 {
1559 	nvkm_vmm_pte_func func;
1560 	int ret;
1561 
1562 	/* Make sure we won't overrun the end of the memory object. */
1563 	if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) {
1564 		VMM_DEBUG(vmm, "overrun %016"PRIx64" %016"PRIx64" %016"PRIx64"",
1565 			  nvkm_memory_size(map->memory),
1566 			  map->offset, (u64)vma->size);
1567 		return -EINVAL;
1568 	}
1569 
1570 	/* Check remaining arguments for validity. */
1571 	if (vma->page == NVKM_VMA_PAGE_NONE &&
1572 	    vma->refd == NVKM_VMA_PAGE_NONE) {
1573 		/* Find the largest page size we can perform the mapping at. */
1574 		const u32 debug = vmm->debug;
1575 		vmm->debug = 0;
1576 		ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
1577 		vmm->debug = debug;
1578 		if (ret) {
1579 			VMM_DEBUG(vmm, "invalid at any page size");
1580 			nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
1581 			return -EINVAL;
1582 		}
1583 	} else {
1584 		/* Page size of the VMA is already pre-determined. */
1585 		if (vma->refd != NVKM_VMA_PAGE_NONE)
1586 			map->page = &vmm->func->page[vma->refd];
1587 		else
1588 			map->page = &vmm->func->page[vma->page];
1589 
1590 		ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map);
1591 		if (ret) {
1592 			VMM_DEBUG(vmm, "invalid %d\n", ret);
1593 			return ret;
1594 		}
1595 	}
1596 
1597 	/* Deal with the 'offset' argument, and fetch the backend function. */
1598 	map->off = map->offset;
1599 	if (map->mem) {
1600 		for (; map->off; map->mem = map->mem->next) {
1601 			u64 size = (u64)map->mem->length << NVKM_RAM_MM_SHIFT;
1602 			if (size > map->off)
1603 				break;
1604 			map->off -= size;
1605 		}
1606 		func = map->page->desc->func->mem;
1607 #ifndef __NetBSD__		/* XXX prime? */
1608 	} else
1609 	if (map->sgl) {
1610 		for (; map->off; map->sgl = sg_next(map->sgl)) {
1611 			u64 size = sg_dma_len(map->sgl);
1612 			if (size > map->off)
1613 				break;
1614 			map->off -= size;
1615 		}
1616 		func = map->page->desc->func->sgl;
1617 #endif
1618 	} else {
1619 		map->dma += map->offset >> PAGE_SHIFT;
1620 		map->off  = map->offset & PAGE_MASK;
1621 		func = map->page->desc->func->dma;
1622 	}
1623 
1624 	/* Perform the map. */
1625 	if (vma->refd == NVKM_VMA_PAGE_NONE) {
1626 		ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func);
1627 		if (ret)
1628 			return ret;
1629 
1630 		vma->refd = map->page - vmm->func->page;
1631 	} else {
1632 		nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func);
1633 	}
1634 
1635 	nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
1636 	nvkm_memory_unref(&vma->memory);
1637 	vma->memory = nvkm_memory_ref(map->memory);
1638 	vma->mapped = true;
1639 	vma->tags = map->tags;
1640 	return 0;
1641 }
1642 
1643 int
1644 nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
1645 	     struct nvkm_vmm_map *map)
1646 {
1647 	int ret;
1648 	mutex_lock(&vmm->mutex);
1649 	ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
1650 	vma->busy = false;
1651 	mutex_unlock(&vmm->mutex);
1652 	return ret;
1653 }
1654 
1655 static void
1656 nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1657 {
1658 	struct nvkm_vma *prev, *next;
1659 
1660 	if ((prev = node(vma, prev)) && !prev->used) {
1661 		vma->addr  = prev->addr;
1662 		vma->size += prev->size;
1663 		nvkm_vmm_free_delete(vmm, prev);
1664 	}
1665 
1666 	if ((next = node(vma, next)) && !next->used) {
1667 		vma->size += next->size;
1668 		nvkm_vmm_free_delete(vmm, next);
1669 	}
1670 
1671 	nvkm_vmm_free_insert(vmm, vma);
1672 }
1673 
1674 void
1675 nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1676 {
1677 	const struct nvkm_vmm_page *page = vmm->func->page;
1678 	struct nvkm_vma *next = vma;
1679 
1680 	BUG_ON(vma->part);
1681 
1682 	if (vma->mapref || !vma->sparse) {
1683 		do {
1684 			const bool mem = next->memory != NULL;
1685 			const bool map = next->mapped;
1686 			const u8  refd = next->refd;
1687 			const u64 addr = next->addr;
1688 			u64 size = next->size;
1689 
1690 			/* Merge regions that are in the same state. */
1691 			while ((next = node(next, next)) && next->part &&
1692 			       (next->mapped == map) &&
1693 			       (next->memory != NULL) == mem &&
1694 			       (next->refd == refd))
1695 				size += next->size;
1696 
1697 			if (map) {
1698 				/* Region(s) are mapped, merge the unmap
1699 				 * and dereference into a single walk of
1700 				 * the page tree.
1701 				 */
1702 				nvkm_vmm_ptes_unmap_put(vmm, &page[refd], addr,
1703 							size, vma->sparse,
1704 							!mem);
1705 			} else
1706 			if (refd != NVKM_VMA_PAGE_NONE) {
1707 				/* Drop allocation-time PTE references. */
1708 				nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
1709 			}
1710 		} while (next && next->part);
1711 	}
1712 
1713 	/* Merge any mapped regions that were split from the initial
1714 	 * address-space allocation back into the allocated VMA, and
1715 	 * release memory/compression resources.
1716 	 */
1717 	next = vma;
1718 	do {
1719 		if (next->mapped)
1720 			nvkm_vmm_unmap_region(vmm, next);
1721 	} while ((next = node(vma, next)) && next->part);
1722 
1723 	if (vma->sparse && !vma->mapref) {
1724 		/* Sparse region that was allocated with a fixed page size,
1725 		 * meaning all relevant PTEs were referenced once when the
1726 		 * region was allocated, and remained that way, regardless
1727 		 * of whether memory was mapped into it afterwards.
1728 		 *
1729 		 * The process of unmapping, unsparsing, and dereferencing
1730 		 * PTEs can be done in a single page tree walk.
1731 		 */
1732 		nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size);
1733 	} else
1734 	if (vma->sparse) {
1735 		/* Sparse region that wasn't allocated with a fixed page size;
1736 		 * PTE references were taken both at allocation time (to make
1737 		 * the GPU see the region as sparse), and when mapping memory
1738 		 * into the region.
1739 		 *
1740 		 * The latter was handled above, and the remaining references
1741 		 * are dealt with here.
1742 		 */
1743 		nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, false);
1744 	}
1745 
1746 	/* Remove VMA from the list of allocated nodes. */
1747 	nvkm_vmm_node_remove(vmm, vma);
1748 
1749 	/* Merge VMA back into the free list. */
1750 	vma->page = NVKM_VMA_PAGE_NONE;
1751 	vma->refd = NVKM_VMA_PAGE_NONE;
1752 	vma->used = false;
1753 	vma->user = false;
1754 	nvkm_vmm_put_region(vmm, vma);
1755 }
1756 
1757 void
1758 nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma)
1759 {
1760 	struct nvkm_vma *vma = *pvma;
1761 	if (vma) {
1762 		mutex_lock(&vmm->mutex);
1763 		nvkm_vmm_put_locked(vmm, vma);
1764 		mutex_unlock(&vmm->mutex);
1765 		*pvma = NULL;
1766 	}
1767 }
1768 
1769 int
1770 nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
1771 		    u8 shift, u8 align, u64 size, struct nvkm_vma **pvma)
1772 {
1773 	const struct nvkm_vmm_page *page = &vmm->func->page[NVKM_VMA_PAGE_NONE];
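	/* page starts at the NVKM_VMA_PAGE_NONE slot so that, when no page
	 * size is requested, the "page - vmm->func->page" arithmetic below
	 * yields NVKM_VMA_PAGE_NONE; the pointer is only dereferenced once a
	 * specific shift selects a real entry.
	 */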
1774 #ifndef __NetBSD__
1775 	struct rb_node *node = NULL, *temp;
1776 #endif
1777 	struct nvkm_vma *vma = NULL, *tmp;
1778 	u64 addr, tail;
1779 	int ret;
1780 
1781 	VMM_TRACE(vmm, "getref %d mapref %d sparse %d "
1782 		       "shift: %d align: %d size: %016"PRIx64"",
1783 		  getref, mapref, sparse, shift, align, size);
1784 
1785 	/* Zero-sized or lazily-allocated sparse VMAs make no sense. */
1786 	if (unlikely(!size || (!getref && !mapref && sparse))) {
1787 		VMM_DEBUG(vmm, "args %016"PRIx64" %d %d %d",
1788 			  size, getref, mapref, sparse);
1789 		return -EINVAL;
1790 	}
1791 
1792 	/* Tesla-class GPUs can only select page size per-PDE, which means
1793 	 * we're required to know the mapping granularity up-front to find
1794 	 * a suitable region of address-space.
1795 	 *
1796 	 * The same goes if we're requesting up-front allocation of PTEs.
1797 	 */
1798 	if (unlikely((getref || vmm->func->page_block) && !shift)) {
1799 		VMM_DEBUG(vmm, "page size required: %d %016"PRIx64"",
1800 			  getref, vmm->func->page_block);
1801 		return -EINVAL;
1802 	}
1803 
1804 	/* If a specific page size was requested, determine its index and
1805 	 * make sure the requested size is a multiple of the page size.
1806 	 */
1807 	if (shift) {
1808 		for (page = vmm->func->page; page->shift; page++) {
1809 			if (shift == page->shift)
1810 				break;
1811 		}
1812 
1813 		if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
1814 			VMM_DEBUG(vmm, "page %d %016"PRIx64"", shift, size);
1815 			return -EINVAL;
1816 		}
1817 		align = max_t(u8, align, shift);
1818 	} else {
1819 		align = max_t(u8, align, 12);
1820 	}
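	/* E.g. shift == 16 (64KiB pages) forces align to at least 16, so the
	 * chosen address is rounded to a 64KiB boundary below; with no page
	 * size requested the minimum alignment is 4KiB (1 << 12).
	 */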
1821 
1822 	/* Locate smallest block that can possibly satisfy the allocation. */
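	/* The free tree is ordered by block size (then address), so a
	 * size-based lower-bound lookup yields the smallest candidate;
	 * progressively larger blocks are tried if alignment eats too much
	 * of the current one.
	 */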
1823 #ifdef __NetBSD__
1824 	struct nvkm_vma_key key = { .size = size, .addr = 0 };
1825 	for (struct nvkm_vma *this = rb_tree_find_node_geq(&vmm->free, &key);
1826 		 this != NULL; this = RB_TREE_NEXT(&vmm->free, this)) {
1827 #else
1828 	temp = vmm->free.rb_node;
1829 	while (temp) {
1830 		struct nvkm_vma *this = rb_entry(temp, typeof(*this), tree);
1831 		if (this->size < size) {
1832 			temp = temp->rb_right;
1833 		} else {
1834 			node = temp;
1835 			temp = temp->rb_left;
1836 		}
1837 	}
1838 
1839 	if (unlikely(!node))
1840 		return -ENOSPC;
1841 
1842 	/* Take into account alignment restrictions, trying larger blocks
1843 	 * in turn until we find a suitable free block.
1844 	 */
1845 	do {
1846 		struct nvkm_vma *this = rb_entry(node, typeof(*this), tree);
1847 #endif
1848 		struct nvkm_vma *prev = node(this, prev);
1849 		struct nvkm_vma *next = node(this, next);
1850 		const int p = page - vmm->func->page;
1851 
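		/* Keep this allocation from sharing a page_block-sized region
		 * (one PDE's worth of VA on GPUs that select page size
		 * per-PDE) with a neighbour that uses a different page size
		 * index: bump addr up and trim tail down as needed.
		 */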
1852 		addr = this->addr;
1853 		if (vmm->func->page_block && prev && prev->page != p)
1854 			addr = ALIGN(addr, vmm->func->page_block);
1855 		addr = ALIGN(addr, 1ULL << align);
1856 
1857 		tail = this->addr + this->size;
1858 		if (vmm->func->page_block && next && next->page != p)
1859 			tail = ALIGN_DOWN(tail, vmm->func->page_block);
1860 
1861 		if (addr <= tail && tail - addr >= size) {
1862 			nvkm_vmm_free_remove(vmm, this);
1863 			vma = this;
1864 			break;
1865 		}
1866 #ifdef __NetBSD__
1867 	}
1868 #else
1869 	} while ((node = rb_next(node)));
1870 #endif
1871 
1872 	if (unlikely(!vma))
1873 		return -ENOSPC;
1874 
1875 	/* If the VMA we found isn't already exactly the requested size,
1876 	 * it needs to be split, and the remaining free blocks returned.
1877 	 */
1878 	if (addr != vma->addr) {
1879 		if (!(tmp = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) {
1880 			nvkm_vmm_put_region(vmm, vma);
1881 			return -ENOMEM;
1882 		}
1883 		nvkm_vmm_free_insert(vmm, vma);
1884 		vma = tmp;
1885 	}
1886 
1887 	if (size != vma->size) {
1888 		if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
1889 			nvkm_vmm_put_region(vmm, vma);
1890 			return -ENOMEM;
1891 		}
1892 		nvkm_vmm_free_insert(vmm, tmp);
1893 	}
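	/* vma now spans exactly [addr, addr + size); any head or tail
	 * trimmed off above went back to the free tree.
	 */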
1894 
1895 	/* Pre-allocate page tables and/or setup sparse mappings. */
1896 	if (sparse && getref)
1897 		ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size);
1898 	else if (sparse)
1899 		ret = nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, true);
1900 	else if (getref)
1901 		ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size);
1902 	else
1903 		ret = 0;
1904 	if (ret) {
1905 		nvkm_vmm_put_region(vmm, vma);
1906 		return ret;
1907 	}
1908 
1909 	vma->mapref = mapref && !getref;
1910 	vma->sparse = sparse;
1911 	vma->page = page - vmm->func->page;
1912 	vma->refd = getref ? vma->page : NVKM_VMA_PAGE_NONE;
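	/* vma->page remembers the requested page size index (or
	 * NVKM_VMA_PAGE_NONE), while vma->refd is only set when PTEs were
	 * referenced up-front, so the put path knows it must drop them.
	 */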
1913 	vma->used = true;
1914 	nvkm_vmm_node_insert(vmm, vma);
1915 	*pvma = vma;
1916 	return 0;
1917 }
1918 
1919 int
1920 nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
1921 {
1922 	int ret;
1923 	mutex_lock(&vmm->mutex);
1924 	ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma);
1925 	mutex_unlock(&vmm->mutex);
1926 	return ret;
1927 }
1928 
1929 void
1930 nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
1931 {
1932 	if (inst && vmm && vmm->func->part) {
1933 		mutex_lock(&vmm->mutex);
1934 		vmm->func->part(vmm, inst);
1935 		mutex_unlock(&vmm->mutex);
1936 	}
1937 }
1938 
1939 int
1940 nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
1941 {
1942 	int ret = 0;
1943 	if (vmm->func->join) {
1944 		mutex_lock(&vmm->mutex);
1945 		ret = vmm->func->join(vmm, inst);
1946 		mutex_unlock(&vmm->mutex);
1947 	}
1948 	return ret;
1949 }
1950 
1951 static bool
1952 nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
1953 {
1954 	const struct nvkm_vmm_desc *desc = it->desc;
1955 	const int type = desc->type == SPT;
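	/* pt[1] holds the small-page table when the descriptor is SPT,
	 * otherwise pt[0] is used; "type" encodes that index.
	 */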
1956 	nvkm_memory_boot(it->pt[0]->pt[type]->memory, it->vmm);
1957 	return false;
1958 }
1959 
1960 int
1961 nvkm_vmm_boot(struct nvkm_vmm *vmm)
1962 {
1963 	const struct nvkm_vmm_page *page = vmm->func->page;
1964 	const u64 limit = vmm->limit - vmm->start;
1965 	int ret;
1966 
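	/* Walk to the last entry of the page array; these arrays run from
	 * largest to smallest shift, so this selects the smallest page size
	 * for the bootstrap mapping.
	 */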
1967 	while (page[1].shift)
1968 		page++;
1969 
1970 	ret = nvkm_vmm_ptes_get(vmm, page, vmm->start, limit);
1971 	if (ret)
1972 		return ret;
1973 
1974 	nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false, false,
1975 		      nvkm_vmm_boot_ptes, NULL, NULL, NULL);
1976 	vmm->bootstrapped = true;
1977 	return 0;
1978 }
1979 
1980 static void
1981 nvkm_vmm_del(struct kref *kref)
1982 {
1983 	struct nvkm_vmm *vmm = container_of(kref, typeof(*vmm), kref);
1984 	nvkm_vmm_dtor(vmm);
1985 	kfree(vmm);
1986 }
1987 
1988 void
1989 nvkm_vmm_unref(struct nvkm_vmm **pvmm)
1990 {
1991 	struct nvkm_vmm *vmm = *pvmm;
1992 	if (vmm) {
1993 		kref_put(&vmm->kref, nvkm_vmm_del);
1994 		*pvmm = NULL;
1995 	}
1996 }
1997 
1998 struct nvkm_vmm *
1999 nvkm_vmm_ref(struct nvkm_vmm *vmm)
2000 {
2001 	if (vmm)
2002 		kref_get(&vmm->kref);
2003 	return vmm;
2004 }
2005 
2006 int
2007 nvkm_vmm_new(struct nvkm_device *device, u64 addr, u64 size, void *argv,
2008 	     u32 argc, struct lock_class_key *key, const char *name,
2009 	     struct nvkm_vmm **pvmm)
2010 {
2011 	struct nvkm_mmu *mmu = device->mmu;
2012 	struct nvkm_vmm *vmm = NULL;
2013 	int ret;
2014 	ret = mmu->func->vmm.ctor(mmu, false, addr, size, argv, argc,
2015 				  key, name, &vmm);
2016 	if (ret)
2017 		nvkm_vmm_unref(&vmm);
2018 	*pvmm = vmm;
2019 	return ret;
2020 }
2021