/*	$NetBSD: nouveau_nvkm_subdev_mmu_vmmnv50.c,v 1.3 2021/12/19 10:51:58 riastradh Exp $	*/

/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_mmu_vmmnv50.c,v 1.3 2021/12/19 10:51:58 riastradh Exp $");

#include "vmm.h"

#include <subdev/fb.h>
#include <subdev/timer.h>
#include <engine/gr.h>

#include <nvif/if500d.h>
#include <nvif/unpack.h>

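/*
 * Write "ptes" PTEs starting at index "ptei", mapping consecutive
 * pages from "addr".  Entries are emitted in runs of up to 128 PTEs:
 * each run is the largest power of two that both fits and is aligned
 * to its own size, and its log2 size is encoded into each PTE at bit
 * 7, evidently as a contiguous-block hint to the MMU.
 */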
static inline void
nv50_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	u64 next = addr + map->type, data;
	u32 pten;
	int log2blk;

	map->type += ptes * map->ctag;

	while (ptes) {
		/* Largest power-of-two run that both fits and is aligned. */
		for (log2blk = 7; log2blk >= 0; log2blk--) {
			pten = 1 << log2blk;
			if (ptes >= pten && IS_ALIGNED(ptei, pten))
				break;
		}

		data  = next | (log2blk << 7);
		next += pten * map->next;
		ptes -= pten;

		while (pten--)
			VMM_WO064(pt, vmm, ptei++ * 8, data);
	}
}

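/*
 * Scatter/gather backing.  Not built on NetBSD, presumably because
 * Linux scatterlists are unavailable there; such mappings go through
 * the DMA-address path instead.
 */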
#ifndef __NetBSD__
static void
nv50_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
}
#endif

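/*
 * DMA-address backing.  When the MMU page size matches the CPU page
 * size, each PTE is written directly from the DMA address array;
 * otherwise fall back to the generic iterator.
 */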
static void
nv50_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	if (map->page->shift == PAGE_SHIFT) {
		VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
		nvkm_kmap(pt->memory);
		while (ptes--) {
			const u64 data = *map->dma++ + map->type;
			VMM_WO064(pt, vmm, ptei++ * 8, data);
			map->type += map->ctag;
		}
		nvkm_done(pt->memory);
		return;
	}

	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
}

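/* nvkm_memory backing; walk the memory's pages via the generic iterator. */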
static void
nv50_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
}

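/* Invalidate a range of PTEs by filling them with zeroes. */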
static void
nv50_vmm_pgt_unmap(struct nvkm_vmm *vmm,
		   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes);
}

static const struct nvkm_vmm_desc_func
nv50_vmm_pgt = {
	.unmap = nv50_vmm_pgt_unmap,
	.mem = nv50_vmm_pgt_mem,
	.dma = nv50_vmm_pgt_dma,
#ifndef __NetBSD__
	.sgl = nv50_vmm_pgt_sgl,
#endif
};

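/*
 * Compute the PDE for a page table: the encoding selects the page
 * size (shift 16 or shift 12), for small pages the allocation size
 * of the table itself, and the memory target the table resides in.
 * The 0xdeadcafe pattern marks an absent table.
 */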
static bool
nv50_vmm_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgt, u64 *pdata)
{
	struct nvkm_mmu_pt *pt;
	u64 data = 0xdeadcafe00000000ULL;
	if (pgt && (pt = pgt->pt[0])) {
		switch (pgt->page) {
		case 16: data = 0x00000001; break;	/* 64KiB pages. */
		case 12: data = 0x00000003;		/* 4KiB pages. */
			/* Page table allocation size. */
			switch (nvkm_memory_size(pt->memory)) {
			case 0x100000: data |= 0x00000000; break;
			case 0x040000: data |= 0x00000020; break;
			case 0x020000: data |= 0x00000040; break;
			case 0x010000: data |= 0x00000060; break;
			default:
				WARN_ON(1);
				return false;
			}
			break;
		default:
			WARN_ON(1);
			return false;
		}

		/* Memory target the page table resides in. */
		switch (nvkm_memory_target(pt->memory)) {
		case NVKM_MEM_TARGET_VRAM: data |= 0x00000000; break;
		case NVKM_MEM_TARGET_HOST: data |= 0x00000008; break;
		case NVKM_MEM_TARGET_NCOH: data |= 0x0000000c; break;
		default:
			WARN_ON(1);
			return false;
		}

		data |= pt->addr;
	}
	*pdata = data;
	return true;
}

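/*
 * Propagate an updated PDE to every instance block joined to this
 * VMM; each holds its own copy of the page directory at the MMU's
 * pd_offset.
 */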
static void
nv50_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
	struct nvkm_vmm_join *join;
	u32 pdeo = vmm->mmu->func->vmm.pd_offset + (pdei * 8);
	u64 data;

	if (!nv50_vmm_pde(vmm, pgd->pde[pdei], &data))
		return;

	list_for_each_entry(join, &vmm->join, head) {
		nvkm_kmap(join->inst);
		nvkm_wo64(join->inst, pdeo, data);
		nvkm_done(join->inst);
	}
}

static const struct nvkm_vmm_desc_func
nv50_vmm_pgd = {
	.pde = nv50_vmm_pgd_pde,
};

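/*
 * Two-level layout per page size: an 11-bit (2048-entry) page
 * directory over page tables of 2^17 entries for 4KiB pages or 2^13
 * entries for 64KiB pages, so each PDE spans 1 << 29 bytes either way.
 */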
const struct nvkm_vmm_desc
nv50_vmm_desc_12[] = {
	{ PGT, 17, 8, 0x1000, &nv50_vmm_pgt },
	{ PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
	{}
};

const struct nvkm_vmm_desc
nv50_vmm_desc_16[] = {
	{ PGT, 13, 8, 0x1000, &nv50_vmm_pgt },
	{ PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
	{}
};

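/*
 * Flush the TLBs of every engine holding a reference to this VMM by
 * writing the engine id to the invalidate trigger at 0x100c80 and
 * polling until the busy bit clears.  GR goes through
 * nvkm_gr_tlb_flush() where available, as a hardware bug workaround.
 */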
void
nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
{
	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
	struct nvkm_device *device = subdev->device;
	int i, id;

	mutex_lock(&subdev->mutex);
	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
		if (!atomic_read(&vmm->engref[i]))
			continue;

		/* unfortunate hw bug workaround... */
		if (i == NVKM_ENGINE_GR && device->gr) {
			int ret = nvkm_gr_tlb_flush(device->gr);
			if (ret != -ENODEV)
				continue;
		}

		switch (i) {
		case NVKM_ENGINE_GR    : id = 0x00; break;
		case NVKM_ENGINE_VP    :
		case NVKM_ENGINE_MSPDEC: id = 0x01; break;
		case NVKM_SUBDEV_BAR   : id = 0x06; break;
		case NVKM_ENGINE_MSPPP :
		case NVKM_ENGINE_MPEG  : id = 0x08; break;
		case NVKM_ENGINE_BSP   :
		case NVKM_ENGINE_MSVLD : id = 0x09; break;
		case NVKM_ENGINE_CIPHER:
		case NVKM_ENGINE_SEC   : id = 0x0a; break;
		case NVKM_ENGINE_CE0   : id = 0x0d; break;
		default:
			continue;
		}

		nvkm_wr32(device, 0x100c80, (id << 16) | 1);
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
				break;
		) < 0)
			nvkm_error(subdev, "%s mmu invalidate timeout\n",
				   nvkm_subdev_name[i]);
	}
	mutex_unlock(&subdev->mutex);
}

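/*
 * Parse the nvif map arguments and precompute the PTE template in
 * map->type.  As assembled at the end of this function: bit 0 valid,
 * bit 3 read-only, bits 5:4 aperture, bit 6 privileged, bits 46:40
 * storage kind, bits 48:47 compression mode, bits 49 and up the
 * compression tag address.
 */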
int
nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
	       struct nvkm_vmm_map *map)
{
	const struct nvkm_vmm_page *page = map->page;
	union {
		struct nv50_vmm_map_vn vn;
		struct nv50_vmm_map_v0 v0;
	} *args = argv;
	struct nvkm_device *device = vmm->mmu->subdev.device;
	struct nvkm_ram *ram = device->fb->ram;
	struct nvkm_memory *memory = map->memory;
	u8  aper, kind, kind_inv, comp, priv, ro;
	int kindn, ret = -ENOSYS;
	const u8 *kindm;

	map->type = map->ctag = 0;
	map->next = 1 << page->shift;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		ro   = !!args->v0.ro;
		priv = !!args->v0.priv;
		kind = args->v0.kind & 0x7f;
		comp = args->v0.comp & 0x03;
	} else
	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		ro   = 0;
		priv = 0;
		kind = 0x00;
		comp = 0;
	} else {
		VMM_DEBUG(vmm, "args");
		return ret;
	}

	switch (nvkm_memory_target(memory)) {
	case NVKM_MEM_TARGET_VRAM:
		if (ram->stolen) {
			map->type |= ram->stolen;
			aper = 3;
		} else {
			aper = 0;
		}
		break;
	case NVKM_MEM_TARGET_HOST:
		aper = 2;
		break;
	case NVKM_MEM_TARGET_NCOH:
		aper = 3;
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
	if (kind >= kindn || kindm[kind] == kind_inv) {
		VMM_DEBUG(vmm, "kind %02x", kind);
		return -EINVAL;
	}

	if (map->mem && map->mem->type != kindm[kind]) {
		VMM_DEBUG(vmm, "kind %02x bankswz: %d %d", kind,
			  kindm[kind], map->mem->type);
		return -EINVAL;
	}

	if (comp) {
		u32 tags = (nvkm_memory_size(memory) >> 16) * comp;
		if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
			VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
			return -EINVAL;
		}

		ret = nvkm_memory_tags_get(memory, device, tags, NULL,
					   &map->tags);
		if (ret) {
			VMM_DEBUG(vmm, "comp %d", ret);
			return ret;
		}

		if (map->tags->mn) {
			u32 tags = map->tags->mn->offset + (map->offset >> 16);
			map->ctag |= (u64)comp << 49;
			map->type |= (u64)comp << 47;
			map->type |= (u64)tags << 49;
			map->next |= map->ctag;
		}
	}

	map->type |= BIT(0); /* Valid. */
	map->type |= (u64)ro << 3;
	map->type |= (u64)aper << 4;
	map->type |= (u64)priv << 6;
	map->type |= (u64)kind << 40;
	return 0;
}

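/* Detach an instance block from this VMM's join list. */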
void
nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
	struct nvkm_vmm_join *join;

	list_for_each_entry(join, &vmm->join, head) {
		if (join->inst == inst) {
			list_del(&join->head);
			kfree(join);
			break;
		}
	}
}

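/*
 * Attach an instance block to this VMM, seeding it with a copy of
 * the page directory entries covering the VMM's address range.
 */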
int
nv50_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
	const u32 pd_offset = vmm->mmu->func->vmm.pd_offset;
	struct nvkm_vmm_join *join;
	int ret = 0;
	u64 data;
	u32 pdei;

	if (!(join = kmalloc(sizeof(*join), GFP_KERNEL)))
		return -ENOMEM;
	join->inst = inst;
	list_add_tail(&join->head, &vmm->join);

	nvkm_kmap(join->inst);
	for (pdei = vmm->start >> 29; pdei <= (vmm->limit - 1) >> 29; pdei++) {
		if (!nv50_vmm_pde(vmm, vmm->pd->pde[pdei], &data)) {
			ret = -EINVAL;
			break;
		}
		nvkm_wo64(join->inst, pd_offset + (pdei * 8), data);
	}
	nvkm_done(join->inst);
	return ret;
}

static const struct nvkm_vmm_func
nv50_vmm = {
	.join = nv50_vmm_join,
	.part = nv50_vmm_part,
	.valid = nv50_vmm_valid,
	.flush = nv50_vmm_flush,
	.page_block = 1 << 29,
	.page = {
		{ 16, &nv50_vmm_desc_16[0], NVKM_VMM_PAGE_xVxC },
		{ 12, &nv50_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx },
		{}
	}
};

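/* An nv04-family VMM constructor specialized with the nv50 function table. */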
int
nv50_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
	     void *argv, u32 argc, struct lock_class_key *key, const char *name,
	     struct nvkm_vmm **pvmm)
{
	return nv04_vmm_new_(&nv50_vmm, mmu, 0, managed, addr, size,
			     argv, argc, key, name, pvmm);
}