/*	$NetBSD: nouveau_nvkm_subdev_mmu_uvmm.c,v 1.3 2021/12/19 10:51:58 riastradh Exp $	*/

/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_mmu_uvmm.c,v 1.3 2021/12/19 10:51:58 riastradh Exp $");

#include "uvmm.h"
#include "umem.h"
#include "ummu.h"

#include <core/client.h>
#include <core/memory.h>

#include <nvif/if000c.h>
#include <nvif/unpack.h>

static const struct nvkm_object_func nvkm_uvmm;
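
/*
 * Resolve a client object handle to the VMM it wraps; on failure the
 * ERR_PTR-encoded object pointer is passed through to the caller.
 */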
struct nvkm_vmm *
nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
{
	struct nvkm_object *object;

	object = nvkm_object_search(client, handle, &nvkm_uvmm);
	if (IS_ERR(object))
		return (void *)object;

	return nvkm_uvmm(object)->vmm;
}
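
/*
 * NVIF_VMM_V0_PFNCLR: clear PFN mappings over [addr, addr + size).
 * Restricted to privileged (super) clients.
 */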
static int
nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	struct nvkm_client *client = uvmm->object.client;
	union {
		struct nvif_vmm_pfnclr_v0 v0;
	} *args = argv;
	struct nvkm_vmm *vmm = uvmm->vmm;
	int ret = -ENOSYS;
	u64 addr, size;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		addr = args->v0.addr;
		size = args->v0.size;
	} else
		return ret;

	if (!client->super)
		return -ENOENT;

	if (size) {
		mutex_lock(&vmm->mutex);
		ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
		mutex_unlock(&vmm->mutex);
	}

	return ret;
}
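
/*
 * NVIF_VMM_V0_PFNMAP: install PFN mappings over [addr, addr + size);
 * the argument payload must carry exactly one entry per page in the
 * range.  Restricted to privileged (super) clients.
 */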
static int
nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	struct nvkm_client *client = uvmm->object.client;
	union {
		struct nvif_vmm_pfnmap_v0 v0;
	} *args = argv;
	struct nvkm_vmm *vmm = uvmm->vmm;
	int ret = -ENOSYS;
	u64 addr, size, *phys;
	u8  page;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
		page = args->v0.page;
		addr = args->v0.addr;
		size = args->v0.size;
		phys = args->v0.phys;
		if (argc != (size >> page) * sizeof(args->v0.phys[0]))
			return -EINVAL;
	} else
		return ret;

	if (!client->super)
		return -ENOENT;

	if (size) {
		mutex_lock(&vmm->mutex);
		ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
		mutex_unlock(&vmm->mutex);
	}

	return ret;
}
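
/*
 * NVIF_VMM_V0_UNMAP: unmap the VMA beginning exactly at addr.  Fails if
 * the VMA is busy, has no memory mapped, or is not a user allocation
 * while the client lacks super privileges.
 */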
static int
nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	struct nvkm_client *client = uvmm->object.client;
	union {
		struct nvif_vmm_unmap_v0 v0;
	} *args = argv;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma *vma;
	int ret = -ENOSYS;
	u64 addr;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		addr = args->v0.addr;
	} else
		return ret;

	mutex_lock(&vmm->mutex);
	vma = nvkm_vmm_node_search(vmm, addr);
	if (ret = -ENOENT, !vma || vma->addr != addr) {
		VMM_DEBUG(vmm, "lookup %016"PRIx64": %016"PRIx64"",
			  addr, vma ? vma->addr : ~(u64)0);
		goto done;
	}

	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
		VMM_DEBUG(vmm, "denied %016"PRIx64": %d %d %d", addr,
			  vma->user, !client->super, vma->busy);
		goto done;
	}

	if (ret = -EINVAL, !vma->memory) {
		VMM_DEBUG(vmm, "unmapped");
		goto done;
	}

	nvkm_vmm_unmap_locked(vmm, vma, false);
	ret = 0;
done:
	mutex_unlock(&vmm->mutex);
	return ret;
}
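
/*
 * NVIF_VMM_V0_MAP: map a memory object into the VMA at addr, splitting
 * the VMA first when the request covers only part of it.  The VMA is
 * marked busy across the map call, which runs without the VMM mutex.
 */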
static int
nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	struct nvkm_client *client = uvmm->object.client;
	union {
		struct nvif_vmm_map_v0 v0;
	} *args = argv;
	u64 addr, size, handle, offset;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma *vma;
	struct nvkm_memory *memory;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
		addr = args->v0.addr;
		size = args->v0.size;
		handle = args->v0.memory;
		offset = args->v0.offset;
	} else
		return ret;

	memory = nvkm_umem_search(client, handle);
	if (IS_ERR(memory)) {
		VMM_DEBUG(vmm, "memory %016"PRIx64" %ld\n", handle, PTR_ERR(memory));
		return PTR_ERR(memory);
	}

	mutex_lock(&vmm->mutex);
	if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
		VMM_DEBUG(vmm, "lookup %016"PRIx64"", addr);
		goto fail;
	}

	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
		VMM_DEBUG(vmm, "denied %016"PRIx64": %d %d %d", addr,
			  vma->user, !client->super, vma->busy);
		goto fail;
	}

	if (ret = -EINVAL, vma->mapped && !vma->memory) {
		VMM_DEBUG(vmm, "pfnmap %016"PRIx64"", addr);
		goto fail;
	}

	if (ret = -EINVAL, vma->addr != addr || vma->size != size) {
		if (addr + size > vma->addr + vma->size || vma->memory ||
		    (vma->refd == NVKM_VMA_PAGE_NONE && !vma->mapref)) {
			VMM_DEBUG(vmm, "split %d %d %d "
				       "%016"PRIx64" %016"PRIx64" %016"PRIx64" %016"PRIx64"",
				  !!vma->memory, vma->refd, vma->mapref,
				  addr, size, vma->addr, (u64)vma->size);
			goto fail;
		}

		vma = nvkm_vmm_node_split(vmm, vma, addr, size);
		if (!vma) {
			ret = -ENOMEM;
			goto fail;
		}
	}
	vma->busy = true;
	mutex_unlock(&vmm->mutex);

	ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
	if (ret == 0) {
		/* Successful map will clear vma->busy. */
		nvkm_memory_unref(&memory);
		return 0;
	}

	mutex_lock(&vmm->mutex);
	vma->busy = false;
	nvkm_vmm_unmap_region(vmm, vma);
fail:
	mutex_unlock(&vmm->mutex);
	nvkm_memory_unref(&memory);
	return ret;
}
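
/*
 * NVIF_VMM_V0_PUT: release an address-space allocation; addr must name
 * the start of a whole (non-partial) VMA.
 */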
static int
nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	struct nvkm_client *client = uvmm->object.client;
	union {
		struct nvif_vmm_put_v0 v0;
	} *args = argv;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma *vma;
	int ret = -ENOSYS;
	u64 addr;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		addr = args->v0.addr;
	} else
		return ret;

	mutex_lock(&vmm->mutex);
	vma = nvkm_vmm_node_search(vmm, addr);
	if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {
		VMM_DEBUG(vmm, "lookup %016"PRIx64": %016"PRIx64" %d", addr,
			  vma ? vma->addr : ~(u64)0, vma ? vma->part : 0);
		goto done;
	}

	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
		VMM_DEBUG(vmm, "denied %016"PRIx64": %d %d %d", addr,
			  vma->user, !client->super, vma->busy);
		goto done;
	}

	nvkm_vmm_put_locked(vmm, vma);
	ret = 0;
done:
	mutex_unlock(&vmm->mutex);
	return ret;
}
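
/*
 * NVIF_VMM_V0_GET: allocate a region of address space and return its
 * base address; regions allocated by unprivileged clients are flagged
 * as user-owned.
 */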
static int
nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	struct nvkm_client *client = uvmm->object.client;
	union {
		struct nvif_vmm_get_v0 v0;
	} *args = argv;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma *vma;
	int ret = -ENOSYS;
	bool getref, mapref, sparse;
	u8 page, align;
	u64 size;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		getref = args->v0.type == NVIF_VMM_GET_V0_PTES;
		mapref = args->v0.type == NVIF_VMM_GET_V0_ADDR;
		sparse = args->v0.sparse;
		page = args->v0.page;
		align = args->v0.align;
		size = args->v0.size;
	} else
		return ret;

	mutex_lock(&vmm->mutex);
	ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
				  page, align, size, &vma);
	mutex_unlock(&vmm->mutex);
	if (ret)
		return ret;

	args->v0.addr = vma->addr;
	vma->user = !client->super;
	return ret;
}
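
/*
 * NVIF_VMM_V0_PAGE: report the shift and capability bits (sparse, VRAM,
 * host, compression) of the indexed page size supported by this VMM.
 */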
static int
nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	union {
		struct nvif_vmm_page_v0 v0;
	} *args = argv;
	const struct nvkm_vmm_page *page;
	int ret = -ENOSYS;
	u8 type, index, nr;

	page = uvmm->vmm->func->page;
	for (nr = 0; page[nr].shift; nr++);

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		if ((index = args->v0.index) >= nr)
			return -EINVAL;
		type = page[index].type;
		args->v0.shift = page[index].shift;
		args->v0.sparse = !!(type & NVKM_VMM_PAGE_SPARSE);
		args->v0.vram = !!(type & NVKM_VMM_PAGE_VRAM);
		args->v0.host = !!(type & NVKM_VMM_PAGE_HOST);
		args->v0.comp = !!(type & NVKM_VMM_PAGE_COMP);
	} else
		return -ENOSYS;

	return 0;
}
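
/*
 * Method dispatch: generic VMM methods are handled here; method numbers
 * in the NVIF_VMM_V0_MTHD range are forwarded to the backend, if any.
 */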
static int
nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
	struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
	switch (mthd) {
	case NVIF_VMM_V0_PAGE  : return nvkm_uvmm_mthd_page  (uvmm, argv, argc);
	case NVIF_VMM_V0_GET   : return nvkm_uvmm_mthd_get   (uvmm, argv, argc);
	case NVIF_VMM_V0_PUT   : return nvkm_uvmm_mthd_put   (uvmm, argv, argc);
	case NVIF_VMM_V0_MAP   : return nvkm_uvmm_mthd_map   (uvmm, argv, argc);
	case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
	case NVIF_VMM_V0_PFNMAP: return nvkm_uvmm_mthd_pfnmap(uvmm, argv, argc);
	case NVIF_VMM_V0_PFNCLR: return nvkm_uvmm_mthd_pfnclr(uvmm, argv, argc);
	case NVIF_VMM_V0_MTHD(0x00) ... NVIF_VMM_V0_MTHD(0x7f):
		if (uvmm->vmm->func->mthd) {
			return uvmm->vmm->func->mthd(uvmm->vmm,
						     uvmm->object.client,
						     mthd, argv, argc);
		}
		break;
	default:
		break;
	}
	return -EINVAL;
}
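
/* Destructor: drop this object's reference on the underlying VMM. */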
static void *
nvkm_uvmm_dtor(struct nvkm_object *object)
{
	struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
	nvkm_vmm_unref(&uvmm->vmm);
	return uvmm;
}

static const struct nvkm_object_func
nvkm_uvmm = {
	.dtor = nvkm_uvmm_dtor,
	.mthd = nvkm_uvmm_mthd,
};
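
/*
 * Constructor: create a new VMM (optionally "managed"), or share the
 * MMU's single built-in VMM when one exists; the reply reports the
 * page-size count and the VMM's start and limit addresses.
 */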
int
nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
	      struct nvkm_object **pobject)
{
	struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
	const bool more = oclass->base.maxver >= 0;
	union {
		struct nvif_vmm_v0 v0;
	} *args = argv;
	const struct nvkm_vmm_page *page;
	struct nvkm_uvmm *uvmm;
	int ret = -ENOSYS;
	u64 addr, size;
	bool managed;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
		managed = args->v0.managed != 0;
		addr = args->v0.addr;
		size = args->v0.size;
	} else
		return ret;

	if (!(uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nvkm_uvmm, oclass, &uvmm->object);
	*pobject = &uvmm->object;

	if (!mmu->vmm) {
		ret = mmu->func->vmm.ctor(mmu, managed, addr, size, argv, argc,
					  NULL, "user", &uvmm->vmm);
		if (ret)
			return ret;

		uvmm->vmm->debug = max(uvmm->vmm->debug, oclass->client->debug);
	} else {
		if (size)
			return -EINVAL;

		uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
	}

	page = uvmm->vmm->func->page;
	args->v0.page_nr = 0;
	while (page && (page++)->shift)
		args->v0.page_nr++;
	args->v0.addr = uvmm->vmm->start;
	args->v0.size = uvmm->vmm->limit;
	return 0;
}