/*	$NetBSD: nouveau_nvif_vmm.c,v 1.2 2021/12/18 23:45:33 riastradh Exp $	*/

/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvif_vmm.c,v 1.2 2021/12/18 23:45:33 riastradh Exp $");

#include <nvif/vmm.h>
#include <nvif/mem.h>

#include <nvif/if000c.h>

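/*
 * nvif_vmm_unmap: issue the NVIF_VMM_V0_UNMAP method on the VMM object to
 * tear down whatever mapping exists at the given virtual address.
 */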
int
nvif_vmm_unmap(struct nvif_vmm *vmm, u64 addr)
{
	return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_UNMAP,
				&(struct nvif_vmm_unmap_v0) { .addr = addr },
				sizeof(struct nvif_vmm_unmap_v0));
}

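/*
 * nvif_vmm_map: map "mem", starting at "offset" within it, into the VMM at
 * addr/size via the NVIF_VMM_V0_MAP method.  The backend-specific argv/argc
 * blob is appended to the method payload; the payload is built on the stack
 * when it fits in 48 bytes and is otherwise kmalloc'd and freed on return.
 */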
int
nvif_vmm_map(struct nvif_vmm *vmm, u64 addr, u64 size, void *argv, u32 argc,
	     struct nvif_mem *mem, u64 offset)
{
	struct nvif_vmm_map_v0 *args;
	u8 stack[48];
	int ret;

	if (sizeof(*args) + argc > sizeof(stack)) {
		if (!(args = kmalloc(sizeof(*args) + argc, GFP_KERNEL)))
			return -ENOMEM;
	} else {
		args = (void *)stack;
	}

	args->version = 0;
	args->addr = addr;
	args->size = size;
	args->memory = nvif_handle(&mem->object);
	args->offset = offset;
	memcpy(args->data, argv, argc);

	ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_MAP,
			       args, sizeof(*args) + argc);
	if (args != (void *)stack)
		kfree(args);
	return ret;
}

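/*
 * nvif_vmm_put: release an address-space allocation obtained from
 * nvif_vmm_get().  A VMA with size zero is treated as already released, so
 * repeated calls are harmless.
 */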
void
nvif_vmm_put(struct nvif_vmm *vmm, struct nvif_vma *vma)
{
	if (vma->size) {
		WARN_ON(nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PUT,
					 &(struct nvif_vmm_put_v0) {
						.addr = vma->addr,
					 }, sizeof(struct nvif_vmm_put_v0)));
		vma->size = 0;
	}
}

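/*
 * nvif_vmm_get: allocate address space from the VMM via NVIF_VMM_V0_GET.
 * The ADDR/PTES/LAZY type is translated to the matching NVIF_VMM_GET_V0_*
 * value (presumably selecting how page tables are populated); page, align,
 * size and sparse are passed through, and the resulting range is returned
 * in *vma on success.
 */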
int
nvif_vmm_get(struct nvif_vmm *vmm, enum nvif_vmm_get type, bool sparse,
	     u8 page, u8 align, u64 size, struct nvif_vma *vma)
{
	struct nvif_vmm_get_v0 args;
	int ret;

	args.version = vma->size = 0;
	args.sparse = sparse;
	args.page = page;
	args.align = align;
	args.size = size;

	switch (type) {
	case ADDR: args.type = NVIF_VMM_GET_V0_ADDR; break;
	case PTES: args.type = NVIF_VMM_GET_V0_PTES; break;
	case LAZY: args.type = NVIF_VMM_GET_V0_LAZY; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_GET,
			       &args, sizeof(args));
	if (ret == 0) {
		vma->addr = args.addr;
		vma->size = args.size;
	}
	return ret;
}

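/*
 * nvif_vmm_fini: tear down a VMM handle by freeing the cached page array
 * and destroying the underlying NVIF object.
 */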
void
nvif_vmm_fini(struct nvif_vmm *vmm)
{
	kfree(vmm->page);
	nvif_object_fini(&vmm->object);
}

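/*
 * nvif_vmm_init: construct a VMM object of class "oclass" on the given MMU
 * covering addr/size, with argv/argc forwarded to the backend, then query
 * each supported page index via NVIF_VMM_V0_PAGE and cache its shift,
 * sparse, vram, host and comp attributes.  Any failure unwinds through
 * nvif_vmm_fini().
 */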
int
nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, bool managed, u64 addr,
	      u64 size, void *argv, u32 argc, struct nvif_vmm *vmm)
{
	struct nvif_vmm_v0 *args;
	u32 argn = sizeof(*args) + argc;
	int ret = -ENOSYS, i;

	vmm->object.client = NULL;
	vmm->page = NULL;

	if (!(args = kmalloc(argn, GFP_KERNEL)))
		return -ENOMEM;
	args->version = 0;
	args->managed = managed;
	args->addr = addr;
	args->size = size;
	memcpy(args->data, argv, argc);

	ret = nvif_object_init(&mmu->object, 0, oclass, args, argn,
			       &vmm->object);
	if (ret)
		goto done;

	vmm->start = args->addr;
	vmm->limit = args->size;

	vmm->page_nr = args->page_nr;
	vmm->page = kmalloc_array(vmm->page_nr, sizeof(*vmm->page),
				  GFP_KERNEL);
	if (!vmm->page) {
		ret = -ENOMEM;
		goto done;
	}

	for (i = 0; i < vmm->page_nr; i++) {
		struct nvif_vmm_page_v0 args = { .index = i };

		ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PAGE,
				       &args, sizeof(args));
		if (ret)
			break;

		vmm->page[i].shift = args.shift;
		vmm->page[i].sparse = args.sparse;
		vmm->page[i].vram = args.vram;
		vmm->page[i].host = args.host;
		vmm->page[i].comp = args.comp;
	}

done:
	if (ret)
		nvif_vmm_fini(vmm);
	kfree(args);
	return ret;
}