xref: /netbsd-src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/subdev/mmu/nouveau_nvkm_subdev_mmu_ummu.c (revision 41ec02673d281bbb3d38e6c78504ce6e30c228c1)
1 /*	$NetBSD: nouveau_nvkm_subdev_mmu_ummu.c,v 1.2 2021/12/18 23:45:41 riastradh Exp $	*/
2 
3 /*
4  * Copyright 2017 Red Hat Inc.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  */
24 #include <sys/cdefs.h>
25 __KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_mmu_ummu.c,v 1.2 2021/12/18 23:45:41 riastradh Exp $");
26 
27 #include "ummu.h"
28 #include "umem.h"
29 #include "uvmm.h"
30 
31 #include <core/client.h>
32 
33 #include <nvif/if0008.h>
34 #include <nvif/unpack.h>
35 
36 static int
nvkm_ummu_sclass(struct nvkm_object * object,int index,struct nvkm_oclass * oclass)37 nvkm_ummu_sclass(struct nvkm_object *object, int index,
38 		 struct nvkm_oclass *oclass)
39 {
40 	struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;
41 
42 	if (mmu->func->mem.user.oclass && oclass->client->super) {
43 		if (index-- == 0) {
44 			oclass->base = mmu->func->mem.user;
45 			oclass->ctor = nvkm_umem_new;
46 			return 0;
47 		}
48 	}
49 
50 	if (mmu->func->vmm.user.oclass) {
51 		if (index-- == 0) {
52 			oclass->base = mmu->func->vmm.user;
53 			oclass->ctor = nvkm_uvmm_new;
54 			return 0;
55 		}
56 	}
57 
58 	return -EINVAL;
59 }
60 
61 static int
nvkm_ummu_heap(struct nvkm_ummu * ummu,void * argv,u32 argc)62 nvkm_ummu_heap(struct nvkm_ummu *ummu, void *argv, u32 argc)
63 {
64 	struct nvkm_mmu *mmu = ummu->mmu;
65 	union {
66 		struct nvif_mmu_heap_v0 v0;
67 	} *args = argv;
68 	int ret = -ENOSYS;
69 	u8 index;
70 
71 	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
72 		if ((index = args->v0.index) >= mmu->heap_nr)
73 			return -EINVAL;
74 		args->v0.size = mmu->heap[index].size;
75 	} else
76 		return ret;
77 
78 	return 0;
79 }
80 
81 static int
nvkm_ummu_type(struct nvkm_ummu * ummu,void * argv,u32 argc)82 nvkm_ummu_type(struct nvkm_ummu *ummu, void *argv, u32 argc)
83 {
84 	struct nvkm_mmu *mmu = ummu->mmu;
85 	union {
86 		struct nvif_mmu_type_v0 v0;
87 	} *args = argv;
88 	int ret = -ENOSYS;
89 	u8 type, index;
90 
91 	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
92 		if ((index = args->v0.index) >= mmu->type_nr)
93 			return -EINVAL;
94 		type = mmu->type[index].type;
95 		args->v0.heap = mmu->type[index].heap;
96 		args->v0.vram = !!(type & NVKM_MEM_VRAM);
97 		args->v0.host = !!(type & NVKM_MEM_HOST);
98 		args->v0.comp = !!(type & NVKM_MEM_COMP);
99 		args->v0.disp = !!(type & NVKM_MEM_DISP);
100 		args->v0.kind = !!(type & NVKM_MEM_KIND);
101 		args->v0.mappable = !!(type & NVKM_MEM_MAPPABLE);
102 		args->v0.coherent = !!(type & NVKM_MEM_COHERENT);
103 		args->v0.uncached = !!(type & NVKM_MEM_UNCACHED);
104 	} else
105 		return ret;
106 
107 	return 0;
108 }
109 
110 static int
nvkm_ummu_kind(struct nvkm_ummu * ummu,void * argv,u32 argc)111 nvkm_ummu_kind(struct nvkm_ummu *ummu, void *argv, u32 argc)
112 {
113 	struct nvkm_mmu *mmu = ummu->mmu;
114 	union {
115 		struct nvif_mmu_kind_v0 v0;
116 	} *args = argv;
117 	const u8 *kind = NULL;
118 	int ret = -ENOSYS, count = 0;
119 	u8 kind_inv = 0;
120 
121 	if (mmu->func->kind)
122 		kind = mmu->func->kind(mmu, &count, &kind_inv);
123 
124 	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
125 		if (argc != args->v0.count * sizeof(*args->v0.data))
126 			return -EINVAL;
127 		if (args->v0.count > count)
128 			return -EINVAL;
129 		args->v0.kind_inv = kind_inv;
130 		memcpy(args->v0.data, kind, args->v0.count);
131 	} else
132 		return ret;
133 
134 	return 0;
135 }
136 
137 static int
nvkm_ummu_mthd(struct nvkm_object * object,u32 mthd,void * argv,u32 argc)138 nvkm_ummu_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
139 {
140 	struct nvkm_ummu *ummu = nvkm_ummu(object);
141 	switch (mthd) {
142 	case NVIF_MMU_V0_HEAP: return nvkm_ummu_heap(ummu, argv, argc);
143 	case NVIF_MMU_V0_TYPE: return nvkm_ummu_type(ummu, argv, argc);
144 	case NVIF_MMU_V0_KIND: return nvkm_ummu_kind(ummu, argv, argc);
145 	default:
146 		break;
147 	}
148 	return -EINVAL;
149 }
150 
/* Object vtable for the userspace-visible MMU object: method dispatch
 * plus enumeration/construction of child classes (umem/uvmm).
 */
static const struct nvkm_object_func
nvkm_ummu = {
	.mthd = nvkm_ummu_mthd,
	.sclass = nvkm_ummu_sclass,
};
156 
157 int
nvkm_ummu_new(struct nvkm_device * device,const struct nvkm_oclass * oclass,void * argv,u32 argc,struct nvkm_object ** pobject)158 nvkm_ummu_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
159 	      void *argv, u32 argc, struct nvkm_object **pobject)
160 {
161 	union {
162 		struct nvif_mmu_v0 v0;
163 	} *args = argv;
164 	struct nvkm_mmu *mmu = device->mmu;
165 	struct nvkm_ummu *ummu;
166 	int ret = -ENOSYS, kinds = 0;
167 	u8 unused = 0;
168 
169 	if (mmu->func->kind)
170 		mmu->func->kind(mmu, &kinds, &unused);
171 
172 	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
173 		args->v0.dmabits = mmu->dma_bits;
174 		args->v0.heap_nr = mmu->heap_nr;
175 		args->v0.type_nr = mmu->type_nr;
176 		args->v0.kind_nr = kinds;
177 	} else
178 		return ret;
179 
180 	if (!(ummu = kzalloc(sizeof(*ummu), GFP_KERNEL)))
181 		return -ENOMEM;
182 	nvkm_object_ctor(&nvkm_ummu, oclass, &ummu->object);
183 	ummu->mmu = mmu;
184 	*pobject = &ummu->object;
185 	return 0;
186 }
187