/*	$NetBSD: nouveau_nvkm_subdev_mmu_mem.c,v 1.8 2022/05/31 20:53:35 mrg Exp $	*/

/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_mmu_mem.c,v 1.8 2022/05/31 20:53:35 mrg Exp $");

#define nvkm_mem(p) container_of((p), struct nvkm_mem, memory)
#include "mem.h"

#include <core/memory.h>

#include <nvif/if000a.h>
#include <nvif/unpack.h>

#include <linux/nbsd-namespace.h>

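/*
 * nvkm_mem - system-memory (host) backing for an nvkm_memory object.
 *
 * On NetBSD the pages are tracked as bus_dma(9) segments plus a loaded
 * DMA map and an array of per-page bus addresses; on Linux they are an
 * array of struct page pointers with either a parallel DMA-address
 * array or a caller-provided scatterlist.
 */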
struct nvkm_mem {
	struct nvkm_memory memory;
	enum nvkm_memory_target target;
	struct nvkm_mmu *mmu;
	u64 pages;
#ifdef __NetBSD__
	bus_dma_segment_t *mem;
	int nseg;
	bus_dmamap_t dmamap;
	bus_addr_t *dma;
#else
	struct page **mem;
	union {
		struct scatterlist *sgl;
		dma_addr_t *dma;
	};
#endif
};

static enum nvkm_memory_target
nvkm_mem_target(struct nvkm_memory *memory)
{
	return nvkm_mem(memory)->target;
}

static u8
nvkm_mem_page(struct nvkm_memory *memory)
{
	return PAGE_SHIFT;
}

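/*
 * A host allocation has a single usable bus address only when it is
 * exactly one page long; longer allocations need not be physically
 * contiguous, so report "no linear address" (~0ULL) for them.
 */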
static u64
nvkm_mem_addr(struct nvkm_memory *memory)
{
	struct nvkm_mem *mem = nvkm_mem(memory);
	if (mem->pages == 1 && mem->mem)
		return mem->dma[0];
	return ~0ULL;
}

static u64
nvkm_mem_size(struct nvkm_memory *memory)
{
	return nvkm_mem(memory)->pages << PAGE_SHIFT;
}

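/*
 * Hand the per-page bus-address array to the VMM backend, which writes
 * the GPU page tables for the given virtual allocation.
 */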
static int
nvkm_mem_map_dma(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
		 struct nvkm_vma *vma, void *argv, u32 argc)
{
	struct nvkm_mem *mem = nvkm_mem(memory);
	struct nvkm_vmm_map map = {
		.memory = &mem->memory,
		.offset = offset,
		.dma = mem->dma,
	};
	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}

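/*
 * Tear down in the reverse order of nvkm_mem_new_host(): on NetBSD,
 * free the bus-address array, then unload the DMA map, release the
 * segments and destroy the map; on Linux, unmap and free each page.
 * The nvkm_mem itself is returned for the memory core to free.
 */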
static void *
nvkm_mem_dtor(struct nvkm_memory *memory)
{
	struct nvkm_mem *mem = nvkm_mem(memory);
#ifdef __NetBSD__
	if (mem->dma) {
		kmem_free(mem->dma, mem->nseg * sizeof(mem->dma[0]));
	}
	if (mem->mem) {
		struct nvkm_device *device = mem->mmu->subdev.device;
		bus_dma_tag_t dmat = device->func->dma_tag(device);

		bus_dmamap_unload(dmat, mem->dmamap);
		bus_dmamem_free(dmat, mem->mem, mem->nseg);
		bus_dmamap_destroy(dmat, mem->dmamap);
		kmem_free(mem->mem, mem->pages * sizeof(mem->mem[0]));
	}
#else
	if (mem->mem) {
		while (mem->pages--) {
			dma_unmap_page(mem->mmu->subdev.device->dev,
				       mem->dma[mem->pages], PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
			__free_page(mem->mem[mem->pages]);
		}
		kvfree(mem->dma);
		kvfree(mem->mem);
	}
#endif
	return mem;
}

static const struct nvkm_memory_func
nvkm_mem_dma = {
	.dtor = nvkm_mem_dtor,
	.target = nvkm_mem_target,
	.page = nvkm_mem_page,
	.addr = nvkm_mem_addr,
	.size = nvkm_mem_size,
	.map = nvkm_mem_map_dma,
};

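/*
 * Scatterlist-backed variant, Linux only: identical to nvkm_mem_dma
 * except that the map hook passes the caller's scatterlist instead of
 * a bus-address array.
 */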
#ifndef __NetBSD__
static int
nvkm_mem_map_sgl(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
		 struct nvkm_vma *vma, void *argv, u32 argc)
{
	struct nvkm_mem *mem = nvkm_mem(memory);
	struct nvkm_vmm_map map = {
		.memory = &mem->memory,
		.offset = offset,
		.sgl = mem->sgl,
	};
	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}

static const struct nvkm_memory_func
nvkm_mem_sgl = {
	.dtor = nvkm_mem_dtor,
	.target = nvkm_mem_target,
	.page = nvkm_mem_page,
	.addr = nvkm_mem_addr,
	.size = nvkm_mem_size,
	.map = nvkm_mem_map_sgl,
};
#endif

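/*
 * Map the allocation into kernel virtual address space: vmap() of the
 * page array on Linux, bus_dmamem_map(9) of the segments on NetBSD.
 * The NetBSD variant also hands back the DMA tag and mapped size so
 * that the caller can later bus_dmamem_unmap(9) the region.
 */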
int
#ifdef __NetBSD__
nvkm_mem_map_host(struct nvkm_memory *memory, bus_dma_tag_t *tagp, void **pmap,
    bus_size_t *sizep)
#else
nvkm_mem_map_host(struct nvkm_memory *memory, void **pmap)
#endif
{
	struct nvkm_mem *mem = nvkm_mem(memory);
	if (mem->mem) {
#ifdef __NetBSD__
		struct nvkm_device *device = mem->mmu->subdev.device;
		bus_dma_tag_t dmat = device->func->dma_tag(device);
		/* XXX errno NetBSD->Linux */
		int ret = -bus_dmamem_map(dmat, mem->mem, mem->nseg,
		    mem->pages << PAGE_SHIFT, pmap, BUS_DMA_WAITOK);
		if (ret) {
			*pmap = NULL;
			return ret;
		}
		*tagp = dmat;
		*sizep = mem->pages << PAGE_SHIFT;
		return 0;
#else
		*pmap = vmap(mem->mem, mem->pages, VM_MAP, PAGE_KERNEL);
#endif
		return *pmap ? 0 : -EFAULT;
	}
	return -EINVAL;
}

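/*
 * Create a host-memory object.  Two argument forms are accepted: a v0
 * struct carrying caller-supplied DMA state (an address array on Linux,
 * a loaded DMA map on NetBSD), in which case the pages already exist;
 * or the versionless form, in which case the pages are allocated and
 * DMA-mapped here.
 */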
static int
nvkm_mem_new_host(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
		  void *argv, u32 argc, struct nvkm_memory **pmemory)
{
	struct device *dev = mmu->subdev.device->dev;
	union {
		struct nvif_mem_ram_vn vn;
		struct nvif_mem_ram_v0 v0;
	} *args = argv;
	int ret = -ENOSYS;
	enum nvkm_memory_target target;
	struct nvkm_mem *mem;
	gfp_t gfp = GFP_USER | __GFP_ZERO;

	if ( (mmu->type[type].type & NVKM_MEM_COHERENT) &&
	    !(mmu->type[type].type & NVKM_MEM_UNCACHED))
		target = NVKM_MEM_TARGET_HOST;
	else
		target = NVKM_MEM_TARGET_NCOH;

	if (page != PAGE_SHIFT)
		return -EINVAL;

	if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
		return -ENOMEM;
	mem->target = target;
	mem->mmu = mmu;
	*pmemory = &mem->memory;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		if (args->v0.dma) {
			nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
#ifndef __NetBSD__
			mem->dma = args->v0.dma;
#else
			mem->dmamap = args->v0.dma;
			mem->nseg = mem->dmamap->dm_nsegs;
			mem->dma = kmem_zalloc(mem->dmamap->dm_nsegs *
			    sizeof(mem->dma[0]), KM_SLEEP);
			for (unsigned i = 0; i < mem->dmamap->dm_nsegs; i++) {
				KASSERT(mem->dmamap->dm_segs[i].ds_len <=
				    PAGE_SIZE);
				mem->dma[i] = mem->dmamap->dm_segs[i].ds_addr;
			}
#endif
		} else {
#ifdef __NetBSD__
			return -ENODEV;
#else
			nvkm_memory_ctor(&nvkm_mem_sgl, &mem->memory);
			mem->sgl = args->v0.sgl;
#endif
		}

		if (!IS_ALIGNED(size, PAGE_SIZE))
			return -EINVAL;
		mem->pages = size >> PAGE_SHIFT;
#ifdef __NetBSD__
		KASSERT(mem->pages == mem->nseg);
#endif
		return 0;
	} else
	if ( (ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		kfree(mem);
		return ret;
	}

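	/*
	 * No caller-supplied pages: allocate page-sized DMA-safe chunks
	 * and record their bus addresses for nvkm_mem_map_dma().
	 */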
	nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
	size = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

#ifdef __NetBSD__
	__USE(gfp);
	__USE(dev);
	struct nvkm_device *device = mem->mmu->subdev.device;
	bus_dma_tag_t dmat = device->func->dma_tag(device);
	mem->mem = kmem_zalloc(size * sizeof(mem->mem[0]), KM_SLEEP);
	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamem_alloc(dmat, size << PAGE_SHIFT, PAGE_SIZE, PAGE_SIZE,
	    mem->mem, size, &mem->nseg, BUS_DMA_WAITOK);
	if (ret) {
fail0:		kmem_free(mem->mem, size * sizeof(mem->mem[0]));
		return ret;
	}
	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamap_create(dmat, size << PAGE_SHIFT, mem->nseg,
	    PAGE_SIZE, PAGE_SIZE, BUS_DMA_WAITOK, &mem->dmamap);
	if (ret) {
fail1:		bus_dmamem_free(dmat, mem->mem, mem->nseg);
		goto fail0;
	}
	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamap_load_raw(dmat, mem->dmamap, mem->mem, mem->nseg,
	    size << PAGE_SHIFT, BUS_DMA_WAITOK);
	if (ret) {
fail2: __unused
		bus_dmamap_destroy(dmat, mem->dmamap);
		goto fail1;
	}
	mem->dma = kmem_zalloc(mem->dmamap->dm_nsegs * sizeof(mem->dma[0]),
	    KM_SLEEP);
	for (unsigned i = 0; i < mem->dmamap->dm_nsegs; i++) {
		KASSERT(mem->dmamap->dm_segs[i].ds_len <= PAGE_SIZE);
		mem->dma[i] = mem->dmamap->dm_segs[i].ds_addr;
	}
	mem->pages = size;
	KASSERT(mem->pages == mem->nseg);
#else
	if (!(mem->mem = kvmalloc_array(size, sizeof(*mem->mem), GFP_KERNEL)))
		return -ENOMEM;
	if (!(mem->dma = kvmalloc_array(size, sizeof(*mem->dma), GFP_KERNEL)))
		return -ENOMEM;

	if (mmu->dma_bits > 32)
		gfp |= GFP_HIGHUSER;
	else
		gfp |= GFP_DMA32;

	for (mem->pages = 0; size; size--, mem->pages++) {
		struct page *p = alloc_page(gfp);
		if (!p)
			return -ENOMEM;

		mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev,
						    p, 0, PAGE_SIZE,
						    DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, mem->dma[mem->pages])) {
			__free_page(p);
			return -ENOMEM;
		}

		mem->mem[mem->pages] = p;
	}
#endif

	return 0;
}

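/*
 * Entry point for memory-object creation: dispatch to the per-GPU VRAM
 * allocator for VRAM types, or to nvkm_mem_new_host() for system
 * memory.  On failure the partially constructed object is released.
 */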
int
nvkm_mem_new_type(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
		  void *argv, u32 argc, struct nvkm_memory **pmemory)
{
	struct nvkm_memory *memory = NULL;
	int ret;

	if (mmu->type[type].type & NVKM_MEM_VRAM) {
		ret = mmu->func->mem.vram(mmu, type, page, size,
					  argv, argc, &memory);
	} else {
		ret = nvkm_mem_new_host(mmu, type, page, size,
					argv, argc, &memory);
	}

	if (ret)
		nvkm_memory_unref(&memory);
	*pmemory = memory;
	return ret;
}