/*	$NetBSD: nouveau_nvkm_subdev_mmu_vmmnv41.c,v 1.3 2021/12/19 10:51:58 riastradh Exp $	*/

/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_mmu_vmmnv41.c,v 1.3 2021/12/19 10:51:58 riastradh Exp $");

#include "vmm.h"

#include <subdev/timer.h>

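/*
 * nv41_vmm_pgt_pte: write out a run of small-page PTEs starting at
 * index ptei.  NV41 PTEs are 32-bit: the page address is stored
 * shifted right by 7 bits, with bit 0 as the VALID flag, so
 * consecutive 4KiB pages differ by 0x20 (0x1000 >> 7).  Marked
 * __unused because with PAGE_SHIFT == 12 and the sgl path compiled
 * out on NetBSD, nothing references it.
 */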
static void __unused
nv41_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	u32 data = (addr >> 7) | 0x00000001; /* VALID. */
	while (ptes--) {
		VMM_WO032(pt, vmm, ptei++ * 4, data);
		data += 0x00000020;
	}
}

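/*
 * The scatter/gather mapping path is compiled out on NetBSD, which
 * maps everything through the DMA-address path below instead.
 */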
#ifndef __NetBSD__
static void
nv41_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
}
#endif

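/*
 * nv41_vmm_pgt_dma: map pages from a list of DMA addresses.  When the
 * CPU page size matches the GPU's 4KiB page (PAGE_SHIFT == 12), write
 * each PTE directly; otherwise fall back to the generic iterator,
 * which carves CPU pages into GPU pages via nv41_vmm_pgt_pte.
 */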
static void
nv41_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
#if PAGE_SHIFT == 12
	nvkm_kmap(pt->memory);
	while (ptes--) {
		const u32 data = (*map->dma++ >> 7) | 0x00000001;
		VMM_WO032(pt, vmm, ptei++ * 4, data);
	}
	nvkm_done(pt->memory);
#else
	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
#endif
}

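/* nv41_vmm_pgt_unmap: invalidate a run of PTEs by zero-filling them. */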
static void
nv41_vmm_pgt_unmap(struct nvkm_vmm *vmm,
		   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	VMM_FO032(pt, vmm, ptei * 4, 0, ptes);
}

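/*
 * Page-table layout: a single level of 4-byte PTEs indexed by 17 bits
 * of the virtual address, 0x1000-byte aligned.
 */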
static const struct nvkm_vmm_desc_func
nv41_vmm_desc_pgt = {
	.unmap = nv41_vmm_pgt_unmap,
	.dma = nv41_vmm_pgt_dma,
#ifndef __NetBSD__
	.sgl = nv41_vmm_pgt_sgl,
#endif
};

static const struct nvkm_vmm_desc
nv41_vmm_desc_12[] = {
	{ PGT, 17, 4, 0x1000, &nv41_vmm_desc_pgt },
	{}
};

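/*
 * nv41_vmm_flush: flush the MMU TLB.  Trigger the flush through
 * register 0x100810, poll up to 2ms for the done bit (0x20), then
 * clear the trigger; the subdev mutex serializes concurrent flushes.
 * The level argument is ignored here, so the whole TLB is flushed.
 */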
static void
nv41_vmm_flush(struct nvkm_vmm *vmm, int level)
{
	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
	struct nvkm_device *device = subdev->device;

	mutex_lock(&subdev->mutex);
	nvkm_wr32(device, 0x100810, 0x00000022);
	nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x100810) & 0x00000020)
			break;
	);
	nvkm_wr32(device, 0x100810, 0x00000000);
	mutex_unlock(&subdev->mutex);
}

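/*
 * Only 4KiB host-backed pages are supported; map-argument validation
 * is shared with the NV04 code.
 */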
static const struct nvkm_vmm_func
nv41_vmm = {
	.valid = nv04_vmm_valid,
	.flush = nv41_vmm_flush,
	.page = {
		{ 12, &nv41_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
		{}
	}
};

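/*
 * nv41_vmm_new: construct an NV41 VMM.  Construction is shared with
 * NV04; the 0 passed through is the page-directory header length,
 * which NV41 does not need.
 */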
int
nv41_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
	     void *argv, u32 argc, struct lock_class_key *key, const char *name,
	     struct nvkm_vmm **pvmm)
{
	return nv04_vmm_new_(&nv41_vmm, mmu, 0, managed, addr, size,
			     argv, argc, key, name, pvmm);
}