/*	$NetBSD: nouveau_nvkm_subdev_mmu_vmmnv04.c,v 1.3 2021/12/19 10:51:58 riastradh Exp $	*/

/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_mmu_vmmnv04.c,v 1.3 2021/12/19 10:51:58 riastradh Exp $");

#include "vmm.h"

#include <nvif/if000d.h>
#include <nvif/unpack.h>

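/* Write 'ptes' consecutive 32-bit PTEs starting at index 'ptei', each marked
 * present and read/write; the backing address advances by one 4KiB page per
 * entry.  PTEs begin 8 bytes into the object, after the header written by
 * nv04_vmm_new() below.
 */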
static inline void
nv04_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	u32 data = addr | 0x00000003; /* PRESENT, RW. */
	while (ptes--) {
		VMM_WO032(pt, vmm, 8 + ptei++ * 4, data);
		data += 0x00001000;
	}
}

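/* Scatter/gather mappings are Linux-only; the NetBSD port maps host memory
 * through the DMA-address path below instead.
 */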
#ifndef __NetBSD__
static void
nv04_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
}
#endif

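/* Map a run of host pages given as an array of DMA addresses.  When the CPU
 * page size is 4KiB the PTEs are written directly from map->dma; otherwise
 * the generic DMA iterator is used, calling nv04_vmm_pgt_pte() for each run.
 */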
static void
nv04_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
#if PAGE_SHIFT == 12
	nvkm_kmap(pt->memory);
	while (ptes--)
		VMM_WO032(pt, vmm, 8 + (ptei++ * 4), *map->dma++ | 0x00000003);
	nvkm_done(pt->memory);
#else
	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
#endif
}

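/* Invalidate a run of PTEs by filling them with zero. */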
static void
nv04_vmm_pgt_unmap(struct nvkm_vmm *vmm,
		   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	VMM_FO032(pt, vmm, 8 + (ptei * 4), 0, ptes);
}

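/* Page-table accessors for the single NV04 page-table level. */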
static const struct nvkm_vmm_desc_func
nv04_vmm_desc_pgt = {
	.unmap = nv04_vmm_pgt_unmap,
	.dma = nv04_vmm_pgt_dma,
#ifndef __NetBSD__
	.sgl = nv04_vmm_pgt_sgl,
#endif
};

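/* Single-level layout: one flat table of 4-byte PTEs covering 15 bits of
 * 4KiB-page index.
 */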
static const struct nvkm_vmm_desc
nv04_vmm_desc_12[] = {
	{ PGT, 15, 4, 0x1000, &nv04_vmm_desc_pgt },
	{}
};

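/* NV04 has no per-mapping options: only the versionless nv04_vmm_map_vn
 * argument is accepted.
 */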
int
nv04_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
	       struct nvkm_vmm_map *map)
{
	union {
		struct nv04_vmm_map_vn vn;
	} *args = argv;
	int ret = -ENOSYS;
	if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
		VMM_DEBUG(vmm, "args");
	return ret;
}

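/* Only 4KiB (12-bit) host pages are supported. */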
static const struct nvkm_vmm_func
nv04_vmm = {
	.valid = nv04_vmm_valid,
	.page = {
		{ 12, &nv04_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
		{}
	}
};

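/* Common constructor: create the VMM and validate the versionless creation
 * arguments.
 */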
int
nv04_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
	      u32 pd_header, bool managed, u64 addr, u64 size,
	      void *argv, u32 argc, struct lock_class_key *key,
	      const char *name, struct nvkm_vmm **pvmm)
{
	union {
		struct nv04_vmm_vn vn;
	} *args = argv;
	int ret;

	ret = nvkm_vmm_new_(func, mmu, pd_header, managed, addr, size,
			    key, name, pvmm);
	if (ret)
		return ret;

	return nvif_unvers(-ENOSYS, &argv, &argc, args->vn);
}

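/* Instantiate an NV04 VMM with an 8-byte page-table header, then fill in the
 * header words: access flags (per the comment below: PCI, RW, PT, !LN) and
 * the address limit.
 */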
int
nv04_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
	     void *argv, u32 argc, struct lock_class_key *key, const char *name,
	     struct nvkm_vmm **pvmm)
{
	struct nvkm_memory *mem;
	struct nvkm_vmm *vmm;
	int ret;

	ret = nv04_vmm_new_(&nv04_vmm, mmu, 8, managed, addr, size,
			    argv, argc, key, name, &vmm);
	*pvmm = vmm;
	if (ret)
		return ret;

	mem = vmm->pd->pt[0]->memory;
	nvkm_kmap(mem);
	nvkm_wo32(mem, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
	nvkm_wo32(mem, 0x00004, vmm->limit - 1);
	nvkm_done(mem);
	return 0;
}