/*	$NetBSD: nouveau_nvkm_subdev_mmu_vmmgf100.c,v 1.3 2021/12/19 10:51:58 riastradh Exp $	*/

/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_mmu_vmmgf100.c,v 1.3 2021/12/19 10:51:58 riastradh Exp $");

#include "vmm.h"

#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <subdev/timer.h>

#include <nvif/if900d.h>
#include <nvif/unpack.h>

#include <linux/nbsd-namespace.h>

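/*
 * Write a run of 64-bit PTEs.  The physical address is stored in 256-byte
 * units (hence the >> 8), merged with the type bits prepared by
 * gf100_vmm_valid().  The first branch apparently handles the compression
 * scheme where map->ctag carries the comptag in its low bits: two PTEs
 * share one comptag line (map->ctag >> 1 lands at bit 44, BIT(60) selects
 * the half).  Otherwise any comptag state is already folded into
 * map->type/map->next and the entries advance by a simple stride.
 */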
static inline void
gf100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	u64 base = (addr >> 8) | map->type;
	u64 data = base;

	if (map->ctag && !(map->next & (1ULL << 44))) {
		while (ptes--) {
			data = base | ((map->ctag >> 1) << 44);
			if (!(map->ctag++ & 1))
				data |= BIT_ULL(60);

			VMM_WO064(pt, vmm, ptei++ * 8, data);
			base += map->next;
		}
	} else {
		map->type += ptes * map->ctag;

		while (ptes--) {
			VMM_WO064(pt, vmm, ptei++ * 8, data);
			data += map->next;
		}
	}
}

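/*
 * The scatter/gather path depends on the Linux struct scatterlist
 * machinery, which the NetBSD port presumably has no equivalent for, so
 * it is compiled out here and left unset in gf100_vmm_pgt below.
 */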
#ifndef __NetBSD__
void
gf100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
}
#endif

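/*
 * DMA-mapped pages: when the GPU page size matches the CPU's PAGE_SIZE,
 * each PTE can be written straight from the map->dma address array rather
 * than going through the generic iterator.
 */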
void
gf100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	if (map->page->shift == PAGE_SHIFT) {
		VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
		nvkm_kmap(pt->memory);
		while (ptes--) {
			const u64 data = (*map->dma++ >> 8) | map->type;
			VMM_WO064(pt, vmm, ptei++ * 8, data);
			map->type += map->ctag;
		}
		nvkm_done(pt->memory);
		return;
	}

	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
}

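/* Generic path: iterate the backing nvkm_memory and emit PTEs. */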
void
gf100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
}

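/* Unmapping zero-fills the PTEs, clearing the valid bit (bit 0) along
 * with everything else. */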
void
gf100_vmm_pgt_unmap(struct nvkm_vmm *vmm,
		    struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes);
}

const struct nvkm_vmm_desc_func
gf100_vmm_pgt = {
	.unmap = gf100_vmm_pgt_unmap,
	.mem = gf100_vmm_pgt_mem,
	.dma = gf100_vmm_pgt_dma,
#ifndef __NetBSD__
	.sgl = gf100_vmm_pgt_sgl,
#endif
};

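/*
 * Write one 64-bit PDE.  The low word carries the target/VOL bits and the
 * address of pgt->pt[0], the high word those of pgt->pt[1].  Page table
 * addresses are stored in 256-byte units, so the high-word address is
 * addr >> 8 shifted up by 32, i.e. the << 24 below.
 */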
void
gf100_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
	struct nvkm_mmu_pt *pd = pgd->pt[0];
	struct nvkm_mmu_pt *pt;
	u64 data = 0;

	if ((pt = pgt->pt[0])) {
		switch (nvkm_memory_target(pt->memory)) {
		case NVKM_MEM_TARGET_VRAM: data |= 1ULL << 0; break;
		case NVKM_MEM_TARGET_HOST: data |= 2ULL << 0;
			data |= BIT_ULL(35); /* VOL */
			break;
		case NVKM_MEM_TARGET_NCOH: data |= 3ULL << 0; break;
		default:
			WARN_ON(1);
			return;
		}
		data |= pt->addr >> 8;
	}

	if ((pt = pgt->pt[1])) {
		switch (nvkm_memory_target(pt->memory)) {
		case NVKM_MEM_TARGET_VRAM: data |= 1ULL << 32; break;
		case NVKM_MEM_TARGET_HOST: data |= 2ULL << 32;
			data |= BIT_ULL(34); /* VOL */
			break;
		case NVKM_MEM_TARGET_NCOH: data |= 3ULL << 32; break;
		default:
			WARN_ON(1);
			return;
		}
		data |= pt->addr << 24;
	}

	nvkm_kmap(pd->memory);
	VMM_WO064(pd, vmm, pdei * 8, data);
	nvkm_done(pd->memory);
}

const struct nvkm_vmm_desc_func
gf100_vmm_pgd = {
	.unmap = gf100_vmm_pgt_unmap,
	.pde = gf100_vmm_pgd_pde,
};

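/*
 * Page table layouts, named gf100_vmm_desc_<big page shift>_<page shift>;
 * fields are { type, index bits, bytes per entry, alignment, funcs }.
 * Each combination covers a 40-bit virtual address space: e.g. a 17-bit
 * big-page VMM uses a 13-bit PGD over 15-bit small-page (15 + 12 + 13)
 * or 10-bit large-page (10 + 17 + 13) tables.
 */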
static const struct nvkm_vmm_desc
gf100_vmm_desc_17_12[] = {
	{ SPT, 15, 8, 0x1000, &gf100_vmm_pgt },
	{ PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
	{}
};

static const struct nvkm_vmm_desc
gf100_vmm_desc_17_17[] = {
	{ LPT, 10, 8, 0x1000, &gf100_vmm_pgt },
	{ PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
	{}
};

static const struct nvkm_vmm_desc
gf100_vmm_desc_16_12[] = {
	{ SPT, 14, 8, 0x1000, &gf100_vmm_pgt },
	{ PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
	{}
};

static const struct nvkm_vmm_desc
gf100_vmm_desc_16_16[] = {
	{ LPT, 10, 8, 0x1000, &gf100_vmm_pgt },
	{ PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
	{}
};

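/*
 * 0x100cb8 apparently takes the page directory base for the invalidate
 * subsequently triggered via 0x100cbc.
 */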
void
gf100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
{
	struct nvkm_device *device = vmm->mmu->subdev.device;
	nvkm_wr32(device, 0x100cb8, addr);
}

void
gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type)
{
	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_mmu_pt *pd = vmm->pd->pt[0];
	u64 addr = 0;

	mutex_lock(&subdev->mutex);
	/* Looks like maybe a "free flush slots" counter, the
	 * faster you write to 0x100cbc the more it decreases.
	 */
	nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
			break;
	);

	if (!(type & 0x00000002) /* ALL_PDB. */) {
		switch (nvkm_memory_target(pd->memory)) {
		case NVKM_MEM_TARGET_VRAM: addr |= 0x00000000; break;
		case NVKM_MEM_TARGET_HOST: addr |= 0x00000002; break;
		case NVKM_MEM_TARGET_NCOH: addr |= 0x00000003; break;
		default:
			WARN_ON(1);
			break;
		}
		addr |= (vmm->pd->pt[0]->addr >> 12) << 4;

		vmm->func->invalidate_pdb(vmm, addr);
	}

	nvkm_wr32(device, 0x100cbc, 0x80000000 | type);

	/* Wait for flush to be queued? */
	nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x100c80) & 0x00008000)
			break;
	);
	mutex_unlock(&subdev->mutex);
}

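/* gf100 has no depth-limited flush: the depth argument is ignored and the
 * whole address space is invalidated (PAGE_ALL), hub-only while the BAR
 * subdev holds a reference. */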
void
gf100_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
	u32 type = 0x00000001; /* PAGE_ALL */
	if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
		type |= 0x00000004; /* HUB_ONLY */
	gf100_vmm_invalidate(vmm, type);
}

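/*
 * Validate map arguments and assemble the PTE type bits consumed by
 * gf100_vmm_pgt_pte():
 *
 *	bit 0		valid
 *	bit 1		privileged
 *	bit 2		read-only
 *	bit 32		volatile
 *	bits 33:34	aperture
 *	bits 36+	kind
 *	bits 44+	comptag (see the two schemes in gf100_vmm_pgt_pte())
 *
 * Compressible kinds additionally reserve comptag lines from LTC, falling
 * back to the uncompressed kind if none are available.
 */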
int
gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
		struct nvkm_vmm_map *map)
{
	const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
	const struct nvkm_vmm_page *page = map->page;
	const bool gm20x = page->desc->func->sparse != NULL;
	union {
		struct gf100_vmm_map_vn vn;
		struct gf100_vmm_map_v0 v0;
	} *args = argv;
	struct nvkm_device *device = vmm->mmu->subdev.device;
	struct nvkm_memory *memory = map->memory;
	u8  kind, kind_inv, priv, ro, vol;
	int kindn, aper, ret = -ENOSYS;
	const u8 *kindm;

	map->next = (1 << page->shift) >> 8;
	map->type = map->ctag = 0;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		vol  = !!args->v0.vol;
		ro   = !!args->v0.ro;
		priv = !!args->v0.priv;
		kind =   args->v0.kind;
	} else
	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		vol  = target == NVKM_MEM_TARGET_HOST;
		ro   = 0;
		priv = 0;
		kind = 0x00;
	} else {
		VMM_DEBUG(vmm, "args");
		return ret;
	}

	aper = vmm->func->aper(target);
	if (WARN_ON(aper < 0))
		return aper;

	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
	if (kind >= kindn || kindm[kind] == kind_inv) {
		VMM_DEBUG(vmm, "kind %02x", kind);
		return -EINVAL;
	}

	if (kindm[kind] != kind) {
		u32 comp = (page->shift == 16 && !gm20x) ? 16 : 17;
		u32 tags = ALIGN(nvkm_memory_size(memory), 1 << 17) >> comp;
		if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
			VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
			return -EINVAL;
		}

		ret = nvkm_memory_tags_get(memory, device, tags,
					   nvkm_ltc_tags_clear,
					   &map->tags);
		if (ret) {
			VMM_DEBUG(vmm, "comp %d", ret);
			return ret;
		}

		if (map->tags->mn) {
			u64 tags = map->tags->mn->offset + (map->offset >> 17);
			if (page->shift == 17 || !gm20x) {
				map->type |= tags << 44;
				map->ctag |= 1ULL << 44;
				map->next |= 1ULL << 44;
			} else {
				map->ctag |= tags << 1 | 1;
			}
		} else {
			kind = kindm[kind];
		}
	}

	map->type |= BIT(0);
	map->type |= (u64)priv << 1;
	map->type |= (u64)  ro << 2;
	map->type |= (u64) vol << 32;
	map->type |= (u64)aper << 33;
	map->type |= (u64)kind << 36;
	return 0;
}

int
gf100_vmm_aper(enum nvkm_memory_target target)
{
	switch (target) {
	case NVKM_MEM_TARGET_VRAM: return 0;
	case NVKM_MEM_TARGET_HOST: return 2;
	case NVKM_MEM_TARGET_NCOH: return 3;
	default:
		return -EINVAL;
	}
}

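/* Detach from a channel instance block: zero the two words join filled in. */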
void
gf100_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
	nvkm_fo64(inst, 0x0200, 0x00000000, 2);
}

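/*
 * Attach to a channel instance block: +0x200 gets the page directory
 * address merged with target/VOL bits (callers may pass extra flags in
 * base), +0x208 the last addressable byte of the VMM.
 */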
int
gf100_vmm_join_(struct nvkm_vmm *vmm, struct nvkm_memory *inst, u64 base)
{
	struct nvkm_mmu_pt *pd = vmm->pd->pt[0];

	switch (nvkm_memory_target(pd->memory)) {
	case NVKM_MEM_TARGET_VRAM: base |= 0ULL << 0; break;
	case NVKM_MEM_TARGET_HOST: base |= 2ULL << 0;
		base |= BIT_ULL(2) /* VOL. */;
		break;
	case NVKM_MEM_TARGET_NCOH: base |= 3ULL << 0; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}
	base |= pd->addr;

	nvkm_kmap(inst);
	nvkm_wo64(inst, 0x0200, base);
	nvkm_wo64(inst, 0x0208, vmm->limit - 1);
	nvkm_done(inst);
	return 0;
}

int
gf100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
	return gf100_vmm_join_(vmm, inst, 0);
}

static const struct nvkm_vmm_func
gf100_vmm_17 = {
	.join = gf100_vmm_join,
	.part = gf100_vmm_part,
	.aper = gf100_vmm_aper,
	.valid = gf100_vmm_valid,
	.flush = gf100_vmm_flush,
	.invalidate_pdb = gf100_vmm_invalidate_pdb,
	.page = {
		{ 17, &gf100_vmm_desc_17_17[0], NVKM_VMM_PAGE_xVxC },
		{ 12, &gf100_vmm_desc_17_12[0], NVKM_VMM_PAGE_xVHx },
		{}
	}
};

static const struct nvkm_vmm_func
gf100_vmm_16 = {
	.join = gf100_vmm_join,
	.part = gf100_vmm_part,
	.aper = gf100_vmm_aper,
	.valid = gf100_vmm_valid,
	.flush = gf100_vmm_flush,
	.invalidate_pdb = gf100_vmm_invalidate_pdb,
	.page = {
		{ 16, &gf100_vmm_desc_16_16[0], NVKM_VMM_PAGE_xVxC },
		{ 12, &gf100_vmm_desc_16_12[0], NVKM_VMM_PAGE_xVHx },
		{}
	}
};

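/*
 * The big-page size (and hence which nvkm_vmm_func variant applies) is
 * dictated by the VRAM page size the fb subdev selected, so pick between
 * the 16- and 17-bit layouts here rather than per chipset.
 */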
int
gf100_vmm_new_(const struct nvkm_vmm_func *func_16,
	       const struct nvkm_vmm_func *func_17,
	       struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
	       void *argv, u32 argc, struct lock_class_key *key,
	       const char *name, struct nvkm_vmm **pvmm)
{
	switch (mmu->subdev.device->fb->page) {
	case 16: return nv04_vmm_new_(func_16, mmu, 0, managed, addr, size,
				      argv, argc, key, name, pvmm);
	case 17: return nv04_vmm_new_(func_17, mmu, 0, managed, addr, size,
				      argv, argc, key, name, pvmm);
	default:
		WARN_ON(1);
		return -EINVAL;
	}
}

int
gf100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
	      void *argv, u32 argc, struct lock_class_key *key,
	      const char *name, struct nvkm_vmm **pvmm)
{
	return gf100_vmm_new_(&gf100_vmm_16, &gf100_vmm_17, mmu, managed, addr,
			      size, argv, argc, key, name, pvmm);
}