/*	$NetBSD: nouveau_nvkm_subdev_fb_nv50.c,v 1.5 2021/12/19 10:51:58 riastradh Exp $	*/

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_fb_nv50.c,v 1.5 2021/12/19 10:51:58 riastradh Exp $");

#include "nv50.h"
#include "ram.h"

#include <core/client.h>
#include <core/enum.h>
#include <engine/fifo.h>

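/*
 * Delegate VRAM object creation to the chip-specific ram_new hook in
 * struct nv50_fb_func.
 */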
static int
nv50_fb_ram_new(struct nvkm_fb *base, struct nvkm_ram **pram)
{
	struct nv50_fb *fb = nv50_fb(base);
	return fb->func->ram_new(&fb->base, pram);
}

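/*
 * Lookup tables used to turn the raw VM fault status read back through
 * 0x100c90/0x100c94 into readable engine, client, subclient and reason
 * names.  The names follow nouveau's reverse-engineered conventions.
 */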
static const struct nvkm_enum vm_dispatch_subclients[] = {
	{ 0x00000000, "GRCTX" },
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "QUERY" },
	{ 0x00000003, "COND" },
	{ 0x00000004, "M2M_IN" },
	{ 0x00000005, "M2M_OUT" },
	{ 0x00000006, "M2M_NOTIFY" },
	{}
};

static const struct nvkm_enum vm_ccache_subclients[] = {
	{ 0x00000000, "CB" },
	{ 0x00000001, "TIC" },
	{ 0x00000002, "TSC" },
	{}
};

static const struct nvkm_enum vm_prop_subclients[] = {
	{ 0x00000000, "RT0" },
	{ 0x00000001, "RT1" },
	{ 0x00000002, "RT2" },
	{ 0x00000003, "RT3" },
	{ 0x00000004, "RT4" },
	{ 0x00000005, "RT5" },
	{ 0x00000006, "RT6" },
	{ 0x00000007, "RT7" },
	{ 0x00000008, "ZETA" },
	{ 0x00000009, "LOCAL" },
	{ 0x0000000a, "GLOBAL" },
	{ 0x0000000b, "STACK" },
	{ 0x0000000c, "DST2D" },
	{}
};

static const struct nvkm_enum vm_pfifo_subclients[] = {
	{ 0x00000000, "PUSHBUF" },
	{ 0x00000001, "SEMAPHORE" },
	{}
};

static const struct nvkm_enum vm_bar_subclients[] = {
	{ 0x00000000, "FB" },
	{ 0x00000001, "IN" },
	{}
};

static const struct nvkm_enum vm_client[] = {
	{ 0x00000000, "STRMOUT" },
	{ 0x00000003, "DISPATCH", vm_dispatch_subclients },
	{ 0x00000004, "PFIFO_WRITE" },
	{ 0x00000005, "CCACHE", vm_ccache_subclients },
	{ 0x00000006, "PMSPPP" },
	{ 0x00000007, "CLIPID" },
	{ 0x00000008, "PFIFO_READ" },
	{ 0x00000009, "VFETCH" },
	{ 0x0000000a, "TEXTURE" },
	{ 0x0000000b, "PROP", vm_prop_subclients },
	{ 0x0000000c, "PVP" },
	{ 0x0000000d, "PBSP" },
	{ 0x0000000e, "PCRYPT" },
	{ 0x0000000f, "PCOUNTER" },
	{ 0x00000011, "PDAEMON" },
	{}
};

static const struct nvkm_enum vm_engine[] = {
	{ 0x00000000, "PGRAPH" },
	{ 0x00000001, "PVP" },
	{ 0x00000004, "PEEPHOLE" },
	{ 0x00000005, "PFIFO", vm_pfifo_subclients },
	{ 0x00000006, "BAR", vm_bar_subclients },
	{ 0x00000008, "PMSPPP" },
	{ 0x00000008, "PMPEG" },
	{ 0x00000009, "PBSP" },
	{ 0x0000000a, "PCRYPT" },
	{ 0x0000000b, "PCOUNTER" },
	{ 0x0000000c, "SEMAPHORE_BG" },
	{ 0x0000000d, "PCE0" },
	{ 0x0000000e, "PMU" },
	{}
};

static const struct nvkm_enum vm_fault[] = {
	{ 0x00000000, "PT_NOT_PRESENT" },
	{ 0x00000001, "PT_TOO_SHORT" },
	{ 0x00000002, "PAGE_NOT_PRESENT" },
	{ 0x00000003, "PAGE_SYSTEM_ONLY" },
	{ 0x00000004, "PAGE_READ_ONLY" },
	{ 0x00000006, "NULL_DMAOBJ" },
	{ 0x00000007, "WRONG_MEMTYPE" },
	{ 0x0000000b, "VRAM_LIMIT" },
	{ 0x0000000f, "DMAOBJ_LIMIT" },
	{}
};

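/*
 * VM fault interrupt handler: latch the six trap words via the
 * 0x100c90 index / 0x100c94 data window, decode the engine, reason,
 * client and subclient fields (packed as nibbles on chipsets below
 * 0xa3 and on 0xaa/0xac, as bytes otherwise), look up the offending
 * channel by instance address, and log the fault.
 */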
static void
nv50_fb_intr(struct nvkm_fb *base)
{
	struct nv50_fb *fb = nv50_fb(base);
	struct nvkm_subdev *subdev = &fb->base.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fifo *fifo = device->fifo;
	struct nvkm_fifo_chan *chan;
	const struct nvkm_enum *en, *re, *cl, *sc;
	u32 trap[6], idx, inst;
	u8 st0, st1, st2, st3;
	unsigned long flags;
	int i;

	idx = nvkm_rd32(device, 0x100c90);
	if (!(idx & 0x80000000))
		return;
	idx &= 0x00ffffff;

	for (i = 0; i < 6; i++) {
		nvkm_wr32(device, 0x100c90, idx | i << 24);
		trap[i] = nvkm_rd32(device, 0x100c94);
	}
	nvkm_wr32(device, 0x100c90, idx | 0x80000000);

	/* decode status bits into something more useful */
	if (device->chipset  < 0xa3 ||
	    device->chipset == 0xaa || device->chipset == 0xac) {
		st0 = (trap[0] & 0x0000000f) >> 0;
		st1 = (trap[0] & 0x000000f0) >> 4;
		st2 = (trap[0] & 0x00000f00) >> 8;
		st3 = (trap[0] & 0x0000f000) >> 12;
	} else {
		st0 = (trap[0] & 0x000000ff) >> 0;
		st1 = (trap[0] & 0x0000ff00) >> 8;
		st2 = (trap[0] & 0x00ff0000) >> 16;
		st3 = (trap[0] & 0xff000000) >> 24;
	}
	inst = ((trap[2] << 16) | trap[1]) << 12;

	en = nvkm_enum_find(vm_engine, st0);
	re = nvkm_enum_find(vm_fault , st1);
	cl = nvkm_enum_find(vm_client, st2);
	if      (cl && cl->data) sc = nvkm_enum_find(cl->data, st3);
	else if (en && en->data) sc = nvkm_enum_find(en->data, st3);
	else                     sc = NULL;

	chan = nvkm_fifo_chan_inst(fifo, inst, &flags);
	nvkm_error(subdev, "trapped %s at %02x%04x%04x on channel %d [%08x %s] "
			   "engine %02x [%s] client %02x [%s] "
			   "subclient %02x [%s] reason %08x [%s]\n",
		   (trap[5] & 0x00000100) ? "read" : "write",
		   trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff,
		   chan ? chan->chid : -1, inst,
		   chan ? chan->object.client->name : "unknown",
		   st0, en ? en->name : "",
		   st2, cl ? cl->name : "", st3, sc ? sc->name : "",
		   st1, re ? re->name : "");
	nvkm_fifo_chan_put(fifo, flags, &chan);
}

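/*
 * One-time setup: allocate and DMA-map a single zeroed scratch page
 * whose bus address is later written to 0x100c08 by nv50_fb_init().
 * The NetBSD path uses bus_dma(9); the Linux path uses alloc_page()
 * and dma_map_page().
 */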
static int
nv50_fb_oneinit(struct nvkm_fb *base)
{
	struct nv50_fb *fb = nv50_fb(base);
	struct nvkm_device *device = fb->base.subdev.device;

#ifdef __NetBSD__
    {
	const bus_dma_tag_t dmat = device->func->dma_tag(device);
	int nsegs;
	int ret;

	fb->r100c08_page = NULL; /* paranoia */
	fb->r100c08_kva = NULL;

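	/*
	 * The error handling below unwinds in reverse order of setup:
	 * each failure branch carries its own failN label and jumps
	 * back to the previous one, so a later failure releases
	 * everything the earlier steps acquired.  fail3 is never a
	 * goto target, hence the __unused.
	 */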
	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamem_alloc(dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &fb->r100c08_seg, 1, &nsegs, BUS_DMA_WAITOK);
	if (ret)
fail0:		return ret;
	KASSERT(nsegs == 1);

	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamap_create(dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_WAITOK, &fb->r100c08_page);
	if (ret) {
fail1:		bus_dmamem_free(dmat, &fb->r100c08_seg, 1);
		goto fail0;
	}

	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamem_map(dmat, &fb->r100c08_seg, 1, PAGE_SIZE,
	    &fb->r100c08_kva, BUS_DMA_WAITOK);
	if (ret) {
fail2:		bus_dmamap_destroy(dmat, fb->r100c08_page);
		goto fail1;
	}
	(void)memset(fb->r100c08_kva, 0, PAGE_SIZE);

	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamap_load(dmat, fb->r100c08_page, fb->r100c08_kva,
	    PAGE_SIZE, NULL, BUS_DMA_WAITOK);
	if (ret) {
fail3: __unused	bus_dmamem_unmap(dmat, fb->r100c08_kva, PAGE_SIZE);
		goto fail2;
	}

	fb->r100c08 = fb->r100c08_page->dm_segs[0].ds_addr;
    }
#else
	fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (fb->r100c08_page) {
		fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
					   PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(device->dev, fb->r100c08))
			return -EFAULT;
	}
#endif

	return 0;
}

static void
nv50_fb_init(struct nvkm_fb *base)
{
	struct nv50_fb *fb = nv50_fb(base);
	struct nvkm_device *device = fb->base.subdev.device;

	/* Not a clue what this is exactly.  Without pointing it at a
	 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
	 * cause IOMMU "read from address 0" errors (rh#561267)
	 */
	nvkm_wr32(device, 0x100c08, fb->r100c08 >> 8);

	/* This is needed to get meaningful information from 100c90
	 * on traps. No idea what these values mean exactly. */
	nvkm_wr32(device, 0x100c90, fb->func->trap);
}

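/*
 * Report the number of compression tags provided by this variant, or
 * zero if it has no tags hook.
 */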
static u32
nv50_fb_tags(struct nvkm_fb *base)
{
	struct nv50_fb *fb = nv50_fb(base);
	if (fb->func->tags)
		return fb->func->tags(&fb->base);
	return 0;
}

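/*
 * Destructor: undo nv50_fb_oneinit()'s scratch-page setup and return
 * the nv50_fb so the caller can free it.
 */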
static void *
nv50_fb_dtor(struct nvkm_fb *base)
{
	struct nv50_fb *fb = nv50_fb(base);
	struct nvkm_device *device = fb->base.subdev.device;

	if (fb->r100c08_page) {
#ifdef __NetBSD__
		const bus_dma_tag_t dmat = device->func->dma_tag(device);

		bus_dmamap_unload(dmat, fb->r100c08_page);
		bus_dmamem_unmap(dmat, fb->r100c08_kva, PAGE_SIZE);
		bus_dmamap_destroy(dmat, fb->r100c08_page);
		bus_dmamem_free(dmat, &fb->r100c08_seg, 1);
		fb->r100c08_page = NULL;
#else
		dma_unmap_page(device->dev, fb->r100c08, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(fb->r100c08_page);
#endif
	}

	return fb;
}

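/*
 * Base nvkm_fb_func vtable shared by the nv50-family implementations;
 * per-chip differences are supplied via struct nv50_fb_func.
 */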
static const struct nvkm_fb_func
nv50_fb_ = {
	.dtor = nv50_fb_dtor,
	.tags = nv50_fb_tags,
	.oneinit = nv50_fb_oneinit,
	.init = nv50_fb_init,
	.intr = nv50_fb_intr,
	.ram_new = nv50_fb_ram_new,
};

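/*
 * Common constructor: allocate the nv50_fb wrapper, attach the shared
 * vtable above, and record the chip-specific nv50_fb_func.  Used by
 * nv50_fb_new() below and by the other nv50-family variants.
 */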
int
nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
	     int index, struct nvkm_fb **pfb)
{
	struct nv50_fb *fb;

	if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_fb_ctor(&nv50_fb_, device, index, &fb->base);
	fb->func = func;
	*pfb = &fb->base;

	return 0;
}

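/*
 * Chip-specific parameters for NV50 itself: RAM detection, nv20-style
 * tag counting, and the trap-enable value written to 0x100c90.
 */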
static const struct nv50_fb_func
nv50_fb = {
	.ram_new = nv50_ram_new,
	.tags = nv20_fb_tags,
	.trap = 0x000707ff,
};

int
nv50_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
	return nv50_fb_new_(&nv50_fb, device, index, pfb);
}