/*	$NetBSD: nouveau_nvkm_engine_disp_gf119.c,v 1.4 2021/12/18 23:45:35 riastradh Exp $	*/

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_disp_gf119.c,v 1.4 2021/12/18 23:45:35 riastradh Exp $");

#include "nv50.h"
#include "head.h"
#include "ior.h"
#include "channv50.h"
#include "rootnv50.h"

#include <core/ramht.h>
#include <subdev/timer.h>

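/* Supervisor interrupt bottom half.  The three stages signalled in
 * disp->super appear to follow the usual EVO modeset handshake: stage 1
 * quiesces the heads that are about to change, stage 2 reroutes outputs
 * and programs the new configuration, and stage 3 completes the update.
 * The per-head status words latched at 0x6101d4 select which heads each
 * stage acts on.
 */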
void
gf119_disp_super(struct work_struct *work)
{
	struct nv50_disp *disp =
		container_of(work, struct nv50_disp, supervisor);
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_head *head;
	u32 mask[4];

	nvkm_debug(subdev, "supervisor %d\n", ffs(disp->super));
	list_for_each_entry(head, &disp->base.head, head) {
		mask[head->id] = nvkm_rd32(device, 0x6101d4 + (head->id * 0x800));
		HEAD_DBG(head, "%08x", mask[head->id]);
	}

	if (disp->super & 0x00000001) {
		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
		nv50_disp_super_1(disp);
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_1_0(disp, head);
		}
	} else
	if (disp->super & 0x00000002) {
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_2_0(disp, head);
		}
		nvkm_outp_route(&disp->base);
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00010000))
				continue;
			nv50_disp_super_2_1(disp, head);
		}
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_2_2(disp, head);
		}
	} else
	if (disp->super & 0x00000004) {
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_3_0(disp, head);
		}
	}

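	/* Clear the latched per-head status and (apparently) acknowledge
	 * completion of this supervisor stage.
	 */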
	list_for_each_entry(head, &disp->base.head, head)
		nvkm_wr32(device, 0x6101d4 + (head->id * 0x800), 0x00000000);
	nvkm_wr32(device, 0x6101d0, 0x80000000);
}

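/* Decode and report a channel exception.  The status word at 0x6101f0
 * appears to encode an error type and the offending method; the method
 * data and an error code are latched in the two following registers.
 */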
void
gf119_disp_intr_error(struct nv50_disp *disp, int chid)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x6101f0 + (chid * 12));
	u32 type = (stat & 0x00007000) >> 12;
	u32 mthd = (stat & 0x00000ffc);
	u32 data = nvkm_rd32(device, 0x6101f4 + (chid * 12));
	u32 code = nvkm_rd32(device, 0x6101f8 + (chid * 12));
	const struct nvkm_enum *reason =
		nvkm_enum_find(nv50_disp_intr_error_type, type);

	nvkm_error(subdev, "chid %d stat %08x reason %d [%s] mthd %04x "
			   "data %08x code %08x\n",
		   chid, stat, type, reason ? reason->name : "",
		   mthd, data, code);

	if (chid < ARRAY_SIZE(disp->chan)) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
			break;
		default:
			break;
		}
	}

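	/* acknowledge the channel's error interrupt and (apparently) reset
	 * its error state so the channel can continue
	 */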
	nvkm_wr32(device, 0x61009c, (1 << chid));
	nvkm_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
}

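/* Top-level display interrupt handler.  Bit 0 appears to carry channel
 * notifications, bit 1 channel exceptions, bit 20 supervisor requests,
 * and bits 24+ per-head (vblank) events.
 */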
void
gf119_disp_intr(struct nv50_disp *disp)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_head *head;
	u32 intr = nvkm_rd32(device, 0x610088);

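	/* channel notifications: forward each pending channel as a uevent */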
	if (intr & 0x00000001) {
		u32 stat = nvkm_rd32(device, 0x61008c);
		while (stat) {
			int chid = __ffs(stat); stat &= ~(1 << chid);
			nv50_disp_chan_uevent_send(disp, chid);
			nvkm_wr32(device, 0x61008c, 1 << chid);
		}
		intr &= ~0x00000001;
	}

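	/* channel exception: hand the first pending channel to intr_error */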
	if (intr & 0x00000002) {
		u32 stat = nvkm_rd32(device, 0x61009c);
		int chid = ffs(stat) - 1;
		if (chid >= 0)
			disp->func->intr_error(disp, chid);
		intr &= ~0x00000002;
	}

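	/* supervisor request: latch the requested stage and defer the work
	 * to the supervisor workqueue
	 */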
	if (intr & 0x00100000) {
		u32 stat = nvkm_rd32(device, 0x6100ac);
		if (stat & 0x00000007) {
			disp->super = (stat & 0x00000007);
			queue_work(disp->wq, &disp->supervisor);
			nvkm_wr32(device, 0x6100ac, disp->super);
			stat &= ~0x00000007;
		}

		if (stat) {
			nvkm_warn(subdev, "intr24 %08x\n", stat);
			nvkm_wr32(device, 0x6100ac, stat);
		}

		intr &= ~0x00100000;
	}

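	/* per-head interrupts: only vblank is handled here */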
	list_for_each_entry(head, &disp->base.head, head) {
		const u32 hoff = head->id * 0x800;
		u32 mask = 0x01000000 << head->id;
		if (mask & intr) {
			u32 stat = nvkm_rd32(device, 0x6100bc + hoff);
			if (stat & 0x00000001)
				nvkm_disp_vblank(&disp->base, head->id);
			nvkm_mask(device, 0x6100bc + hoff, 0, 0);
			nvkm_rd32(device, 0x6100c0 + hoff);
		}
	}
}

void
gf119_disp_fini(struct nv50_disp *disp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	/* disable all interrupts */
	nvkm_wr32(device, 0x6100b0, 0x00000000);
}

int
gf119_disp_init(struct nv50_disp *disp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_head *head;
	u32 tmp;
	int i;

	/* The below segments of code copying values from one register to
	 * another appear to inform EVO of the display capabilities or
	 * something similar.
	 */

	/* ... CRTC caps */
	list_for_each_entry(head, &disp->base.head, head) {
		const u32 hoff = head->id * 0x800;
		tmp = nvkm_rd32(device, 0x616104 + hoff);
		nvkm_wr32(device, 0x6101b4 + hoff, tmp);
		tmp = nvkm_rd32(device, 0x616108 + hoff);
		nvkm_wr32(device, 0x6101b8 + hoff, tmp);
		tmp = nvkm_rd32(device, 0x61610c + hoff);
		nvkm_wr32(device, 0x6101bc + hoff, tmp);
	}

	/* ... DAC caps */
	for (i = 0; i < disp->dac.nr; i++) {
		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
		nvkm_wr32(device, 0x6101c0 + (i * 0x800), tmp);
	}

	/* ... SOR caps */
	for (i = 0; i < disp->sor.nr; i++) {
		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
		nvkm_wr32(device, 0x6301c4 + (i * 0x800), tmp);
	}

	/* steal display away from vbios, or something like that */
	if (nvkm_rd32(device, 0x6100ac) & 0x00000100) {
		nvkm_wr32(device, 0x6100ac, 0x00000100);
		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
				break;
		) < 0)
			return -EBUSY;
	}

	/* point at display engine memory area (hash table, objects) */
	nvkm_wr32(device, 0x610010, (disp->inst->addr >> 8) | 9);

	/* enable supervisor interrupts, disable everything else */
	nvkm_wr32(device, 0x610090, 0x00000000);
	nvkm_wr32(device, 0x6100a0, 0x00000000);
	nvkm_wr32(device, 0x6100b0, 0x00000307);

	/* disable underflow reporting, preventing an intermittent issue
	 * on some gk104 boards where the production vbios left this
	 * setting enabled by default.
	 *
	 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
	 */
	list_for_each_entry(head, &disp->base.head, head) {
		const u32 hoff = head->id * 0x800;
		nvkm_mask(device, 0x616308 + hoff, 0x00000111, 0x00000010);
	}

	return 0;
}

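/* GF119 display implementation hooks.  Most of the functions above are
 * non-static so that later chipsets can reuse them.
 */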
static const struct nv50_disp_func
gf119_disp = {
	.init = gf119_disp_init,
	.fini = gf119_disp_fini,
	.intr = gf119_disp_intr,
	.intr_error = gf119_disp_intr_error,
	.uevent = &gf119_disp_chan_uevent,
	.super = gf119_disp_super,
	.root = &gf119_disp_root_oclass,
	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
	.dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
	.sor = { .cnt = gf119_sor_cnt, .new = gf119_sor_new },
};

int
gf119_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
	return nv50_disp_new_(&gf119_disp, device, index, pdisp);
}