/*	$NetBSD: nouveau_nvkm_engine_falcon.c,v 1.3 2021/12/18 23:45:34 riastradh Exp $	*/

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_falcon.c,v 1.3 2021/12/18 23:45:34 riastradh Exp $");

#include <engine/falcon.h>

#include <core/gpuobj.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <engine/fifo.h>

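/*
 * Enumerate the object classes a falcon engine provides: walk the
 * NULL-terminated sclass table from the engine's nvkm_falcon_func and,
 * if the requested index exists, copy that entry into oclass->base.
 * Returns the index on a hit, or the number of table entries otherwise.
 */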
static int
nvkm_falcon_oclass_get(struct nvkm_oclass *oclass, int index)
{
	struct nvkm_falcon *falcon = nvkm_falcon(oclass->engine);
	int c = 0;

	while (falcon->func->sclass[c].oclass) {
		if (c++ == index) {
			oclass->base = falcon->func->sclass[index];
			return index;
		}
	}

	return c;
}

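/*
 * Bind an engine context object: back the falcon channel context with a
 * 256-byte GPU object allocated from the given parent instance object.
 */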
static int
nvkm_falcon_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
			int align, struct nvkm_gpuobj **pgpuobj)
{
	return nvkm_gpuobj_new(object->engine->subdev.device, 256,
			       align, true, parent, pgpuobj);
}

static const struct nvkm_object_func
nvkm_falcon_cclass = {
	.bind = nvkm_falcon_cclass_bind,
};

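/*
 * Interrupt handler.  The status read from +0x008 is filtered against the
 * destination register at +0x01c (which appears to select the bits routed
 * to the host), and the channel currently owning the falcon is looked up
 * from the instance address at +0x050.  Bit 0x40 (apparently a method/
 * command interrupt) is forwarded to the engine-specific handler, bit 0x10
 * logs "ucode halted", and anything left over is reported and acked.
 */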
static void
nvkm_falcon_intr(struct nvkm_engine *engine)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);
	struct nvkm_subdev *subdev = &falcon->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 base = falcon->addr;
	u32 dest = nvkm_rd32(device, base + 0x01c);
	u32 intr = nvkm_rd32(device, base + 0x008) & dest & ~(dest >> 16);
	u32 inst = nvkm_rd32(device, base + 0x050) & 0x3fffffff;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;

	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);

	if (intr & 0x00000040) {
		if (falcon->func->intr) {
			falcon->func->intr(falcon, chan);
			nvkm_wr32(device, base + 0x004, 0x00000040);
			intr &= ~0x00000040;
		}
	}

	if (intr & 0x00000010) {
		nvkm_debug(subdev, "ucode halted\n");
		nvkm_wr32(device, base + 0x004, 0x00000010);
		intr &= ~0x00000010;
	}

	if (intr) {
		nvkm_error(subdev, "intr %08x\n", intr);
		nvkm_wr32(device, base + 0x004, intr);
	}

	nvkm_fifo_chan_put(device->fifo, flags, &chan);
}

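/*
 * Stop the falcon.  On a full shutdown (suspend == false) the bootstrap
 * image and any externally loaded ucode are released.  If the engine is
 * still enabled at the MC, the FIFO/context-switch enables at +0x048 are
 * cleared and all interrupts are masked off.
 */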
static int
nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);
	struct nvkm_device *device = falcon->engine.subdev.device;
	const u32 base = falcon->addr;

	if (!suspend) {
		nvkm_memory_unref(&falcon->core);
		if (falcon->external) {
			vfree(falcon->data.data);
			vfree(falcon->code.data);
			falcon->code.data = NULL;
		}
	}

	if (nvkm_mc_enabled(device, engine->subdev.index)) {
		nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
		nvkm_wr32(device, base + 0x014, 0xffffffff);
	}
	return 0;
}

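/*
 * Copy a firmware image into vmalloc()ed memory so it can outlive
 * release_firmware().  Returns NULL if the allocation fails.
 */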
static void *
vmemdup(const void *src, size_t len)
{
	void *p = vmalloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

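/*
 * One-time setup: determine the falcon version and secret level (either
 * hard-coded for chipsets before nva3 plus nvaa/nvac, or decoded from the
 * capability register at +0x12c), and read the code/data memory limits
 * from +0x108.
 */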
static int
nvkm_falcon_oneinit(struct nvkm_engine *engine)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);
	struct nvkm_subdev *subdev = &falcon->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 base = falcon->addr;
	u32 caps;

	/* determine falcon capabilities */
	if (device->chipset <  0xa3 ||
	    device->chipset == 0xaa || device->chipset == 0xac) {
		falcon->version = 0;
		falcon->secret  = (falcon->addr == 0x087000) ? 1 : 0;
	} else {
		caps = nvkm_rd32(device, base + 0x12c);
		falcon->version = (caps & 0x0000000f);
		falcon->secret  = (caps & 0x00000030) >> 4;
	}

	caps = nvkm_rd32(device, base + 0x108);
	falcon->code.limit = (caps & 0x000001ff) << 8;
	falcon->data.limit = (caps & 0x0003fe00) >> 1;

	nvkm_debug(subdev, "falcon version: %d\n", falcon->version);
	nvkm_debug(subdev, "secret level: %d\n", falcon->secret);
	nvkm_debug(subdev, "code limit: %d\n", falcon->code.limit);
	nvkm_debug(subdev, "data limit: %d\n", falcon->data.limit);
	return 0;
}

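/*
 * Bring the falcon up: wait for secret-level ucode to signal 'halted',
 * mask all interrupts, then locate ucode.  If the implementation supplies
 * none, a self-bootstrapping "nouveau/nv%02x_fuc%03x" image is tried
 * first, then separate "...%03xc"/"...%03xd" code/data segments.  The
 * image is uploaded (self-bootstrapping images via a copy placed in
 * instance memory, segmented images written directly into the falcon's
 * code/data ports) and the microcontroller is started.
 */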
static int
nvkm_falcon_init(struct nvkm_engine *engine)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);
	struct nvkm_subdev *subdev = &falcon->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const struct firmware *fw;
	char name[32] = "internal";
	const u32 base = falcon->addr;
	int ret, i;

	/* wait for 'uc halted' to be signalled before continuing */
	if (falcon->secret && falcon->version < 4) {
		if (!falcon->version) {
			nvkm_msec(device, 2000,
				if (nvkm_rd32(device, base + 0x008) & 0x00000010)
					break;
			);
		} else {
			nvkm_msec(device, 2000,
				if (!(nvkm_rd32(device, base + 0x180) & 0x80000000))
					break;
			);
		}
		nvkm_wr32(device, base + 0x004, 0x00000010);
	}

	/* disable all interrupts */
	nvkm_wr32(device, base + 0x014, 0xffffffff);

	/* no default ucode provided by the engine implementation, try and
	 * locate a "self-bootstrapping" firmware image for the engine
	 */
	if (!falcon->code.data) {
		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
			 device->chipset, falcon->addr >> 12);

		ret = request_firmware(&fw, name, device->dev);
		if (ret == 0) {
			falcon->code.data = vmemdup(fw->data, fw->size);
			falcon->code.size = fw->size;
			falcon->data.data = NULL;
			falcon->data.size = 0;
			release_firmware(fw);
		}

		falcon->external = true;
	}

	/* next step is to try and load "static code/data segment" firmware
	 * images for the engine
	 */
	if (!falcon->code.data) {
		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
			 device->chipset, falcon->addr >> 12);

		ret = request_firmware(&fw, name, device->dev);
		if (ret) {
			nvkm_error(subdev, "unable to load firmware data\n");
			return -ENODEV;
		}

		falcon->data.data = vmemdup(fw->data, fw->size);
		falcon->data.size = fw->size;
		release_firmware(fw);
		if (!falcon->data.data)
			return -ENOMEM;

		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
			 device->chipset, falcon->addr >> 12);

		ret = request_firmware(&fw, name, device->dev);
		if (ret) {
			nvkm_error(subdev, "unable to load firmware code\n");
			return -ENODEV;
		}

		falcon->code.data = vmemdup(fw->data, fw->size);
		falcon->code.size = fw->size;
		release_firmware(fw);
		if (!falcon->code.data)
			return -ENOMEM;
	}

	nvkm_debug(subdev, "firmware: %s (%s)\n", name, falcon->data.data ?
		   "static code/data segments" : "self-bootstrapping");

	/* ensure any "self-bootstrapping" firmware image is in vram */
	if (!falcon->data.data && !falcon->core) {
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      falcon->code.size, 256, false,
				      &falcon->core);
		if (ret) {
			nvkm_error(subdev, "core allocation failed, %d\n", ret);
			return ret;
		}

		nvkm_kmap(falcon->core);
		for (i = 0; i < falcon->code.size; i += 4)
			nvkm_wo32(falcon->core, i, falcon->code.data[i / 4]);
		nvkm_done(falcon->core);
	}

	/* upload firmware bootloader (or the full code segments) */
	if (falcon->core) {
		u64 addr = nvkm_memory_addr(falcon->core);
		if (device->card_type < NV_C0)
			nvkm_wr32(device, base + 0x618, 0x04000000);
		else
			nvkm_wr32(device, base + 0x618, 0x00000114);
		nvkm_wr32(device, base + 0x11c, 0);
		nvkm_wr32(device, base + 0x110, addr >> 8);
		nvkm_wr32(device, base + 0x114, 0);
		nvkm_wr32(device, base + 0x118, 0x00006610);
	} else {
		if (falcon->code.size > falcon->code.limit ||
		    falcon->data.size > falcon->data.limit) {
			nvkm_error(subdev, "ucode exceeds falcon limit(s)\n");
			return -EINVAL;
		}

		if (falcon->version < 3) {
			nvkm_wr32(device, base + 0xff8, 0x00100000);
			for (i = 0; i < falcon->code.size / 4; i++)
				nvkm_wr32(device, base + 0xff4, falcon->code.data[i]);
		} else {
			nvkm_wr32(device, base + 0x180, 0x01000000);
			for (i = 0; i < falcon->code.size / 4; i++) {
				if ((i & 0x3f) == 0)
					nvkm_wr32(device, base + 0x188, i >> 6);
				nvkm_wr32(device, base + 0x184, falcon->code.data[i]);
			}
		}
	}

	/* upload data segment (if necessary), zeroing the remainder */
	if (falcon->version < 3) {
		nvkm_wr32(device, base + 0xff8, 0x00000000);
		for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
			nvkm_wr32(device, base + 0xff4, falcon->data.data[i]);
		for (; i < falcon->data.limit; i += 4)
			nvkm_wr32(device, base + 0xff4, 0x00000000);
	} else {
		nvkm_wr32(device, base + 0x1c0, 0x01000000);
		for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
			nvkm_wr32(device, base + 0x1c4, falcon->data.data[i]);
		for (; i < falcon->data.limit / 4; i++)
			nvkm_wr32(device, base + 0x1c4, 0x00000000);
	}

	/* start it running */
	nvkm_wr32(device, base + 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
	nvkm_wr32(device, base + 0x104, 0x00000000); /* ENTRY */
	nvkm_wr32(device, base + 0x100, 0x00000002); /* TRIGGER */
	nvkm_wr32(device, base + 0x048, 0x00000003); /* FIFO | CHSW */

	if (falcon->func->init)
		falcon->func->init(falcon);
	return 0;
}

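/* Destructor: return the containing nvkm_falcon so the core can free it. */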
static void *
nvkm_falcon_dtor(struct nvkm_engine *engine)
{
	return nvkm_falcon(engine);
}

static const struct nvkm_engine_func
nvkm_falcon = {
	.dtor = nvkm_falcon_dtor,
	.oneinit = nvkm_falcon_oneinit,
	.init = nvkm_falcon_init,
	.fini = nvkm_falcon_fini,
	.intr = nvkm_falcon_intr,
	.fifo.sclass = nvkm_falcon_oclass_get,
	.cclass = &nvkm_falcon_cclass,
};

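/*
 * Shared constructor for the per-engine falcon implementations: allocate
 * the nvkm_falcon, record the unit's MMIO base address and any built-in
 * code/data segments from @func, then initialise the base nvkm_engine.
 */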
int
nvkm_falcon_new_(const struct nvkm_falcon_func *func,
		 struct nvkm_device *device, int index, bool enable,
		 u32 addr, struct nvkm_engine **pengine)
{
	struct nvkm_falcon *falcon;

	if (!(falcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
		return -ENOMEM;
	falcon->func = func;
	falcon->addr = addr;
	falcon->code.data = func->code.data;
	falcon->code.size = func->code.size;
	falcon->data.data = func->data.data;
	falcon->data.size = func->data.size;
	*pengine = &falcon->engine;

	return nvkm_engine_ctor(&nvkm_falcon, device, index,
				enable, &falcon->engine);
}
362