/* $NetBSD: nouveau_nvkm_engine_xtensa.c,v 1.3 2021/12/18 23:45:34 riastradh Exp $ */

/*
 * Copyright 2013 Ilia Mirkin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_xtensa.c,v 1.3 2021/12/18 23:45:34 riastradh Exp $");

#include <engine/xtensa.h>

#include <core/gpuobj.h>
#include <engine/fifo.h>

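/*
 * Enumerate the object classes the engine exposes to the fifo: walk the
 * chip-specific sclass list, returning the entry at the requested index,
 * or the total number of classes once the list has been exhausted.
 */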
static int
nvkm_xtensa_oclass_get(struct nvkm_oclass *oclass, int index)
{
        struct nvkm_xtensa *xtensa = nvkm_xtensa(oclass->engine);
        int c = 0;

        while (xtensa->func->sclass[c].oclass) {
                if (c++ == index) {
                        oclass->base = xtensa->func->sclass[index];
                        return index;
                }
        }

        return c;
}

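/*
 * Bind a channel context for the engine: a 0x10000-byte gpuobj is
 * allocated within the given parent to hold the context state.
 */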
static int
nvkm_xtensa_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
                        int align, struct nvkm_gpuobj **pgpuobj)
{
        return nvkm_gpuobj_new(object->engine->subdev.device, 0x10000, align,
                               true, parent, pgpuobj);
}

static const struct nvkm_object_func
nvkm_xtensa_cclass = {
        .bind = nvkm_xtensa_cclass_bind,
};

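/*
 * Interrupt handler.  Most of the registers touched here are not
 * publicly documented (hence the unk* names): warn if the watchdog bit
 * fired, acknowledge whatever interrupt bits were pending, and write
 * the chip-specific value back into FIFO_CTRL once a particular (not
 * fully understood) status pattern is observed with no interrupts left.
 */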
static void
nvkm_xtensa_intr(struct nvkm_engine *engine)
{
        struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
        struct nvkm_subdev *subdev = &xtensa->engine.subdev;
        struct nvkm_device *device = subdev->device;
        const u32 base = xtensa->addr;
        u32 unk104 = nvkm_rd32(device, base + 0xd04);
        u32 intr = nvkm_rd32(device, base + 0xc20);
        u32 chan = nvkm_rd32(device, base + 0xc28);
        u32 unk10c = nvkm_rd32(device, base + 0xd0c);

        if (intr & 0x10)
                nvkm_warn(subdev, "Watchdog interrupt, engine hung.\n");
        nvkm_wr32(device, base + 0xc20, intr);
        intr = nvkm_rd32(device, base + 0xc20);
        if (unk104 == 0x10001 && unk10c == 0x200 && chan && !intr) {
                nvkm_debug(subdev, "Enabling FIFO_CTRL\n");
                nvkm_mask(device, xtensa->addr + 0xd94, 0, xtensa->func->fifo_val);
        }
}

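/*
 * Stop the engine: mask its interrupts and disable the FIFO.  The
 * uploaded firmware image is kept across a suspend so init can reuse
 * it, and is only released on a full teardown.
 */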
static int
nvkm_xtensa_fini(struct nvkm_engine *engine, bool suspend)
{
        struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
        struct nvkm_device *device = xtensa->engine.subdev.device;
        const u32 base = xtensa->addr;

        nvkm_wr32(device, base + 0xd84, 0); /* INTR_EN */
        nvkm_wr32(device, base + 0xd94, 0); /* FIFO_CTRL */

        if (!suspend)
                nvkm_memory_unref(&xtensa->gpu_fw);
        return 0;
}

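/*
 * Bring the engine up.  On the first init the external microcode
 * ("nouveau/nv84_xuc%03x", keyed on the unit's MMIO base) is fetched
 * and copied into a 0x40000-byte instance-memory buffer; the Xtensa is
 * then pointed at that region and its interrupts are unmasked.  Several
 * of the register writes below remain unexplained, hence the "??"
 * annotations.
 */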
static int
nvkm_xtensa_init(struct nvkm_engine *engine)
{
        struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
        struct nvkm_subdev *subdev = &xtensa->engine.subdev;
        struct nvkm_device *device = subdev->device;
        const u32 base = xtensa->addr;
        const struct firmware *fw;
        char name[32];
        int i, ret;
        u64 addr, size;
        u32 tmp;

        if (!xtensa->gpu_fw) {
                snprintf(name, sizeof(name), "nouveau/nv84_xuc%03x",
                         xtensa->addr >> 12);

                ret = request_firmware(&fw, name, device->dev);
                if (ret) {
                        nvkm_warn(subdev, "unable to load firmware %s\n", name);
                        return ret;
                }

                if (fw->size > 0x40000) {
                        nvkm_warn(subdev, "firmware %s too large\n", name);
                        release_firmware(fw);
                        return -EINVAL;
                }

                ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                                      0x40000, 0x1000, false,
                                      &xtensa->gpu_fw);
                if (ret) {
                        release_firmware(fw);
                        return ret;
                }

                nvkm_kmap(xtensa->gpu_fw);
                for (i = 0; i < fw->size / 4; i++)
                        nvkm_wo32(xtensa->gpu_fw, i * 4, *((u32 *)fw->data + i));
                nvkm_done(xtensa->gpu_fw);
                release_firmware(fw);
        }

        addr = nvkm_memory_addr(xtensa->gpu_fw);
        size = nvkm_memory_size(xtensa->gpu_fw);

        nvkm_wr32(device, base + 0xd10, 0x1fffffff); /* ?? */
        nvkm_wr32(device, base + 0xd08, 0x0fffffff); /* ?? */

        nvkm_wr32(device, base + 0xd28, xtensa->func->unkd28); /* ?? */
        nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */
        nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */

        nvkm_wr32(device, base + 0xcc0, addr >> 8); /* XT_REGION_BASE */
        nvkm_wr32(device, base + 0xcc4, 0x1c); /* XT_REGION_SETUP */
        nvkm_wr32(device, base + 0xcc8, size >> 8); /* XT_REGION_LIMIT */

        tmp = nvkm_rd32(device, 0x0);
        nvkm_wr32(device, base + 0xde0, tmp); /* SCRATCH_H2X */

        nvkm_wr32(device, base + 0xce8, 0xf); /* XT_REGION_SETUP */

        nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */
        nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */
        return 0;
}

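/*
 * Destructor: nothing engine-specific to tear down, just return the
 * containing nvkm_xtensa so the core can free it.
 */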
static void *
nvkm_xtensa_dtor(struct nvkm_engine *engine)
{
        return nvkm_xtensa(engine);
}

static const struct nvkm_engine_func
nvkm_xtensa = {
        .dtor = nvkm_xtensa_dtor,
        .init = nvkm_xtensa_init,
        .fini = nvkm_xtensa_fini,
        .intr = nvkm_xtensa_intr,
        .fifo.sclass = nvkm_xtensa_oclass_get,
        .cclass = &nvkm_xtensa_cclass,
};

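/*
 * Common constructor used by the chip-specific Xtensa-based engines
 * (e.g. the G84-era VP and BSP units): allocate the wrapper, record the
 * per-chip function table and MMIO base, and run the generic engine
 * constructor.
 *
 * A rough usage sketch follows; the class, fifo_val, unkd28 and MMIO
 * base values are illustrative placeholders, not taken from a real
 * chip implementation:
 *
 *      static const struct nvkm_xtensa_func
 *      example_xtensa = {
 *              .fifo_val = 0x111,
 *              .unkd28 = 0x9c544,
 *              .sclass = {
 *                      { -1, -1, NV74_VP2 },
 *                      {}
 *              }
 *      };
 *
 *      int
 *      example_xtensa_new(struct nvkm_device *device, int index,
 *                         struct nvkm_engine **pengine)
 *      {
 *              return nvkm_xtensa_new_(&example_xtensa, device, index,
 *                                      true, 0x00f000, pengine);
 *      }
 */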
int
nvkm_xtensa_new_(const struct nvkm_xtensa_func *func,
                 struct nvkm_device *device, int index, bool enable,
                 u32 addr, struct nvkm_engine **pengine)
{
        struct nvkm_xtensa *xtensa;

        if (!(xtensa = kzalloc(sizeof(*xtensa), GFP_KERNEL)))
                return -ENOMEM;
        xtensa->func = func;
        xtensa->addr = addr;
        *pengine = &xtensa->engine;

        return nvkm_engine_ctor(&nvkm_xtensa, device, index,
                                enable, &xtensa->engine);
}