xref: /netbsd-src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/falcon/nouveau_nvkm_falcon_base.c (revision 798b8d11ecd8257a8e35c3396210f98abf3d9ade)
1 /*	$NetBSD: nouveau_nvkm_falcon_base.c,v 1.4 2021/12/19 11:34:45 riastradh Exp $	*/
2 
3 /*
4  * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22  * DEALINGS IN THE SOFTWARE.
23  */
24 #include <sys/cdefs.h>
25 __KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_falcon_base.c,v 1.4 2021/12/19 11:34:45 riastradh Exp $");
26 
27 #include "priv.h"
28 
29 #include <subdev/mc.h>
30 #include <subdev/top.h>
31 
32 #include <linux/nbsd-namespace.h>
33 
34 void
nvkm_falcon_load_imem(struct nvkm_falcon * falcon,void * data,u32 start,u32 size,u16 tag,u8 port,bool secure)35 nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
36 		      u32 size, u16 tag, u8 port, bool secure)
37 {
38 	if (secure && !falcon->secret) {
39 		nvkm_warn(falcon->user,
40 			  "writing with secure tag on a non-secure falcon!\n");
41 		return;
42 	}
43 
44 	falcon->func->load_imem(falcon, data, start, size, tag, port,
45 				secure);
46 }
47 
48 void
nvkm_falcon_load_dmem(struct nvkm_falcon * falcon,void * data,u32 start,u32 size,u8 port)49 nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
50 		      u32 size, u8 port)
51 {
52 	mutex_lock(&falcon->dmem_mutex);
53 
54 	falcon->func->load_dmem(falcon, data, start, size, port);
55 
56 	mutex_unlock(&falcon->dmem_mutex);
57 }
58 
59 void
nvkm_falcon_read_dmem(struct nvkm_falcon * falcon,u32 start,u32 size,u8 port,void * data)60 nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port,
61 		      void *data)
62 {
63 	mutex_lock(&falcon->dmem_mutex);
64 
65 	falcon->func->read_dmem(falcon, start, size, port, data);
66 
67 	mutex_unlock(&falcon->dmem_mutex);
68 }
69 
70 void
nvkm_falcon_bind_context(struct nvkm_falcon * falcon,struct nvkm_memory * inst)71 nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *inst)
72 {
73 	if (!falcon->func->bind_context) {
74 		nvkm_error(falcon->user,
75 			   "Context binding not supported on this falcon!\n");
76 		return;
77 	}
78 
79 	falcon->func->bind_context(falcon, inst);
80 }
81 
82 void
nvkm_falcon_set_start_addr(struct nvkm_falcon * falcon,u32 start_addr)83 nvkm_falcon_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
84 {
85 	falcon->func->set_start_addr(falcon, start_addr);
86 }
87 
88 void
nvkm_falcon_start(struct nvkm_falcon * falcon)89 nvkm_falcon_start(struct nvkm_falcon *falcon)
90 {
91 	falcon->func->start(falcon);
92 }
93 
94 int
nvkm_falcon_enable(struct nvkm_falcon * falcon)95 nvkm_falcon_enable(struct nvkm_falcon *falcon)
96 {
97 	struct nvkm_device *device = falcon->owner->device;
98 	enum nvkm_devidx id = falcon->owner->index;
99 	int ret;
100 
101 	nvkm_mc_enable(device, id);
102 	ret = falcon->func->enable(falcon);
103 	if (ret) {
104 		nvkm_mc_disable(device, id);
105 		return ret;
106 	}
107 
108 	return 0;
109 }
110 
111 void
nvkm_falcon_disable(struct nvkm_falcon * falcon)112 nvkm_falcon_disable(struct nvkm_falcon *falcon)
113 {
114 	struct nvkm_device *device = falcon->owner->device;
115 	enum nvkm_devidx id = falcon->owner->index;
116 
117 	/* already disabled, return or wait_idle will timeout */
118 	if (!nvkm_mc_enabled(device, id))
119 		return;
120 
121 	falcon->func->disable(falcon);
122 
123 	nvkm_mc_disable(device, id);
124 }
125 
/* Reset the falcon by cycling it off and back on.  Returns the enable result. */
int
nvkm_falcon_reset(struct nvkm_falcon *falcon)
{
	int ret;

	nvkm_falcon_disable(falcon);
	ret = nvkm_falcon_enable(falcon);
	return ret;
}
132 
133 int
nvkm_falcon_wait_for_halt(struct nvkm_falcon * falcon,u32 ms)134 nvkm_falcon_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
135 {
136 	return falcon->func->wait_for_halt(falcon, ms);
137 }
138 
139 int
nvkm_falcon_clear_interrupt(struct nvkm_falcon * falcon,u32 mask)140 nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
141 {
142 	return falcon->func->clear_interrupt(falcon, mask);
143 }
144 
145 static int
nvkm_falcon_oneinit(struct nvkm_falcon * falcon)146 nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
147 {
148 	const struct nvkm_falcon_func *func = falcon->func;
149 	const struct nvkm_subdev *subdev = falcon->owner;
150 	u32 reg;
151 
152 	if (!falcon->addr) {
153 		falcon->addr = nvkm_top_addr(subdev->device, subdev->index);
154 		if (WARN_ON(!falcon->addr))
155 			return -ENODEV;
156 	}
157 
158 	reg = nvkm_falcon_rd32(falcon, 0x12c);
159 	falcon->version = reg & 0xf;
160 	falcon->secret = (reg >> 4) & 0x3;
161 	falcon->code.ports = (reg >> 8) & 0xf;
162 	falcon->data.ports = (reg >> 12) & 0xf;
163 
164 	reg = nvkm_falcon_rd32(falcon, 0x108);
165 	falcon->code.limit = (reg & 0x1ff) << 8;
166 	falcon->data.limit = (reg & 0x3fe00) >> 1;
167 
168 	if (func->debug) {
169 		u32 val = nvkm_falcon_rd32(falcon, func->debug);
170 		falcon->debug = (val >> 20) & 0x1;
171 	}
172 
173 	return 0;
174 }
175 
176 void
nvkm_falcon_put(struct nvkm_falcon * falcon,const struct nvkm_subdev * user)177 nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
178 {
179 	if (unlikely(!falcon))
180 		return;
181 
182 	mutex_lock(&falcon->mutex);
183 	if (falcon->user == user) {
184 		nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
185 		falcon->user = NULL;
186 	}
187 	mutex_unlock(&falcon->mutex);
188 }
189 
190 int
nvkm_falcon_get(struct nvkm_falcon * falcon,const struct nvkm_subdev * user)191 nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
192 {
193 	int ret = 0;
194 
195 	mutex_lock(&falcon->mutex);
196 	if (falcon->user) {
197 		nvkm_error(user, "%s falcon already acquired by %s!\n",
198 			   falcon->name, nvkm_subdev_name[falcon->user->index]);
199 		mutex_unlock(&falcon->mutex);
200 		return -EBUSY;
201 	}
202 
203 	nvkm_debug(user, "acquired %s falcon\n", falcon->name);
204 	if (!falcon->oneinit)
205 		ret = nvkm_falcon_oneinit(falcon);
206 	falcon->user = user;
207 	mutex_unlock(&falcon->mutex);
208 	return ret;
209 }
210 
/*
 * Destructor hook paired with nvkm_falcon_ctor().  Currently nothing to
 * tear down here; the mutexes are destroyed by nvkm_falcon_del().
 */
void
nvkm_falcon_dtor(struct nvkm_falcon *falcon)
{
}
215 
216 int
nvkm_falcon_ctor(const struct nvkm_falcon_func * func,struct nvkm_subdev * subdev,const char * name,u32 addr,struct nvkm_falcon * falcon)217 nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
218 		 struct nvkm_subdev *subdev, const char *name, u32 addr,
219 		 struct nvkm_falcon *falcon)
220 {
221 	falcon->func = func;
222 	falcon->owner = subdev;
223 	falcon->name = name;
224 	falcon->addr = addr;
225 	mutex_init(&falcon->mutex);
226 	mutex_init(&falcon->dmem_mutex);
227 	return 0;
228 }
229 
230 void
nvkm_falcon_del(struct nvkm_falcon ** pfalcon)231 nvkm_falcon_del(struct nvkm_falcon **pfalcon)
232 {
233 	if (*pfalcon) {
234 		nvkm_falcon_dtor(*pfalcon);
235 		mutex_destroy(&(*pfalcon)->mutex);
236 		mutex_destroy(&(*pfalcon)->dmem_mutex);
237 		kfree(*pfalcon);
238 		*pfalcon = NULL;
239 	}
240 }
241