xref: /openbsd-src/sys/dev/ofw/ofw_misc.c (revision 99fd087599a8791921855f21bd7e36130f39aadc)
1 /*	$OpenBSD: ofw_misc.c,v 1.14 2020/03/01 18:00:12 kettenis Exp $	*/
2 /*
3  * Copyright (c) 2017 Mark Kettenis
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <sys/types.h>
19 #include <sys/systm.h>
20 #include <sys/malloc.h>
21 
22 #include <machine/bus.h>
23 
24 #include <dev/ofw/openfirm.h>
25 #include <dev/ofw/ofw_misc.h>
26 
27 /*
28  * Register maps.
29  */
30 
/*
 * A registered register map: a mapped bus-space region belonging to a
 * device tree node, looked up by node, phandle or compatible string.
 */
struct regmap {
	int			rm_node;	/* device tree node */
	uint32_t		rm_phandle;	/* phandle of rm_node (0 if none) */
	bus_space_tag_t		rm_tag;		/* tag used for the mapping */
	bus_space_handle_t	rm_handle;	/* mapped register space */
	bus_size_t		rm_size;	/* size of the mapping in bytes */

	LIST_ENTRY(regmap)	rm_list;	/* link on the global regmaps list */
};
40 
41 LIST_HEAD(, regmap) regmaps = LIST_HEAD_INITIALIZER(regmap);
42 
43 void
44 regmap_register(int node, bus_space_tag_t tag, bus_space_handle_t handle,
45     bus_size_t size)
46 {
47 	struct regmap *rm;
48 
49 	rm = malloc(sizeof(struct regmap), M_DEVBUF, M_WAITOK);
50 	rm->rm_node = node;
51 	rm->rm_phandle = OF_getpropint(node, "phandle", 0);
52 	rm->rm_tag = tag;
53 	rm->rm_handle = handle;
54 	rm->rm_size = size;
55 	LIST_INSERT_HEAD(&regmaps, rm, rm_list);
56 }
57 
58 struct regmap *
59 regmap_bycompatible(char *compatible)
60 {
61 	struct regmap *rm;
62 
63 	LIST_FOREACH(rm, &regmaps, rm_list) {
64 		if (OF_is_compatible(rm->rm_node, compatible))
65 			return rm;
66 	}
67 
68 	return NULL;
69 }
70 
71 struct regmap *
72 regmap_bynode(int node)
73 {
74 	struct regmap *rm;
75 
76 	LIST_FOREACH(rm, &regmaps, rm_list) {
77 		if (rm->rm_node == node)
78 			return rm;
79 	}
80 
81 	return NULL;
82 }
83 
84 struct regmap *
85 regmap_byphandle(uint32_t phandle)
86 {
87 	struct regmap *rm;
88 
89 	LIST_FOREACH(rm, &regmaps, rm_list) {
90 		if (rm->rm_phandle == phandle)
91 			return rm;
92 	}
93 
94 	return NULL;
95 }
96 
97 void
98 regmap_write_4(struct regmap *rm, bus_size_t offset, uint32_t value)
99 {
100 	KASSERT(offset <= rm->rm_size - sizeof(uint32_t));
101 	bus_space_write_4(rm->rm_tag, rm->rm_handle, offset, value);
102 }
103 
104 uint32_t
105 regmap_read_4(struct regmap *rm, bus_size_t offset)
106 {
107 	KASSERT(offset <= rm->rm_size - sizeof(uint32_t));
108 	return bus_space_read_4(rm->rm_tag, rm->rm_handle, offset);
109 }
110 
111 
112 /*
113  * PHY support.
114  */
115 
/* All registered PHY providers, newest first. */
LIST_HEAD(, phy_device) phy_devices =
	LIST_HEAD_INITIALIZER(phy_devices);
118 
119 void
120 phy_register(struct phy_device *pd)
121 {
122 	pd->pd_cells = OF_getpropint(pd->pd_node, "#phy-cells", 0);
123 	pd->pd_phandle = OF_getpropint(pd->pd_node, "phandle", 0);
124 	if (pd->pd_phandle == 0)
125 		return;
126 
127 	LIST_INSERT_HEAD(&phy_devices, pd, pd_list);
128 }
129 
130 int
131 phy_enable_cells(uint32_t *cells)
132 {
133 	struct phy_device *pd;
134 	uint32_t phandle = cells[0];
135 
136 	LIST_FOREACH(pd, &phy_devices, pd_list) {
137 		if (pd->pd_phandle == phandle)
138 			break;
139 	}
140 
141 	if (pd && pd->pd_enable)
142 		return pd->pd_enable(pd->pd_cookie, &cells[1]);
143 
144 	return -1;
145 }
146 
147 uint32_t *
148 phy_next_phy(uint32_t *cells)
149 {
150 	uint32_t phandle = cells[0];
151 	int node, ncells;
152 
153 	node = OF_getnodebyphandle(phandle);
154 	if (node == 0)
155 		return NULL;
156 
157 	ncells = OF_getpropint(node, "#phy-cells", 0);
158 	return cells + ncells + 1;
159 }
160 
/*
 * Enable the idx'th PHY referenced by a node's "phys" property.
 * Returns -1 if the property is missing, the index is out of range,
 * or the provider could not enable the PHY.
 */
int
phy_enable_idx(int node, int idx)
{
	uint32_t *phys;
	uint32_t *phy;
	int rv = -1;
	int len;

	len = OF_getproplen(node, "phys");
	if (len <= 0)
		return -1;

	phys = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "phys", phys, len);

	/*
	 * Walk the entries; each is a phandle followed by that
	 * provider's #phy-cells argument cells.  Nothing is enabled
	 * until idx has counted down to 0; then the entry is enabled
	 * and the walk stops.  phy_next_phy() returns NULL for an
	 * unresolvable phandle, which also terminates the loop.
	 */
	phy = phys;
	while (phy && phy < phys + (len / sizeof(uint32_t))) {
		if (idx <= 0)
			rv = phy_enable_cells(phy);
		if (idx == 0)
			break;
		phy = phy_next_phy(phy);
		idx--;
	}

	free(phys, M_TEMP, len);
	return rv;
}
189 
/*
 * Enable the PHY named "name" in a node's "phy-names" property.
 * Returns -1 if the name is not found or enabling fails.
 */
int
phy_enable(int node, const char *name)
{
	int idx = OF_getindex(node, name, "phy-names");

	return (idx == -1) ? -1 : phy_enable_idx(node, idx);
}
201 
202 /*
203  * I2C support.
204  */
205 
206 LIST_HEAD(, i2c_bus) i2c_busses =
207 	LIST_HEAD_INITIALIZER(i2c_bus);
208 
209 void
210 i2c_register(struct i2c_bus *ib)
211 {
212 	ib->ib_phandle = OF_getpropint(ib->ib_node, "phandle", 0);
213 	if (ib->ib_phandle == 0)
214 		return;
215 
216 	LIST_INSERT_HEAD(&i2c_busses, ib, ib_list);
217 }
218 
219 struct i2c_controller *
220 i2c_bynode(int node)
221 {
222 	struct i2c_bus *ib;
223 
224 	LIST_FOREACH(ib, &i2c_busses, ib_list) {
225 		if (ib->ib_node == node)
226 			return ib->ib_ic;
227 	}
228 
229 	return NULL;
230 }
231 
232 struct i2c_controller *
233 i2c_byphandle(uint32_t phandle)
234 {
235 	struct i2c_bus *ib;
236 
237 	LIST_FOREACH(ib, &i2c_busses, ib_list) {
238 		if (ib->ib_phandle == phandle)
239 			return ib->ib_ic;
240 	}
241 
242 	return NULL;
243 }
244 
245 /*
246  * SFP support.
247  */
248 
/* All registered SFP cages, newest first. */
LIST_HEAD(, sfp_device) sfp_devices =
	LIST_HEAD_INITIALIZER(sfp_devices);
251 
252 void
253 sfp_register(struct sfp_device *sd)
254 {
255 	sd->sd_phandle = OF_getpropint(sd->sd_node, "phandle", 0);
256 	if (sd->sd_phandle == 0)
257 		return;
258 
259 	LIST_INSERT_HEAD(&sfp_devices, sd, sd_list);
260 }
261 
262 int
263 sfp_get_sffpage(uint32_t phandle, struct if_sffpage *sff)
264 {
265 	struct sfp_device *sd;
266 
267 	LIST_FOREACH(sd, &sfp_devices, sd_list) {
268 		if (sd->sd_phandle == phandle)
269 			return sd->sd_get_sffpage(sd->sd_cookie, sff);
270 	}
271 
272 	return ENXIO;
273 }
274 
275 /*
276  * PWM support.
277  */
278 
/* All registered PWM controllers, newest first. */
LIST_HEAD(, pwm_device) pwm_devices =
	LIST_HEAD_INITIALIZER(pwm_devices);
281 
282 void
283 pwm_register(struct pwm_device *pd)
284 {
285 	pd->pd_cells = OF_getpropint(pd->pd_node, "#pwm-cells", 0);
286 	pd->pd_phandle = OF_getpropint(pd->pd_node, "phandle", 0);
287 	if (pd->pd_phandle == 0)
288 		return;
289 
290 	LIST_INSERT_HEAD(&pwm_devices, pd, pd_list);
291 
292 }
293 
294 int
295 pwm_init_state(uint32_t *cells, struct pwm_state *ps)
296 {
297 	struct pwm_device *pd;
298 
299 	LIST_FOREACH(pd, &pwm_devices, pd_list) {
300 		if (pd->pd_phandle == cells[0]) {
301 			memset(ps, 0, sizeof(struct pwm_state));
302 			pd->pd_get_state(pd->pd_cookie, &cells[1], ps);
303 			ps->ps_pulse_width = 0;
304 			if (pd->pd_cells > 2)
305 				ps->ps_period = cells[2];
306 			if (pd->pd_cells > 3)
307 				ps->ps_flags = cells[3];
308 			return 0;
309 		}
310 	}
311 
312 	return ENXIO;
313 }
314 
315 int
316 pwm_get_state(uint32_t *cells, struct pwm_state *ps)
317 {
318 	struct pwm_device *pd;
319 
320 	LIST_FOREACH(pd, &pwm_devices, pd_list) {
321 		if (pd->pd_phandle == cells[0])
322 			return pd->pd_get_state(pd->pd_cookie, &cells[1], ps);
323 	}
324 
325 	return ENXIO;
326 }
327 
328 int
329 pwm_set_state(uint32_t *cells, struct pwm_state *ps)
330 {
331 	struct pwm_device *pd;
332 
333 	LIST_FOREACH(pd, &pwm_devices, pd_list) {
334 		if (pd->pd_phandle == cells[0])
335 			return pd->pd_set_state(pd->pd_cookie, &cells[1], ps);
336 	}
337 
338 	return ENXIO;
339 }
340 
341 /*
342  * Non-volatile memory support.
343  */
344 
/* All registered nvmem providers, newest first. */
LIST_HEAD(, nvmem_device) nvmem_devices =
	LIST_HEAD_INITIALIZER(nvmem_devices);

/*
 * A single nvmem cell: a sub-range of a provider's storage, described
 * by the "reg" property of a child node of the provider.
 */
struct nvmem_cell {
	uint32_t	nc_phandle;	/* phandle of the cell node */
	struct nvmem_device *nc_nd;	/* provider backing this cell */
	bus_addr_t	nc_addr;	/* offset within the provider */
	bus_size_t	nc_size;	/* size of the cell in bytes */

	LIST_ENTRY(nvmem_cell) nc_list;	/* link on the global cell list */
};

/* All registered nvmem cells, newest first. */
LIST_HEAD(, nvmem_cell) nvmem_cells =
	LIST_HEAD_INITIALIZER(nvmem_cells);
359 
360 void
361 nvmem_register_child(int node, struct nvmem_device *nd)
362 {
363 	struct nvmem_cell *nc;
364 	uint32_t phandle;
365 	uint32_t reg[2];
366 
367 	phandle = OF_getpropint(node, "phandle", 0);
368 	if (phandle == 0)
369 		return;
370 
371 	if (OF_getpropintarray(node, "reg", reg, sizeof(reg)) != sizeof(reg))
372 		return;
373 
374 	nc = malloc(sizeof(struct nvmem_cell), M_DEVBUF, M_WAITOK);
375 	nc->nc_phandle = phandle;
376 	nc->nc_nd = nd;
377 	nc->nc_addr = reg[0];
378 	nc->nc_size = reg[1];
379 	LIST_INSERT_HEAD(&nvmem_cells, nc, nc_list);
380 }
381 
382 void
383 nvmem_register(struct nvmem_device *nd)
384 {
385 	int node;
386 
387 	nd->nd_phandle = OF_getpropint(nd->nd_node, "phandle", 0);
388 	if (nd->nd_phandle)
389 		LIST_INSERT_HEAD(&nvmem_devices, nd, nd_list);
390 
391 	for (node = OF_child(nd->nd_node); node; node = OF_peer(node))
392 		nvmem_register_child(node, nd);
393 }
394 
395 int
396 nvmem_read(uint32_t phandle, bus_addr_t addr, void *data, bus_size_t size)
397 {
398 	struct nvmem_device *nd;
399 
400 	LIST_FOREACH(nd, &nvmem_devices, nd_list) {
401 		if (nd->nd_phandle == phandle)
402 			return nd->nd_read(nd->nd_cookie, addr, data, size);
403 	}
404 
405 	return ENXIO;
406 }
407 
408 int
409 nvmem_read_cell(int node, const char *name, void *data, bus_size_t size)
410 {
411 	struct nvmem_device *nd;
412 	struct nvmem_cell *nc;
413 	uint32_t phandle, *phandles;
414 	int id, len;
415 
416 	id = OF_getindex(node, name, "nvmem-cell-names");
417 	if (id < 0)
418 		return ENXIO;
419 
420 	len = OF_getproplen(node, "nvmem-cells");
421 	if (len <= 0)
422 		return ENXIO;
423 
424 	phandles = malloc(len, M_TEMP, M_WAITOK);
425 	OF_getpropintarray(node, "nvmem-cells", phandles, len);
426 	phandle = phandles[id];
427 	free(phandles, M_TEMP, len);
428 
429 	LIST_FOREACH(nc, &nvmem_cells, nc_list) {
430 		if (nc->nc_phandle == phandle)
431 			break;
432 	}
433 	if (nc == NULL)
434 		return ENXIO;
435 
436 	if (size > nc->nc_size)
437 		return EINVAL;
438 
439 	nd = nc->nc_nd;
440 	return nd->nd_read(nd->nd_cookie, nc->nc_addr, data, size);
441 }
442 
443 /* Video interface support */
444 
/* All registered video interface devices, newest first. */
LIST_HEAD(, video_device) video_devices =
	LIST_HEAD_INITIALIZER(video_devices);
447 
448 void
449 video_register(struct video_device *vd)
450 {
451 	vd->vd_phandle = OF_getpropint(vd->vd_node, "phandle", 0);
452 	if (vd->vd_phandle == 0)
453 		return;
454 	LIST_INSERT_HEAD(&video_devices, vd, vd_list);
455 }
456 
/*
 * Activate every endpoint pair under a port node: for each endpoint
 * child that has both a phandle and a "remote-endpoint" reference,
 * activate the local endpoint and then its remote peer.  Failures on
 * individual endpoints are deliberately skipped (best effort); the
 * only error returned is ENXIO when the port phandle itself cannot be
 * resolved.
 */
int
video_port_activate(uint32_t phandle, struct drm_device *ddev)
{
	uint32_t ep, rep;
	int node, error;

	node = OF_getnodebyphandle(phandle);
	if (node == 0)
		return ENXIO;

	for (node = OF_child(node); node; node = OF_peer(node)) {
		ep = OF_getpropint(node, "phandle", 0);
		rep = OF_getpropint(node, "remote-endpoint", 0);
		if (ep == 0 || rep == 0)
			continue;
		/* Activate our side first; skip the pair on failure. */
		error = video_endpoint_activate(ep, ddev);
		if (error)
			continue;
		error = video_endpoint_activate(rep, ddev);
		if (error)
			continue;
	}

	return 0;
}
482 
483 int
484 video_endpoint_activate(uint32_t phandle, struct drm_device *ddev)
485 {
486 	struct video_device *vd;
487 
488 	LIST_FOREACH(vd, &video_devices, vd_list) {
489 		if (vd->vd_phandle == phandle)
490 			break;
491 	}
492 	if (vd == NULL)
493 		return ENXIO;
494 
495 	return vd->vd_ep_activate(vd->vd_cookie, ddev);
496 }
497