xref: /openbsd-src/sys/dev/ofw/ofw_misc.c (revision 4e1ee0786f11cc571bd0be17d38e46f635c719fc)
1 /*	$OpenBSD: ofw_misc.c,v 1.33 2021/06/25 17:41:22 patrick Exp $	*/
2 /*
3  * Copyright (c) 2017 Mark Kettenis
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <sys/types.h>
19 #include <sys/device.h>
20 #include <sys/malloc.h>
21 #include <sys/systm.h>
22 
23 #include <net/if.h>
24 #include <net/if_media.h>
25 
26 #include <machine/bus.h>
27 
28 #include <dev/mii/mii.h>
29 #include <dev/mii/miivar.h>
30 #include <dev/ofw/openfirm.h>
31 #include <dev/ofw/ofw_gpio.h>
32 #include <dev/ofw/ofw_misc.h>
33 #include <dev/ofw/ofw_regulator.h>
34 
35 /*
36  * Register maps.
37  */
38 
/*
 * A registered register map: a bus-space window belonging to a device
 * tree node, found by node, phandle or compatible string.
 */
struct regmap {
	int			rm_node;	/* device tree node */
	uint32_t		rm_phandle;	/* "phandle" property (0 if absent) */
	bus_space_tag_t		rm_tag;		/* bus space tag of the mapping */
	bus_space_handle_t	rm_handle;	/* mapped register window */
	bus_size_t		rm_size;	/* size of the window in bytes */

	LIST_ENTRY(regmap)	rm_list;	/* link on the global regmaps list */
};

/* Global list of all registered register maps. */
LIST_HEAD(, regmap) regmaps = LIST_HEAD_INITIALIZER(regmap);
50 
51 void
52 regmap_register(int node, bus_space_tag_t tag, bus_space_handle_t handle,
53     bus_size_t size)
54 {
55 	struct regmap *rm;
56 
57 	rm = malloc(sizeof(struct regmap), M_DEVBUF, M_WAITOK);
58 	rm->rm_node = node;
59 	rm->rm_phandle = OF_getpropint(node, "phandle", 0);
60 	rm->rm_tag = tag;
61 	rm->rm_handle = handle;
62 	rm->rm_size = size;
63 	LIST_INSERT_HEAD(&regmaps, rm, rm_list);
64 }
65 
66 struct regmap *
67 regmap_bycompatible(char *compatible)
68 {
69 	struct regmap *rm;
70 
71 	LIST_FOREACH(rm, &regmaps, rm_list) {
72 		if (OF_is_compatible(rm->rm_node, compatible))
73 			return rm;
74 	}
75 
76 	return NULL;
77 }
78 
79 struct regmap *
80 regmap_bynode(int node)
81 {
82 	struct regmap *rm;
83 
84 	LIST_FOREACH(rm, &regmaps, rm_list) {
85 		if (rm->rm_node == node)
86 			return rm;
87 	}
88 
89 	return NULL;
90 }
91 
92 struct regmap *
93 regmap_byphandle(uint32_t phandle)
94 {
95 	struct regmap *rm;
96 
97 	if (phandle == 0)
98 		return NULL;
99 
100 	LIST_FOREACH(rm, &regmaps, rm_list) {
101 		if (rm->rm_phandle == phandle)
102 			return rm;
103 	}
104 
105 	return NULL;
106 }
107 
108 void
109 regmap_write_4(struct regmap *rm, bus_size_t offset, uint32_t value)
110 {
111 	KASSERT(offset <= rm->rm_size - sizeof(uint32_t));
112 	bus_space_write_4(rm->rm_tag, rm->rm_handle, offset, value);
113 }
114 
115 uint32_t
116 regmap_read_4(struct regmap *rm, bus_size_t offset)
117 {
118 	KASSERT(offset <= rm->rm_size - sizeof(uint32_t));
119 	return bus_space_read_4(rm->rm_tag, rm->rm_handle, offset);
120 }
121 
122 
123 /*
124  * PHY support.
125  */
126 
/* Global list of all registered PHY providers. */
LIST_HEAD(, phy_device) phy_devices =
	LIST_HEAD_INITIALIZER(phy_devices);

/*
 * Register a PHY provider.  Providers without a "phandle" property can
 * never be referenced by consumers, so they are not put on the list.
 */
void
phy_register(struct phy_device *pd)
{
	pd->pd_cells = OF_getpropint(pd->pd_node, "#phy-cells", 0);
	pd->pd_phandle = OF_getpropint(pd->pd_node, "phandle", 0);
	if (pd->pd_phandle == 0)
		return;

	LIST_INSERT_HEAD(&phy_devices, pd, pd_list);
}
140 
141 int
142 phy_usb_nop_enable(int node)
143 {
144 	uint32_t vcc_supply;
145 	uint32_t *gpio;
146 	int len;
147 
148 	vcc_supply = OF_getpropint(node, "vcc-supply", 0);
149 	if (vcc_supply)
150 		regulator_enable(vcc_supply);
151 
152 	len = OF_getproplen(node, "reset-gpios");
153 	if (len <= 0)
154 		return 0;
155 
156 	/* There should only be a single GPIO pin. */
157 	gpio = malloc(len, M_TEMP, M_WAITOK);
158 	OF_getpropintarray(node, "reset-gpios", gpio, len);
159 
160 	gpio_controller_config_pin(gpio, GPIO_CONFIG_OUTPUT);
161 	gpio_controller_set_pin(gpio, 1);
162 	delay(10000);
163 	gpio_controller_set_pin(gpio, 0);
164 
165 	free(gpio, M_TEMP, len);
166 
167 	return 0;
168 }
169 
170 int
171 phy_enable_cells(uint32_t *cells)
172 {
173 	struct phy_device *pd;
174 	uint32_t phandle = cells[0];
175 	int node;
176 
177 	LIST_FOREACH(pd, &phy_devices, pd_list) {
178 		if (pd->pd_phandle == phandle)
179 			break;
180 	}
181 
182 	if (pd && pd->pd_enable)
183 		return pd->pd_enable(pd->pd_cookie, &cells[1]);
184 
185 	node = OF_getnodebyphandle(phandle);
186 	if (node == 0)
187 		return ENXIO;
188 
189 	if (OF_is_compatible(node, "usb-nop-xceiv"))
190 		return phy_usb_nop_enable(node);
191 
192 	return ENXIO;
193 }
194 
195 uint32_t *
196 phy_next_phy(uint32_t *cells)
197 {
198 	uint32_t phandle = cells[0];
199 	int node, ncells;
200 
201 	node = OF_getnodebyphandle(phandle);
202 	if (node == 0)
203 		return NULL;
204 
205 	ncells = OF_getpropint(node, "#phy-cells", 0);
206 	return cells + ncells + 1;
207 }
208 
209 int
210 phy_enable_idx(int node, int idx)
211 {
212 	uint32_t *phys;
213 	uint32_t *phy;
214 	int rv = -1;
215 	int len;
216 
217 	len = OF_getproplen(node, "phys");
218 	if (len <= 0)
219 		return -1;
220 
221 	phys = malloc(len, M_TEMP, M_WAITOK);
222 	OF_getpropintarray(node, "phys", phys, len);
223 
224 	phy = phys;
225 	while (phy && phy < phys + (len / sizeof(uint32_t))) {
226 		if (idx <= 0)
227 			rv = phy_enable_cells(phy);
228 		if (idx == 0)
229 			break;
230 		phy = phy_next_phy(phy);
231 		idx--;
232 	}
233 
234 	free(phys, M_TEMP, len);
235 	return rv;
236 }
237 
/*
 * Enable the PHY named "name" per the "phy-names" property of "node".
 * Returns -1 when the name is unknown or enabling fails.
 */
int
phy_enable(int node, const char *name)
{
	int idx = OF_getindex(node, name, "phy-names");

	return (idx == -1) ? -1 : phy_enable_idx(node, idx);
}
249 
250 /*
251  * I2C support.
252  */
253 
/* Global list of all registered I2C busses. */
LIST_HEAD(, i2c_bus) i2c_busses =
	LIST_HEAD_INITIALIZER(i2c_bus);

/*
 * Register an I2C bus.  Busses without a "phandle" property cannot be
 * referenced and are not put on the list.
 */
void
i2c_register(struct i2c_bus *ib)
{
	ib->ib_phandle = OF_getpropint(ib->ib_node, "phandle", 0);
	if (ib->ib_phandle == 0)
		return;

	LIST_INSERT_HEAD(&i2c_busses, ib, ib_list);
}
266 
267 struct i2c_controller *
268 i2c_bynode(int node)
269 {
270 	struct i2c_bus *ib;
271 
272 	LIST_FOREACH(ib, &i2c_busses, ib_list) {
273 		if (ib->ib_node == node)
274 			return ib->ib_ic;
275 	}
276 
277 	return NULL;
278 }
279 
280 struct i2c_controller *
281 i2c_byphandle(uint32_t phandle)
282 {
283 	struct i2c_bus *ib;
284 
285 	if (phandle == 0)
286 		return NULL;
287 
288 	LIST_FOREACH(ib, &i2c_busses, ib_list) {
289 		if (ib->ib_phandle == phandle)
290 			return ib->ib_ic;
291 	}
292 
293 	return NULL;
294 }
295 
296 /*
297  * SFP support.
298  */
299 
/* Global list of all registered SFP cages. */
LIST_HEAD(, sfp_device) sfp_devices =
	LIST_HEAD_INITIALIZER(sfp_devices);

/*
 * Register an SFP cage.  Cages without a "phandle" property cannot be
 * referenced and are not put on the list.
 */
void
sfp_register(struct sfp_device *sd)
{
	sd->sd_phandle = OF_getpropint(sd->sd_node, "phandle", 0);
	if (sd->sd_phandle == 0)
		return;

	LIST_INSERT_HEAD(&sfp_devices, sd, sd_list);
}
312 
313 int
314 sfp_do_enable(uint32_t phandle, int enable)
315 {
316 	struct sfp_device *sd;
317 
318 	if (phandle == 0)
319 		return ENXIO;
320 
321 	LIST_FOREACH(sd, &sfp_devices, sd_list) {
322 		if (sd->sd_phandle == phandle)
323 			return sd->sd_enable(sd->sd_cookie, enable);
324 	}
325 
326 	return ENXIO;
327 }
328 
/* Enable the SFP module identified by "phandle". */
int
sfp_enable(uint32_t phandle)
{
	return sfp_do_enable(phandle, 1);
}

/* Disable the SFP module identified by "phandle". */
int
sfp_disable(uint32_t phandle)
{
	return sfp_do_enable(phandle, 0);
}
340 
341 int
342 sfp_get_sffpage(uint32_t phandle, struct if_sffpage *sff)
343 {
344 	struct sfp_device *sd;
345 
346 	if (phandle == 0)
347 		return ENXIO;
348 
349 	LIST_FOREACH(sd, &sfp_devices, sd_list) {
350 		if (sd->sd_phandle == phandle)
351 			return sd->sd_get_sffpage(sd->sd_cookie, sff);
352 	}
353 
354 	return ENXIO;
355 }
356 
/*
 * SFF-8472 "Transceiver Compliance Codes" byte offsets and bits
 * (EEPROM page 0) used by sfp_add_media() to derive media types.
 */
#define SFF8472_TCC_XCC			3 /* 10G Ethernet Compliance Codes */
#define SFF8472_TCC_XCC_10G_SR		(1 << 4)
#define SFF8472_TCC_XCC_10G_LR		(1 << 5)
#define SFF8472_TCC_XCC_10G_LRM		(1 << 6)
#define SFF8472_TCC_XCC_10G_ER		(1 << 7)
#define SFF8472_TCC_ECC			6 /* Ethernet Compliance Codes */
#define SFF8472_TCC_ECC_1000_SX		(1 << 0)
#define SFF8472_TCC_ECC_1000_LX		(1 << 1)
#define SFF8472_TCC_ECC_1000_CX		(1 << 2)
#define SFF8472_TCC_ECC_1000_T		(1 << 3)
#define SFF8472_TCC_SCT			8 /* SFP+ Cable Technology */
#define SFF8472_TCC_SCT_PASSIVE		(1 << 2)
#define SFF8472_TCC_SCT_ACTIVE		(1 << 3)
370 
/*
 * Read the compliance-code bytes of the SFP module's EEPROM and add
 * every supported media type to "mii".  The checks run from 1G SFP up
 * to 10G SFP+ DAC and each match overwrites mii_media_active, so the
 * last (highest) matching type ends up active.  Returns 0 on success
 * or the error from sfp_get_sffpage().
 */
int
sfp_add_media(uint32_t phandle, struct mii_data *mii)
{
	struct if_sffpage sff;
	int error;

	memset(&sff, 0, sizeof(sff));
	sff.sff_addr = IFSFF_ADDR_EEPROM;
	sff.sff_page = 0;

	error = sfp_get_sffpage(phandle, &sff);
	if (error)
		return error;

	/* SFP */
	if (sff.sff_data[SFF8472_TCC_ECC] & SFF8472_TCC_ECC_1000_SX) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_SX, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_1000_SX | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_ECC] & SFF8472_TCC_ECC_1000_LX) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_LX, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_1000_LX | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_ECC] & SFF8472_TCC_ECC_1000_CX) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_CX, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_1000_CX | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_ECC] & SFF8472_TCC_ECC_1000_T) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_T, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_1000_T | IFM_FDX;
	}

	/* SFP+ */
	if (sff.sff_data[SFF8472_TCC_XCC] & SFF8472_TCC_XCC_10G_SR) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_10G_SR | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_XCC] & SFF8472_TCC_XCC_10G_LR) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_10G_LR | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_XCC] & SFF8472_TCC_XCC_10G_LRM) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_10G_LRM, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_10G_LRM | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_XCC] & SFF8472_TCC_XCC_10G_ER) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_10G_ER, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_10G_ER | IFM_FDX;
	}

	/* SFP+ DAC */
	if (sff.sff_data[SFF8472_TCC_SCT] & SFF8472_TCC_SCT_PASSIVE ||
	    sff.sff_data[SFF8472_TCC_SCT] & SFF8472_TCC_SCT_ACTIVE) {
		ifmedia_add(&mii->mii_media,
		    IFM_ETHER | IFM_10G_SFP_CU, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_10G_SFP_CU | IFM_FDX;
	}

	return 0;
}
431 
432 /*
433  * PWM support.
434  */
435 
436 LIST_HEAD(, pwm_device) pwm_devices =
437 	LIST_HEAD_INITIALIZER(pwm_devices);
438 
439 void
440 pwm_register(struct pwm_device *pd)
441 {
442 	pd->pd_cells = OF_getpropint(pd->pd_node, "#pwm-cells", 0);
443 	pd->pd_phandle = OF_getpropint(pd->pd_node, "phandle", 0);
444 	if (pd->pd_phandle == 0)
445 		return;
446 
447 	LIST_INSERT_HEAD(&pwm_devices, pd, pd_list);
448 
449 }
450 
451 int
452 pwm_init_state(uint32_t *cells, struct pwm_state *ps)
453 {
454 	struct pwm_device *pd;
455 
456 	LIST_FOREACH(pd, &pwm_devices, pd_list) {
457 		if (pd->pd_phandle == cells[0]) {
458 			memset(ps, 0, sizeof(struct pwm_state));
459 			pd->pd_get_state(pd->pd_cookie, &cells[1], ps);
460 			ps->ps_pulse_width = 0;
461 			if (pd->pd_cells >= 2)
462 				ps->ps_period = cells[2];
463 			if (pd->pd_cells >= 3)
464 				ps->ps_flags = cells[3];
465 			return 0;
466 		}
467 	}
468 
469 	return ENXIO;
470 }
471 
472 int
473 pwm_get_state(uint32_t *cells, struct pwm_state *ps)
474 {
475 	struct pwm_device *pd;
476 
477 	LIST_FOREACH(pd, &pwm_devices, pd_list) {
478 		if (pd->pd_phandle == cells[0])
479 			return pd->pd_get_state(pd->pd_cookie, &cells[1], ps);
480 	}
481 
482 	return ENXIO;
483 }
484 
485 int
486 pwm_set_state(uint32_t *cells, struct pwm_state *ps)
487 {
488 	struct pwm_device *pd;
489 
490 	LIST_FOREACH(pd, &pwm_devices, pd_list) {
491 		if (pd->pd_phandle == cells[0])
492 			return pd->pd_set_state(pd->pd_cookie, &cells[1], ps);
493 	}
494 
495 	return ENXIO;
496 }
497 
498 /*
499  * Non-volatile memory support.
500  */
501 
/* Global list of all registered nvmem devices. */
LIST_HEAD(, nvmem_device) nvmem_devices =
	LIST_HEAD_INITIALIZER(nvmem_devices);

/*
 * An nvmem cell: a sub-range of an nvmem device, described by a child
 * node carrying a two-cell "reg" property (offset, size).
 */
struct nvmem_cell {
	uint32_t	nc_phandle;	/* cell node "phandle" */
	struct nvmem_device *nc_nd;	/* backing nvmem device */
	bus_addr_t	nc_addr;	/* offset within the device */
	bus_size_t	nc_size;	/* size of the cell in bytes */

	LIST_ENTRY(nvmem_cell) nc_list;	/* link on the nvmem_cells list */
};

/* Global list of all registered nvmem cells. */
LIST_HEAD(, nvmem_cell) nvmem_cells =
	LIST_HEAD_INITIALIZER(nvmem_cells);
516 
517 void
518 nvmem_register_child(int node, struct nvmem_device *nd)
519 {
520 	struct nvmem_cell *nc;
521 	uint32_t phandle;
522 	uint32_t reg[2];
523 
524 	phandle = OF_getpropint(node, "phandle", 0);
525 	if (phandle == 0)
526 		return;
527 
528 	if (OF_getpropintarray(node, "reg", reg, sizeof(reg)) != sizeof(reg))
529 		return;
530 
531 	nc = malloc(sizeof(struct nvmem_cell), M_DEVBUF, M_WAITOK);
532 	nc->nc_phandle = phandle;
533 	nc->nc_nd = nd;
534 	nc->nc_addr = reg[0];
535 	nc->nc_size = reg[1];
536 	LIST_INSERT_HEAD(&nvmem_cells, nc, nc_list);
537 }
538 
539 void
540 nvmem_register(struct nvmem_device *nd)
541 {
542 	int node;
543 
544 	nd->nd_phandle = OF_getpropint(nd->nd_node, "phandle", 0);
545 	if (nd->nd_phandle)
546 		LIST_INSERT_HEAD(&nvmem_devices, nd, nd_list);
547 
548 	for (node = OF_child(nd->nd_node); node; node = OF_peer(node))
549 		nvmem_register_child(node, nd);
550 }
551 
552 int
553 nvmem_read(uint32_t phandle, bus_addr_t addr, void *data, bus_size_t size)
554 {
555 	struct nvmem_device *nd;
556 
557 	if (phandle == 0)
558 		return ENXIO;
559 
560 	LIST_FOREACH(nd, &nvmem_devices, nd_list) {
561 		if (nd->nd_phandle == phandle)
562 			return nd->nd_read(nd->nd_cookie, addr, data, size);
563 	}
564 
565 	return ENXIO;
566 }
567 
568 int
569 nvmem_read_cell(int node, const char *name, void *data, bus_size_t size)
570 {
571 	struct nvmem_device *nd;
572 	struct nvmem_cell *nc;
573 	uint32_t phandle, *phandles;
574 	int id, len;
575 
576 	id = OF_getindex(node, name, "nvmem-cell-names");
577 	if (id < 0)
578 		return ENXIO;
579 
580 	len = OF_getproplen(node, "nvmem-cells");
581 	if (len <= 0)
582 		return ENXIO;
583 
584 	phandles = malloc(len, M_TEMP, M_WAITOK);
585 	OF_getpropintarray(node, "nvmem-cells", phandles, len);
586 	phandle = phandles[id];
587 	free(phandles, M_TEMP, len);
588 
589 	LIST_FOREACH(nc, &nvmem_cells, nc_list) {
590 		if (nc->nc_phandle == phandle)
591 			break;
592 	}
593 	if (nc == NULL)
594 		return ENXIO;
595 
596 	if (size > nc->nc_size)
597 		return EINVAL;
598 
599 	nd = nc->nc_nd;
600 	return nd->nd_read(nd->nd_cookie, nc->nc_addr, data, size);
601 }
602 
603 /* Port/endpoint interface support */
604 
/* Global list of all registered graph endpoints. */
LIST_HEAD(, endpoint) endpoints =
	LIST_HEAD_INITIALIZER(endpoints);

/*
 * Register one endpoint child node of device port "dp".  The endpoint
 * goes on both the global endpoint list (linked via ep_list) and the
 * port's own list (linked via ep_plist).
 */
void
endpoint_register(int node, struct device_port *dp, enum endpoint_type type)
{
	struct endpoint *ep;

	ep = malloc(sizeof(*ep), M_DEVBUF, M_WAITOK);
	ep->ep_node = node;
	ep->ep_phandle = OF_getpropint(node, "phandle", 0);
	ep->ep_reg = OF_getpropint(node, "reg", -1);	/* -1: no "reg" */
	ep->ep_port = dp;
	ep->ep_type = type;

	LIST_INSERT_HEAD(&endpoints, ep, ep_list);
	LIST_INSERT_HEAD(&dp->dp_endpoints, ep, ep_plist);
}
623 
624 void
625 device_port_register(int node, struct device_ports *ports,
626     enum endpoint_type type)
627 {
628 	struct device_port *dp;
629 
630 	dp = malloc(sizeof(*dp), M_DEVBUF, M_WAITOK);
631 	dp->dp_node = node;
632 	dp->dp_phandle = OF_getpropint(node, "phandle", 0);
633 	dp->dp_reg = OF_getpropint(node, "reg", -1);
634 	dp->dp_ports = ports;
635 	LIST_INIT(&dp->dp_endpoints);
636 	for (node = OF_child(node); node; node = OF_peer(node))
637 		endpoint_register(node, dp, type);
638 
639 	LIST_INSERT_HEAD(&ports->dp_ports, dp, dp_list);
640 }
641 
642 void
643 device_ports_register(struct device_ports *ports,
644     enum endpoint_type type)
645 {
646 	int node;
647 
648 	LIST_INIT(&ports->dp_ports);
649 
650 	node = OF_getnodebyname(ports->dp_node, "ports");
651 	if (node == 0) {
652 		node = OF_getnodebyname(ports->dp_node, "port");
653 		if (node == 0)
654 			return;
655 
656 		device_port_register(node, ports, type);
657 		return;
658 	}
659 
660 	for (node = OF_child(node); node; node = OF_peer(node))
661 		device_port_register(node, ports, type);
662 }
663 
664 struct device_ports *
665 device_ports_byphandle(uint32_t phandle)
666 {
667 	struct endpoint *ep;
668 
669 	if (phandle == 0)
670 		return NULL;
671 
672 	LIST_FOREACH(ep, &endpoints, ep_list) {
673 		if (ep->ep_port->dp_phandle == phandle)
674 			return ep->ep_port->dp_ports;
675 	}
676 
677 	return NULL;
678 }
679 
680 struct endpoint *
681 endpoint_byphandle(uint32_t phandle)
682 {
683 	struct endpoint *ep;
684 
685 	if (phandle == 0)
686 		return NULL;
687 
688 	LIST_FOREACH(ep, &endpoints, ep_list) {
689 		if (ep->ep_phandle == phandle)
690 			return ep;
691 	}
692 
693 	return NULL;
694 }
695 
696 struct endpoint *
697 endpoint_byreg(struct device_ports *ports, uint32_t dp_reg, uint32_t ep_reg)
698 {
699 	struct device_port *dp;
700 	struct endpoint *ep;
701 
702 	LIST_FOREACH(dp, &ports->dp_ports, dp_list) {
703 		if (dp->dp_reg != dp_reg)
704 			continue;
705 		LIST_FOREACH(ep, &dp->dp_endpoints, ep_list) {
706 			if (ep->ep_reg != ep_reg)
707 				continue;
708 			return ep;
709 		}
710 	}
711 
712 	return NULL;
713 }
714 
715 struct endpoint *
716 endpoint_remote(struct endpoint *ep)
717 {
718 	struct endpoint *rep;
719 	int phandle;
720 
721 	phandle = OF_getpropint(ep->ep_node, "remote-endpoint", 0);
722 	if (phandle == 0)
723 		return NULL;
724 
725 	LIST_FOREACH(rep, &endpoints, ep_list) {
726 		if (rep->ep_phandle == phandle)
727 			return rep;
728 	}
729 
730 	return NULL;
731 }
732 
/*
 * Activate an endpoint through its owning device's dp_ep_activate
 * callback.  Assumes the callback was set by the device; a NULL
 * callback would fault here.
 */
int
endpoint_activate(struct endpoint *ep, void *arg)
{
	struct device_ports *ports = ep->ep_port->dp_ports;
	return ports->dp_ep_activate(ports->dp_cookie, ep, arg);
}

/*
 * Fetch the device-specific cookie of an endpoint through its owning
 * device's dp_ep_get_cookie callback.
 */
void *
endpoint_get_cookie(struct endpoint *ep)
{
	struct device_ports *ports = ep->ep_port->dp_ports;
	return ports->dp_ep_get_cookie(ports->dp_cookie, ep);
}
746 
/*
 * Activate all endpoint pairs of the port identified by "phandle".
 * For each local endpoint with a remote peer, both sides are
 * activated in turn; a pair only counts when both succeed.  Returns 0
 * when at least one pair was activated, ENXIO otherwise.
 */
int
device_port_activate(uint32_t phandle, void *arg)
{
	struct device_port *dp = NULL;
	struct endpoint *ep, *rep;
	int count;
	int error;

	if (phandle == 0)
		return ENXIO;

	/* Find the port through any of its endpoints on the global list. */
	LIST_FOREACH(ep, &endpoints, ep_list) {
		if (ep->ep_port->dp_phandle == phandle) {
			dp = ep->ep_port;
			break;
		}
	}
	if (dp == NULL)
		return ENXIO;

	count = 0;
	LIST_FOREACH(ep, &dp->dp_endpoints, ep_plist) {
		rep = endpoint_remote(ep);
		if (rep == NULL)
			continue;

		/* Activate the local side first, then the remote side. */
		error = endpoint_activate(ep, arg);
		if (error)
			continue;
		error = endpoint_activate(rep, arg);
		if (error)
			continue;
		count++;
	}

	return count ? 0 : ENXIO;
}
784 
785 /* Digital audio interface support */
786 
/* Global list of all registered DAI devices. */
LIST_HEAD(, dai_device) dai_devices =
	LIST_HEAD_INITIALIZER(dai_devices);

/*
 * Default dp_ep_get_cookie callback for DAI devices: every endpoint
 * shares the device cookie itself.
 */
void *
dai_ep_get_cookie(void *cookie, struct endpoint *ep)
{
	return cookie;
}
795 
796 void
797 dai_register(struct dai_device *dd)
798 {
799 	dd->dd_phandle = OF_getpropint(dd->dd_node, "phandle", 0);
800 	if (dd->dd_phandle != 0)
801 		LIST_INSERT_HEAD(&dai_devices, dd, dd_list);
802 
803 	dd->dd_ports.dp_node = dd->dd_node;
804 	dd->dd_ports.dp_cookie = dd;
805 	dd->dd_ports.dp_ep_get_cookie = dai_ep_get_cookie;
806 	device_ports_register(&dd->dd_ports, EP_DAI_DEVICE);
807 }
808 
809 struct dai_device *
810 dai_byphandle(uint32_t phandle)
811 {
812 	struct dai_device *dd;
813 
814 	if (phandle == 0)
815 		return NULL;
816 
817 	LIST_FOREACH(dd, &dai_devices, dd_list) {
818 		if (dd->dd_phandle == phandle)
819 			return dd;
820 	}
821 
822 	return NULL;
823 }
824 
825 /* MII support */
826 
/* Global list of all registered MII (MDIO) busses. */
LIST_HEAD(, mii_bus) mii_busses =
	LIST_HEAD_INITIALIZER(mii_busses);

/* Register an MII bus so it can be looked up by device tree node. */
void
mii_register(struct mii_bus *md)
{
	LIST_INSERT_HEAD(&mii_busses, md, md_list);
}
835 
836 struct mii_bus *
837 mii_bynode(int node)
838 {
839 	struct mii_bus *md;
840 
841 	LIST_FOREACH(md, &mii_busses, md_list) {
842 		if (md->md_node == node)
843 			return md;
844 	}
845 
846 	return NULL;
847 }
848 
849 struct mii_bus *
850 mii_byphandle(uint32_t phandle)
851 {
852 	int node;
853 
854 	if (phandle == 0)
855 		return NULL;
856 
857 	node = OF_getnodebyphandle(phandle);
858 	if (node == 0)
859 		return NULL;
860 
861 	node = OF_parent(node);
862 	if (node == 0)
863 		return NULL;
864 
865 	return mii_bynode(node);
866 }
867 
868 /* IOMMU support */
869 
/* Global list of all registered IOMMU translators. */
LIST_HEAD(, iommu_device) iommu_devices =
	LIST_HEAD_INITIALIZER(iommu_devices);

/*
 * Register an IOMMU.  IOMMUs without a "phandle" property cannot be
 * referenced by masters and are not put on the list.
 */
void
iommu_device_register(struct iommu_device *id)
{
	id->id_phandle = OF_getpropint(id->id_node, "phandle", 0);
	if (id->id_phandle == 0)
		return;

	LIST_INSERT_HEAD(&iommu_devices, id, id_list);
}
882 
883 bus_dma_tag_t
884 iommu_device_do_map(uint32_t phandle, uint32_t *cells, bus_dma_tag_t dmat)
885 {
886 	struct iommu_device *id;
887 
888 	if (phandle == 0)
889 		return dmat;
890 
891 	LIST_FOREACH(id, &iommu_devices, id_list) {
892 		if (id->id_phandle == phandle)
893 			return id->id_map(id->id_cookie, cells, dmat);
894 	}
895 
896 	return dmat;
897 }
898 
899 int
900 iommu_device_lookup(int node, uint32_t *phandle, uint32_t *sid)
901 {
902 	uint32_t *cell;
903 	uint32_t *map;
904 	int len, icells, ncells;
905 	int ret = 1;
906 
907 	len = OF_getproplen(node, "iommus");
908 	if (len <= 0)
909 		return ret;
910 
911 	map = malloc(len, M_TEMP, M_WAITOK);
912 	OF_getpropintarray(node, "iommus", map, len);
913 
914 	cell = map;
915 	ncells = len / sizeof(uint32_t);
916 	while (ncells > 1) {
917 		node = OF_getnodebyphandle(cell[0]);
918 		if (node == 0)
919 			goto out;
920 
921 		icells = OF_getpropint(node, "#iommu-cells", 1);
922 		if (ncells < icells + 1)
923 			goto out;
924 
925 		KASSERT(icells == 1);
926 
927 		*phandle = cell[0];
928 		*sid = cell[1];
929 		ret = 0;
930 		break;
931 
932 		cell += (1 + icells);
933 		ncells -= (1 + icells);
934 	}
935 
936 out:
937 	free(map, M_TEMP, len);
938 
939 	return ret;
940 }
941 
/*
 * Translate a PCI requester ID into an IOMMU phandle and stream ID
 * using the "iommu-map" property of "node".  Each map entry is four
 * cells: rid-base, iommu-phandle, sid-base, length.  Returns 0 on
 * success, 1 on failure.
 */
int
iommu_device_lookup_pci(int node, uint32_t rid, uint32_t *phandle,
    uint32_t *sid)
{
	uint32_t sid_base;
	uint32_t *cell;
	uint32_t *map;
	uint32_t mask, rid_base;
	int len, length, icells, ncells;
	int ret = 1;

	len = OF_getproplen(node, "iommu-map");
	if (len <= 0)
		return ret;

	map = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "iommu-map", map, len);

	/* 0xffff is the default mask per the device tree binding. */
	mask = OF_getpropint(node, "iommu-map-mask", 0xffff);
	rid = rid & mask;

	cell = map;
	ncells = len / sizeof(uint32_t);
	while (ncells > 1) {
		node = OF_getnodebyphandle(cell[1]);
		if (node == 0)
			goto out;

		icells = OF_getpropint(node, "#iommu-cells", 1);
		if (ncells < icells + 3)
			goto out;

		/* Only single-cell IOMMU specifiers are supported. */
		KASSERT(icells == 1);

		rid_base = cell[0];
		sid_base = cell[2];
		length = cell[3];
		if (rid >= rid_base && rid < rid_base + length) {
			/* Stream IDs map linearly within an entry. */
			*sid = sid_base + (rid - rid_base);
			*phandle = cell[1];
			ret = 0;
			break;
		}

		cell += 4;
		ncells -= 4;
	}

out:
	free(map, M_TEMP, len);

	return ret;
}
995 
996 bus_dma_tag_t
997 iommu_device_map(int node, bus_dma_tag_t dmat)
998 {
999 	uint32_t phandle, sid;
1000 
1001 	if (iommu_device_lookup(node, &phandle, &sid))
1002 		return dmat;
1003 
1004 	return iommu_device_do_map(phandle, &sid, dmat);
1005 }
1006 
1007 bus_dma_tag_t
1008 iommu_device_map_pci(int node, uint32_t rid, bus_dma_tag_t dmat)
1009 {
1010 	uint32_t phandle, sid;
1011 
1012 	if (iommu_device_lookup_pci(node, rid, &phandle, &sid))
1013 		return dmat;
1014 
1015 	return iommu_device_do_map(phandle, &sid, dmat);
1016 }
1017 
1018 void
1019 iommu_device_do_reserve(uint32_t phandle, uint32_t *cells, bus_addr_t addr,
1020     bus_size_t size)
1021 {
1022 	struct iommu_device *id;
1023 
1024 	if (phandle == 0)
1025 		return;
1026 
1027 	LIST_FOREACH(id, &iommu_devices, id_list) {
1028 		if (id->id_phandle == phandle) {
1029 			id->id_reserve(id->id_cookie, cells, addr, size);
1030 			break;
1031 		}
1032 	}
1033 }
1034 
1035 void
1036 iommu_reserve_region_pci(int node, uint32_t rid, bus_addr_t addr,
1037     bus_size_t size)
1038 {
1039 	uint32_t phandle, sid;
1040 
1041 	if (iommu_device_lookup_pci(node, rid, &phandle, &sid))
1042 		return;
1043 
1044 	return iommu_device_do_reserve(phandle, &sid, addr, size);
1045 }
1046