xref: /openbsd-src/sys/dev/ofw/ofw_misc.c (revision de8cc8edbc71bd3e3bc7fbffa27ba0e564c37d8b)
1 /*	$OpenBSD: ofw_misc.c,v 1.30 2021/02/28 21:09:44 patrick Exp $	*/
2 /*
3  * Copyright (c) 2017 Mark Kettenis
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <sys/types.h>
19 #include <sys/device.h>
20 #include <sys/malloc.h>
21 #include <sys/systm.h>
22 
23 #include <net/if.h>
24 #include <net/if_media.h>
25 
26 #include <machine/bus.h>
27 
28 #include <dev/mii/mii.h>
29 #include <dev/mii/miivar.h>
30 #include <dev/ofw/openfirm.h>
31 #include <dev/ofw/ofw_gpio.h>
32 #include <dev/ofw/ofw_misc.h>
33 #include <dev/ofw/ofw_regulator.h>
34 
35 /*
36  * Register maps.
37  */
38 
/* A registered register window belonging to a device tree node. */
struct regmap {
	int			rm_node;	/* OF node owning this map */
	uint32_t		rm_phandle;	/* cached "phandle" property (0 if absent) */
	bus_space_tag_t		rm_tag;		/* bus space tag used for access */
	bus_space_handle_t	rm_handle;	/* mapped register window */
	bus_size_t		rm_size;	/* window size in bytes */

	LIST_ENTRY(regmap)	rm_list;	/* linkage on the global list */
};

/* Global list of registered register maps. */
LIST_HEAD(, regmap) regmaps = LIST_HEAD_INITIALIZER(regmap);
50 
51 void
52 regmap_register(int node, bus_space_tag_t tag, bus_space_handle_t handle,
53     bus_size_t size)
54 {
55 	struct regmap *rm;
56 
57 	rm = malloc(sizeof(struct regmap), M_DEVBUF, M_WAITOK);
58 	rm->rm_node = node;
59 	rm->rm_phandle = OF_getpropint(node, "phandle", 0);
60 	rm->rm_tag = tag;
61 	rm->rm_handle = handle;
62 	rm->rm_size = size;
63 	LIST_INSERT_HEAD(&regmaps, rm, rm_list);
64 }
65 
66 struct regmap *
67 regmap_bycompatible(char *compatible)
68 {
69 	struct regmap *rm;
70 
71 	LIST_FOREACH(rm, &regmaps, rm_list) {
72 		if (OF_is_compatible(rm->rm_node, compatible))
73 			return rm;
74 	}
75 
76 	return NULL;
77 }
78 
79 struct regmap *
80 regmap_bynode(int node)
81 {
82 	struct regmap *rm;
83 
84 	LIST_FOREACH(rm, &regmaps, rm_list) {
85 		if (rm->rm_node == node)
86 			return rm;
87 	}
88 
89 	return NULL;
90 }
91 
92 struct regmap *
93 regmap_byphandle(uint32_t phandle)
94 {
95 	struct regmap *rm;
96 
97 	if (phandle == 0)
98 		return NULL;
99 
100 	LIST_FOREACH(rm, &regmaps, rm_list) {
101 		if (rm->rm_phandle == phandle)
102 			return rm;
103 	}
104 
105 	return NULL;
106 }
107 
108 void
109 regmap_write_4(struct regmap *rm, bus_size_t offset, uint32_t value)
110 {
111 	KASSERT(offset <= rm->rm_size - sizeof(uint32_t));
112 	bus_space_write_4(rm->rm_tag, rm->rm_handle, offset, value);
113 }
114 
115 uint32_t
116 regmap_read_4(struct regmap *rm, bus_size_t offset)
117 {
118 	KASSERT(offset <= rm->rm_size - sizeof(uint32_t));
119 	return bus_space_read_4(rm->rm_tag, rm->rm_handle, offset);
120 }
121 
122 
123 /*
124  * PHY support.
125  */
126 
/* Global list of registered PHY providers. */
LIST_HEAD(, phy_device) phy_devices =
	LIST_HEAD_INITIALIZER(phy_devices);
129 
130 void
131 phy_register(struct phy_device *pd)
132 {
133 	pd->pd_cells = OF_getpropint(pd->pd_node, "#phy-cells", 0);
134 	pd->pd_phandle = OF_getpropint(pd->pd_node, "phandle", 0);
135 	if (pd->pd_phandle == 0)
136 		return;
137 
138 	LIST_INSERT_HEAD(&phy_devices, pd, pd_list);
139 }
140 
141 int
142 phy_usb_nop_enable(int node)
143 {
144 	uint32_t vcc_supply;
145 	uint32_t *gpio;
146 	int len;
147 
148 	vcc_supply = OF_getpropint(node, "vcc-supply", 0);
149 	if (vcc_supply)
150 		regulator_enable(vcc_supply);
151 
152 	len = OF_getproplen(node, "reset-gpios");
153 	if (len <= 0)
154 		return 0;
155 
156 	/* There should only be a single GPIO pin. */
157 	gpio = malloc(len, M_TEMP, M_WAITOK);
158 	OF_getpropintarray(node, "reset-gpios", gpio, len);
159 
160 	gpio_controller_config_pin(gpio, GPIO_CONFIG_OUTPUT);
161 	gpio_controller_set_pin(gpio, 1);
162 	delay(10000);
163 	gpio_controller_set_pin(gpio, 0);
164 
165 	free(gpio, M_TEMP, len);
166 
167 	return 0;
168 }
169 
170 int
171 phy_enable_cells(uint32_t *cells)
172 {
173 	struct phy_device *pd;
174 	uint32_t phandle = cells[0];
175 	int node;
176 
177 	LIST_FOREACH(pd, &phy_devices, pd_list) {
178 		if (pd->pd_phandle == phandle)
179 			break;
180 	}
181 
182 	if (pd && pd->pd_enable)
183 		return pd->pd_enable(pd->pd_cookie, &cells[1]);
184 
185 	node = OF_getnodebyphandle(phandle);
186 	if (node == 0)
187 		return ENXIO;
188 
189 	if (OF_is_compatible(node, "usb-nop-xceiv"))
190 		return phy_usb_nop_enable(node);
191 
192 	return ENXIO;
193 }
194 
195 uint32_t *
196 phy_next_phy(uint32_t *cells)
197 {
198 	uint32_t phandle = cells[0];
199 	int node, ncells;
200 
201 	node = OF_getnodebyphandle(phandle);
202 	if (node == 0)
203 		return NULL;
204 
205 	ncells = OF_getpropint(node, "#phy-cells", 0);
206 	return cells + ncells + 1;
207 }
208 
209 int
210 phy_enable_idx(int node, int idx)
211 {
212 	uint32_t *phys;
213 	uint32_t *phy;
214 	int rv = -1;
215 	int len;
216 
217 	len = OF_getproplen(node, "phys");
218 	if (len <= 0)
219 		return -1;
220 
221 	phys = malloc(len, M_TEMP, M_WAITOK);
222 	OF_getpropintarray(node, "phys", phys, len);
223 
224 	phy = phys;
225 	while (phy && phy < phys + (len / sizeof(uint32_t))) {
226 		if (idx <= 0)
227 			rv = phy_enable_cells(phy);
228 		if (idx == 0)
229 			break;
230 		phy = phy_next_phy(phy);
231 		idx--;
232 	}
233 
234 	free(phys, M_TEMP, len);
235 	return rv;
236 }
237 
/*
 * Enable the PHY named in the node's "phy-names" property.
 * Returns -1 when the name is unknown, otherwise the result of
 * enabling the corresponding "phys" entry.
 */
int
phy_enable(int node, const char *name)
{
	int idx = OF_getindex(node, name, "phy-names");

	return (idx < 0) ? -1 : phy_enable_idx(node, idx);
}
249 
250 /*
251  * I2C support.
252  */
253 
/* Global list of registered I2C busses. */
LIST_HEAD(, i2c_bus) i2c_busses =
	LIST_HEAD_INITIALIZER(i2c_bus);
256 
257 void
258 i2c_register(struct i2c_bus *ib)
259 {
260 	ib->ib_phandle = OF_getpropint(ib->ib_node, "phandle", 0);
261 	if (ib->ib_phandle == 0)
262 		return;
263 
264 	LIST_INSERT_HEAD(&i2c_busses, ib, ib_list);
265 }
266 
267 struct i2c_controller *
268 i2c_bynode(int node)
269 {
270 	struct i2c_bus *ib;
271 
272 	LIST_FOREACH(ib, &i2c_busses, ib_list) {
273 		if (ib->ib_node == node)
274 			return ib->ib_ic;
275 	}
276 
277 	return NULL;
278 }
279 
280 struct i2c_controller *
281 i2c_byphandle(uint32_t phandle)
282 {
283 	struct i2c_bus *ib;
284 
285 	if (phandle == 0)
286 		return NULL;
287 
288 	LIST_FOREACH(ib, &i2c_busses, ib_list) {
289 		if (ib->ib_phandle == phandle)
290 			return ib->ib_ic;
291 	}
292 
293 	return NULL;
294 }
295 
296 /*
297  * SFP support.
298  */
299 
/* Global list of registered SFP cage drivers. */
LIST_HEAD(, sfp_device) sfp_devices =
	LIST_HEAD_INITIALIZER(sfp_devices);
302 
303 void
304 sfp_register(struct sfp_device *sd)
305 {
306 	sd->sd_phandle = OF_getpropint(sd->sd_node, "phandle", 0);
307 	if (sd->sd_phandle == 0)
308 		return;
309 
310 	LIST_INSERT_HEAD(&sfp_devices, sd, sd_list);
311 }
312 
313 int
314 sfp_do_enable(uint32_t phandle, int enable)
315 {
316 	struct sfp_device *sd;
317 
318 	if (phandle == 0)
319 		return ENXIO;
320 
321 	LIST_FOREACH(sd, &sfp_devices, sd_list) {
322 		if (sd->sd_phandle == phandle)
323 			return sd->sd_enable(sd->sd_cookie, enable);
324 	}
325 
326 	return ENXIO;
327 }
328 
/*
 * Power up the SFP module identified by phandle; ENXIO if no such
 * device is registered.
 */
int
sfp_enable(uint32_t phandle)
{
	return sfp_do_enable(phandle, 1);
}
334 
/*
 * Power down the SFP module identified by phandle; ENXIO if no such
 * device is registered.
 */
int
sfp_disable(uint32_t phandle)
{
	return sfp_do_enable(phandle, 0);
}
340 
341 int
342 sfp_get_sffpage(uint32_t phandle, struct if_sffpage *sff)
343 {
344 	struct sfp_device *sd;
345 
346 	if (phandle == 0)
347 		return ENXIO;
348 
349 	LIST_FOREACH(sd, &sfp_devices, sd_list) {
350 		if (sd->sd_phandle == phandle)
351 			return sd->sd_get_sffpage(sd->sd_cookie, sff);
352 	}
353 
354 	return ENXIO;
355 }
356 
/*
 * SFF-8472 "Base ID" EEPROM field offsets and bits used to derive the
 * media types supported by an SFP/SFP+ module.
 */
#define SFF8472_TCC_XCC			3 /* 10G Ethernet Compliance Codes */
#define SFF8472_TCC_XCC_10G_SR		(1 << 4)
#define SFF8472_TCC_XCC_10G_LR		(1 << 5)
#define SFF8472_TCC_XCC_10G_LRM		(1 << 6)
#define SFF8472_TCC_XCC_10G_ER		(1 << 7)
#define SFF8472_TCC_ECC			6 /* Ethernet Compliance Codes */
#define SFF8472_TCC_ECC_1000_SX		(1 << 0)
#define SFF8472_TCC_ECC_1000_LX		(1 << 1)
#define SFF8472_TCC_ECC_1000_CX		(1 << 2)
#define SFF8472_TCC_ECC_1000_T		(1 << 3)
#define SFF8472_TCC_SCT			8 /* SFP+ Cable Technology */
#define SFF8472_TCC_SCT_PASSIVE		(1 << 2)
#define SFF8472_TCC_SCT_ACTIVE		(1 << 3)
370 
371 int
372 sfp_add_media(uint32_t phandle, struct mii_data *mii)
373 {
374 	struct if_sffpage sff;
375 	int error;
376 
377 	memset(&sff, 0, sizeof(sff));
378 	sff.sff_addr = IFSFF_ADDR_EEPROM;
379 	sff.sff_page = 0;
380 
381 	error = sfp_get_sffpage(phandle, &sff);
382 	if (error)
383 		return error;
384 
385 	/* SFP */
386 	if (sff.sff_data[SFF8472_TCC_ECC] & SFF8472_TCC_ECC_1000_SX) {
387 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_SX, 0, NULL);
388 		mii->mii_media_active = IFM_ETHER | IFM_1000_SX | IFM_FDX;
389 	}
390 	if (sff.sff_data[SFF8472_TCC_ECC] & SFF8472_TCC_ECC_1000_LX) {
391 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_LX, 0, NULL);
392 		mii->mii_media_active = IFM_ETHER | IFM_1000_LX | IFM_FDX;
393 	}
394 	if (sff.sff_data[SFF8472_TCC_ECC] & SFF8472_TCC_ECC_1000_CX) {
395 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_CX, 0, NULL);
396 		mii->mii_media_active = IFM_ETHER | IFM_1000_CX | IFM_FDX;
397 	}
398 	if (sff.sff_data[SFF8472_TCC_ECC] & SFF8472_TCC_ECC_1000_T) {
399 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_T, 0, NULL);
400 		mii->mii_media_active = IFM_ETHER | IFM_1000_T | IFM_FDX;
401 	}
402 
403 	/* SFP+ */
404 	if (sff.sff_data[SFF8472_TCC_XCC] & SFF8472_TCC_XCC_10G_SR) {
405 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_10G_SR, 0, NULL);
406 		mii->mii_media_active = IFM_ETHER | IFM_10G_SR | IFM_FDX;
407 	}
408 	if (sff.sff_data[SFF8472_TCC_XCC] & SFF8472_TCC_XCC_10G_LR) {
409 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_10G_LR, 0, NULL);
410 		mii->mii_media_active = IFM_ETHER | IFM_10G_LR | IFM_FDX;
411 	}
412 	if (sff.sff_data[SFF8472_TCC_XCC] & SFF8472_TCC_XCC_10G_LRM) {
413 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_10G_LRM, 0, NULL);
414 		mii->mii_media_active = IFM_ETHER | IFM_10G_LRM | IFM_FDX;
415 	}
416 	if (sff.sff_data[SFF8472_TCC_XCC] & SFF8472_TCC_XCC_10G_ER) {
417 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_10G_ER, 0, NULL);
418 		mii->mii_media_active = IFM_ETHER | IFM_10G_ER | IFM_FDX;
419 	}
420 
421 	/* SFP+ DAC */
422 	if (sff.sff_data[SFF8472_TCC_SCT] & SFF8472_TCC_SCT_PASSIVE ||
423 	    sff.sff_data[SFF8472_TCC_SCT] & SFF8472_TCC_SCT_ACTIVE) {
424 		ifmedia_add(&mii->mii_media,
425 		    IFM_ETHER | IFM_10G_SFP_CU, 0, NULL);
426 		mii->mii_media_active = IFM_ETHER | IFM_10G_SFP_CU | IFM_FDX;
427 	}
428 
429 	return 0;
430 }
431 
432 /*
433  * PWM support.
434  */
435 
/* Global list of registered PWM providers. */
LIST_HEAD(, pwm_device) pwm_devices =
	LIST_HEAD_INITIALIZER(pwm_devices);
438 
439 void
440 pwm_register(struct pwm_device *pd)
441 {
442 	pd->pd_cells = OF_getpropint(pd->pd_node, "#pwm-cells", 0);
443 	pd->pd_phandle = OF_getpropint(pd->pd_node, "phandle", 0);
444 	if (pd->pd_phandle == 0)
445 		return;
446 
447 	LIST_INSERT_HEAD(&pwm_devices, pd, pd_list);
448 
449 }
450 
451 int
452 pwm_init_state(uint32_t *cells, struct pwm_state *ps)
453 {
454 	struct pwm_device *pd;
455 
456 	LIST_FOREACH(pd, &pwm_devices, pd_list) {
457 		if (pd->pd_phandle == cells[0]) {
458 			memset(ps, 0, sizeof(struct pwm_state));
459 			pd->pd_get_state(pd->pd_cookie, &cells[1], ps);
460 			ps->ps_pulse_width = 0;
461 			if (pd->pd_cells >= 2)
462 				ps->ps_period = cells[2];
463 			if (pd->pd_cells >= 3)
464 				ps->ps_flags = cells[3];
465 			return 0;
466 		}
467 	}
468 
469 	return ENXIO;
470 }
471 
472 int
473 pwm_get_state(uint32_t *cells, struct pwm_state *ps)
474 {
475 	struct pwm_device *pd;
476 
477 	LIST_FOREACH(pd, &pwm_devices, pd_list) {
478 		if (pd->pd_phandle == cells[0])
479 			return pd->pd_get_state(pd->pd_cookie, &cells[1], ps);
480 	}
481 
482 	return ENXIO;
483 }
484 
485 int
486 pwm_set_state(uint32_t *cells, struct pwm_state *ps)
487 {
488 	struct pwm_device *pd;
489 
490 	LIST_FOREACH(pd, &pwm_devices, pd_list) {
491 		if (pd->pd_phandle == cells[0])
492 			return pd->pd_set_state(pd->pd_cookie, &cells[1], ps);
493 	}
494 
495 	return ENXIO;
496 }
497 
498 /*
499  * Non-volatile memory support.
500  */
501 
/* Global list of registered NVMEM providers. */
LIST_HEAD(, nvmem_device) nvmem_devices =
	LIST_HEAD_INITIALIZER(nvmem_devices);

/* A named cell: a fixed sub-region of an NVMEM provider. */
struct nvmem_cell {
	uint32_t	nc_phandle;	/* the cell node's "phandle" */
	struct nvmem_device *nc_nd;	/* provider the cell lives on */
	bus_addr_t	nc_addr;	/* offset within the provider */
	bus_size_t	nc_size;	/* size of the cell in bytes */

	LIST_ENTRY(nvmem_cell) nc_list;	/* linkage on nvmem_cells */
};

/* Global list of registered NVMEM cells. */
LIST_HEAD(, nvmem_cell) nvmem_cells =
	LIST_HEAD_INITIALIZER(nvmem_cells);
516 
517 void
518 nvmem_register_child(int node, struct nvmem_device *nd)
519 {
520 	struct nvmem_cell *nc;
521 	uint32_t phandle;
522 	uint32_t reg[2];
523 
524 	phandle = OF_getpropint(node, "phandle", 0);
525 	if (phandle == 0)
526 		return;
527 
528 	if (OF_getpropintarray(node, "reg", reg, sizeof(reg)) != sizeof(reg))
529 		return;
530 
531 	nc = malloc(sizeof(struct nvmem_cell), M_DEVBUF, M_WAITOK);
532 	nc->nc_phandle = phandle;
533 	nc->nc_nd = nd;
534 	nc->nc_addr = reg[0];
535 	nc->nc_size = reg[1];
536 	LIST_INSERT_HEAD(&nvmem_cells, nc, nc_list);
537 }
538 
539 void
540 nvmem_register(struct nvmem_device *nd)
541 {
542 	int node;
543 
544 	nd->nd_phandle = OF_getpropint(nd->nd_node, "phandle", 0);
545 	if (nd->nd_phandle)
546 		LIST_INSERT_HEAD(&nvmem_devices, nd, nd_list);
547 
548 	for (node = OF_child(nd->nd_node); node; node = OF_peer(node))
549 		nvmem_register_child(node, nd);
550 }
551 
552 int
553 nvmem_read(uint32_t phandle, bus_addr_t addr, void *data, bus_size_t size)
554 {
555 	struct nvmem_device *nd;
556 
557 	if (phandle == 0)
558 		return ENXIO;
559 
560 	LIST_FOREACH(nd, &nvmem_devices, nd_list) {
561 		if (nd->nd_phandle == phandle)
562 			return nd->nd_read(nd->nd_cookie, addr, data, size);
563 	}
564 
565 	return ENXIO;
566 }
567 
568 int
569 nvmem_read_cell(int node, const char *name, void *data, bus_size_t size)
570 {
571 	struct nvmem_device *nd;
572 	struct nvmem_cell *nc;
573 	uint32_t phandle, *phandles;
574 	int id, len;
575 
576 	id = OF_getindex(node, name, "nvmem-cell-names");
577 	if (id < 0)
578 		return ENXIO;
579 
580 	len = OF_getproplen(node, "nvmem-cells");
581 	if (len <= 0)
582 		return ENXIO;
583 
584 	phandles = malloc(len, M_TEMP, M_WAITOK);
585 	OF_getpropintarray(node, "nvmem-cells", phandles, len);
586 	phandle = phandles[id];
587 	free(phandles, M_TEMP, len);
588 
589 	LIST_FOREACH(nc, &nvmem_cells, nc_list) {
590 		if (nc->nc_phandle == phandle)
591 			break;
592 	}
593 	if (nc == NULL)
594 		return ENXIO;
595 
596 	if (size > nc->nc_size)
597 		return EINVAL;
598 
599 	nd = nc->nc_nd;
600 	return nd->nd_read(nd->nd_cookie, nc->nc_addr, data, size);
601 }
602 
603 /* Port/endpoint interface support */
604 
605 LIST_HEAD(, endpoint) endpoints =
606 	LIST_HEAD_INITIALIZER(endpoints);
607 
608 void
609 endpoint_register(int node, struct device_port *dp, enum endpoint_type type)
610 {
611 	struct endpoint *ep;
612 
613 	ep = malloc(sizeof(*ep), M_DEVBUF, M_WAITOK);
614 	ep->ep_node = node;
615 	ep->ep_phandle = OF_getpropint(node, "phandle", 0);
616 	ep->ep_reg = OF_getpropint(node, "reg", -1);
617 	ep->ep_port = dp;
618 	ep->ep_type = type;
619 
620 	LIST_INSERT_HEAD(&endpoints, ep, ep_list);
621 	LIST_INSERT_HEAD(&dp->dp_endpoints, ep, ep_plist);
622 }
623 
624 void
625 device_port_register(int node, struct device_ports *ports,
626     enum endpoint_type type)
627 {
628 	struct device_port *dp;
629 
630 	dp = malloc(sizeof(*dp), M_DEVBUF, M_WAITOK);
631 	dp->dp_node = node;
632 	dp->dp_phandle = OF_getpropint(node, "phandle", 0);
633 	dp->dp_reg = OF_getpropint(node, "reg", -1);
634 	dp->dp_ports = ports;
635 	LIST_INIT(&dp->dp_endpoints);
636 	for (node = OF_child(node); node; node = OF_peer(node))
637 		endpoint_register(node, dp, type);
638 
639 	LIST_INSERT_HEAD(&ports->dp_ports, dp, dp_list);
640 }
641 
642 void
643 device_ports_register(struct device_ports *ports,
644     enum endpoint_type type)
645 {
646 	int node;
647 
648 	LIST_INIT(&ports->dp_ports);
649 
650 	node = OF_getnodebyname(ports->dp_node, "ports");
651 	if (node == 0) {
652 		node = OF_getnodebyname(ports->dp_node, "port");
653 		if (node == 0)
654 			return;
655 
656 		device_port_register(node, ports, type);
657 		return;
658 	}
659 
660 	for (node = OF_child(node); node; node = OF_peer(node))
661 		device_port_register(node, ports, type);
662 }
663 
664 struct endpoint *
665 endpoint_byphandle(uint32_t phandle)
666 {
667 	struct endpoint *ep;
668 
669 	if (phandle == 0)
670 		return NULL;
671 
672 	LIST_FOREACH(ep, &endpoints, ep_list) {
673 		if (ep->ep_phandle == phandle)
674 			return ep;
675 	}
676 
677 	return NULL;
678 }
679 
680 struct endpoint *
681 endpoint_byreg(struct device_ports *ports, uint32_t dp_reg, uint32_t ep_reg)
682 {
683 	struct device_port *dp;
684 	struct endpoint *ep;
685 
686 	LIST_FOREACH(dp, &ports->dp_ports, dp_list) {
687 		if (dp->dp_reg != dp_reg)
688 			continue;
689 		LIST_FOREACH(ep, &dp->dp_endpoints, ep_list) {
690 			if (ep->ep_reg != ep_reg)
691 				continue;
692 			return ep;
693 		}
694 	}
695 
696 	return NULL;
697 }
698 
699 struct endpoint *
700 endpoint_remote(struct endpoint *ep)
701 {
702 	struct endpoint *rep;
703 	int phandle;
704 
705 	phandle = OF_getpropint(ep->ep_node, "remote-endpoint", 0);
706 	if (phandle == 0)
707 		return NULL;
708 
709 	LIST_FOREACH(rep, &endpoints, ep_list) {
710 		if (rep->ep_phandle == phandle)
711 			return rep;
712 	}
713 
714 	return NULL;
715 }
716 
/*
 * Activate an endpoint through the activation hook of the
 * device_ports instance its port belongs to.
 */
int
endpoint_activate(struct endpoint *ep, void *arg)
{
	struct device_ports *ports = ep->ep_port->dp_ports;
	return ports->dp_ep_activate(ports->dp_cookie, ep, arg);
}
723 
/*
 * Fetch the driver cookie for an endpoint through its owning
 * device_ports instance.
 */
void *
endpoint_get_cookie(struct endpoint *ep)
{
	struct device_ports *ports = ep->ep_port->dp_ports;
	return ports->dp_ep_get_cookie(ports->dp_cookie, ep);
}
730 
731 int
732 device_port_activate(uint32_t phandle, void *arg)
733 {
734 	struct device_port *dp = NULL;
735 	struct endpoint *ep, *rep;
736 	int count;
737 	int error;
738 
739 	if (phandle == 0)
740 		return ENXIO;
741 
742 	LIST_FOREACH(ep, &endpoints, ep_list) {
743 		if (ep->ep_port->dp_phandle == phandle) {
744 			dp = ep->ep_port;
745 			break;
746 		}
747 	}
748 	if (dp == NULL)
749 		return ENXIO;
750 
751 	count = 0;
752 	LIST_FOREACH(ep, &dp->dp_endpoints, ep_plist) {
753 		rep = endpoint_remote(ep);
754 		if (rep == NULL)
755 			continue;
756 
757 		error = endpoint_activate(ep, arg);
758 		if (error)
759 			continue;
760 		error = endpoint_activate(rep, arg);
761 		if (error)
762 			continue;
763 		count++;
764 	}
765 
766 	return count ? 0 : ENXIO;
767 }
768 
769 /* Digital audio interface support */
770 
771 LIST_HEAD(, dai_device) dai_devices =
772 	LIST_HEAD_INITIALIZER(dai_devices);
773 
774 void
775 dai_register(struct dai_device *dd)
776 {
777 	dd->dd_phandle = OF_getpropint(dd->dd_node, "phandle", 0);
778 	if (dd->dd_phandle == 0)
779 		return;
780 
781 	LIST_INSERT_HEAD(&dai_devices, dd, dd_list);
782 }
783 
784 struct dai_device *
785 dai_byphandle(uint32_t phandle)
786 {
787 	struct dai_device *dd;
788 
789 	if (phandle == 0)
790 		return NULL;
791 
792 	LIST_FOREACH(dd, &dai_devices, dd_list) {
793 		if (dd->dd_phandle == phandle)
794 			return dd;
795 	}
796 
797 	return NULL;
798 }
799 
800 /* MII support */
801 
/* Global list of registered MDIO (MII) busses. */
LIST_HEAD(, mii_bus) mii_busses =
	LIST_HEAD_INITIALIZER(mii_busses);

/* Register an MDIO bus on the global list. */
void
mii_register(struct mii_bus *md)
{
	LIST_INSERT_HEAD(&mii_busses, md, md_list);
}
810 
811 struct mii_bus *
812 mii_bynode(int node)
813 {
814 	struct mii_bus *md;
815 
816 	LIST_FOREACH(md, &mii_busses, md_list) {
817 		if (md->md_node == node)
818 			return md;
819 	}
820 
821 	return NULL;
822 }
823 
824 struct mii_bus *
825 mii_byphandle(uint32_t phandle)
826 {
827 	int node;
828 
829 	if (phandle == 0)
830 		return NULL;
831 
832 	node = OF_getnodebyphandle(phandle);
833 	if (node == 0)
834 		return NULL;
835 
836 	node = OF_parent(node);
837 	if (node == 0)
838 		return NULL;
839 
840 	return mii_bynode(node);
841 }
842 
843 /* IOMMU support */
844 
845 LIST_HEAD(, iommu_device) iommu_devices =
846 	LIST_HEAD_INITIALIZER(iommu_devices);
847 
848 void
849 iommu_device_register(struct iommu_device *id)
850 {
851 	id->id_phandle = OF_getpropint(id->id_node, "phandle", 0);
852 	if (id->id_phandle == 0)
853 		return;
854 
855 	LIST_INSERT_HEAD(&iommu_devices, id, id_list);
856 }
857 
858 bus_dma_tag_t
859 iommu_device_do_map(uint32_t phandle, uint32_t *cells, bus_dma_tag_t dmat)
860 {
861 	struct iommu_device *id;
862 
863 	if (phandle == 0)
864 		return dmat;
865 
866 	LIST_FOREACH(id, &iommu_devices, id_list) {
867 		if (id->id_phandle == phandle)
868 			return id->id_map(id->id_cookie, cells, dmat);
869 	}
870 
871 	return dmat;
872 }
873 
874 bus_dma_tag_t
875 iommu_device_map(int node, bus_dma_tag_t dmat)
876 {
877 	uint32_t sid = 0;
878 	uint32_t phandle = 0;
879 	uint32_t *cell;
880 	uint32_t *map;
881 	int len, icells, ncells;
882 
883 	len = OF_getproplen(node, "iommus");
884 	if (len <= 0)
885 		return dmat;
886 
887 	map = malloc(len, M_TEMP, M_WAITOK);
888 	OF_getpropintarray(node, "iommus", map, len);
889 
890 	cell = map;
891 	ncells = len / sizeof(uint32_t);
892 	while (ncells > 1) {
893 		node = OF_getnodebyphandle(cell[0]);
894 		if (node == 0)
895 			goto out;
896 
897 		icells = OF_getpropint(node, "#iommu-cells", 1);
898 		if (ncells < icells + 1)
899 			goto out;
900 
901 		KASSERT(icells == 1);
902 
903 		phandle = cell[0];
904 		sid = cell[1];
905 		break;
906 
907 		cell += (1 + icells);
908 		ncells -= (1 + icells);
909 	}
910 
911 out:
912 	free(map, M_TEMP, len);
913 
914 	return iommu_device_do_map(phandle, &sid, dmat);
915 }
916 
917 bus_dma_tag_t
918 iommu_device_map_pci(int node, uint32_t rid, bus_dma_tag_t dmat)
919 {
920 	uint32_t sid_base, sid = 0;
921 	uint32_t phandle = 0;
922 	uint32_t *cell;
923 	uint32_t *map;
924 	uint32_t mask, rid_base;
925 	int len, length, icells, ncells;
926 
927 	len = OF_getproplen(node, "iommu-map");
928 	if (len <= 0)
929 		return dmat;
930 
931 	map = malloc(len, M_TEMP, M_WAITOK);
932 	OF_getpropintarray(node, "iommu-map", map, len);
933 
934 	mask = OF_getpropint(node, "msi-map-mask", 0xffff);
935 	rid = rid & mask;
936 
937 	cell = map;
938 	ncells = len / sizeof(uint32_t);
939 	while (ncells > 1) {
940 		node = OF_getnodebyphandle(cell[1]);
941 		if (node == 0)
942 			goto out;
943 
944 		icells = OF_getpropint(node, "#iommu-cells", 1);
945 		if (ncells < icells + 3)
946 			goto out;
947 
948 		KASSERT(icells == 1);
949 
950 		rid_base = cell[0];
951 		sid_base = cell[2];
952 		length = cell[3];
953 		if (rid >= rid_base && rid < rid_base + length) {
954 			sid = sid_base + (rid - rid_base);
955 			phandle = cell[1];
956 			break;
957 		}
958 
959 		cell += 4;
960 		ncells -= 4;
961 	}
962 
963 out:
964 	free(map, M_TEMP, len);
965 
966 	return iommu_device_do_map(phandle, &sid, dmat);
967 }
968