xref: /openbsd-src/sys/dev/ofw/ofw_misc.c (revision c1a45aed656e7d5627c30c92421893a76f370ccb)
/*	$OpenBSD: ofw_misc.c,v 1.36 2022/03/25 15:49:29 kettenis Exp $	*/
/*
 * Copyright (c) 2017-2021 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_gpio.h>
#include <dev/ofw/ofw_misc.h>
#include <dev/ofw/ofw_regulator.h>

/*
 * Register maps.
 */

struct regmap {
	int			rm_node;
	uint32_t		rm_phandle;
	bus_space_tag_t		rm_tag;
	bus_space_handle_t	rm_handle;
	bus_size_t		rm_size;

	LIST_ENTRY(regmap)	rm_list;
};

LIST_HEAD(, regmap) regmaps = LIST_HEAD_INITIALIZER(regmaps);

void
regmap_register(int node, bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t size)
{
	struct regmap *rm;

	rm = malloc(sizeof(struct regmap), M_DEVBUF, M_WAITOK);
	rm->rm_node = node;
	rm->rm_phandle = OF_getpropint(node, "phandle", 0);
	rm->rm_tag = tag;
	rm->rm_handle = handle;
	rm->rm_size = size;
	LIST_INSERT_HEAD(&regmaps, rm, rm_list);
}

struct regmap *
regmap_bycompatible(char *compatible)
{
	struct regmap *rm;

	LIST_FOREACH(rm, &regmaps, rm_list) {
		if (OF_is_compatible(rm->rm_node, compatible))
			return rm;
	}

	return NULL;
}

struct regmap *
regmap_bynode(int node)
{
	struct regmap *rm;

	LIST_FOREACH(rm, &regmaps, rm_list) {
		if (rm->rm_node == node)
			return rm;
	}

	return NULL;
}

struct regmap *
regmap_byphandle(uint32_t phandle)
{
	struct regmap *rm;

	if (phandle == 0)
		return NULL;

	LIST_FOREACH(rm, &regmaps, rm_list) {
		if (rm->rm_phandle == phandle)
			return rm;
	}

	return NULL;
}

void
regmap_write_4(struct regmap *rm, bus_size_t offset, uint32_t value)
{
	KASSERT(offset <= rm->rm_size - sizeof(uint32_t));
	bus_space_write_4(rm->rm_tag, rm->rm_handle, offset, value);
}

uint32_t
regmap_read_4(struct regmap *rm, bus_size_t offset)
{
	KASSERT(offset <= rm->rm_size - sizeof(uint32_t));
	return bus_space_read_4(rm->rm_tag, rm->rm_handle, offset);
}
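
/*
 * Example (sketch, not built): how a typical consumer uses the regmap
 * API.  The "regmap" property name and the register offset below are
 * illustrative; real bindings define their own.
 */
#if 0
int
example_regmap_set_bit(int node)
{
	struct regmap *rm;
	uint32_t val;

	rm = regmap_byphandle(OF_getpropint(node, "regmap", 0));
	if (rm == NULL)
		return ENXIO;

	val = regmap_read_4(rm, 0x10);		/* hypothetical offset */
	regmap_write_4(rm, 0x10, val | 0x1);
	return 0;
}
#endif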

/*
 * PHY support.
 */

LIST_HEAD(, phy_device) phy_devices =
	LIST_HEAD_INITIALIZER(phy_devices);

void
phy_register(struct phy_device *pd)
{
	pd->pd_cells = OF_getpropint(pd->pd_node, "#phy-cells", 0);
	pd->pd_phandle = OF_getpropint(pd->pd_node, "phandle", 0);
	if (pd->pd_phandle == 0)
		return;

	LIST_INSERT_HEAD(&phy_devices, pd, pd_list);
}

int
phy_usb_nop_enable(int node)
{
	uint32_t vcc_supply;
	uint32_t *gpio;
	int len;

	vcc_supply = OF_getpropint(node, "vcc-supply", 0);
	if (vcc_supply)
		regulator_enable(vcc_supply);

	len = OF_getproplen(node, "reset-gpios");
	if (len <= 0)
		return 0;

	/* There should only be a single GPIO pin. */
	gpio = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "reset-gpios", gpio, len);

	gpio_controller_config_pin(gpio, GPIO_CONFIG_OUTPUT);
	gpio_controller_set_pin(gpio, 1);
	delay(10000);
	gpio_controller_set_pin(gpio, 0);

	free(gpio, M_TEMP, len);

	return 0;
}

int
phy_enable_cells(uint32_t *cells)
{
	struct phy_device *pd;
	uint32_t phandle = cells[0];
	int node;

	LIST_FOREACH(pd, &phy_devices, pd_list) {
		if (pd->pd_phandle == phandle)
			break;
	}

	if (pd && pd->pd_enable)
		return pd->pd_enable(pd->pd_cookie, &cells[1]);

	node = OF_getnodebyphandle(phandle);
	if (node == 0)
		return ENXIO;

	if (OF_is_compatible(node, "usb-nop-xceiv"))
		return phy_usb_nop_enable(node);

	return ENXIO;
}

uint32_t *
phy_next_phy(uint32_t *cells)
{
	uint32_t phandle = cells[0];
	int node, ncells;

	node = OF_getnodebyphandle(phandle);
	if (node == 0)
		return NULL;

	ncells = OF_getpropint(node, "#phy-cells", 0);
	return cells + ncells + 1;
}

int
phy_enable_idx(int node, int idx)
{
	uint32_t *phys;
	uint32_t *phy;
	int rv = -1;
	int len;

	len = OF_getproplen(node, "phys");
	if (len <= 0)
		return -1;

	phys = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "phys", phys, len);

	phy = phys;
	while (phy && phy < phys + (len / sizeof(uint32_t))) {
		if (idx <= 0)
			rv = phy_enable_cells(phy);
		if (idx == 0)
			break;
		phy = phy_next_phy(phy);
		idx--;
	}

	free(phys, M_TEMP, len);
	return rv;
}

int
phy_enable(int node, const char *name)
{
	int idx;

	idx = OF_getindex(node, name, "phy-names");
	if (idx == -1)
		return -1;

	return phy_enable_idx(node, idx);
}
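
/*
 * Example (sketch, not built): a host controller driver enabling the
 * PHY named "usb" in its "phy-names" property.  phy_enable() returns
 * 0 on success; the "usb-nop-xceiv" fallback above handles PHYs
 * without a registered driver.
 */
#if 0
void
example_enable_usb_phy(int node)
{
	if (phy_enable(node, "usb") != 0)
		printf("couldn't enable USB PHY\n");
}
#endif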

/*
 * I2C support.
 */

LIST_HEAD(, i2c_bus) i2c_busses =
	LIST_HEAD_INITIALIZER(i2c_busses);

void
i2c_register(struct i2c_bus *ib)
{
	ib->ib_phandle = OF_getpropint(ib->ib_node, "phandle", 0);
	if (ib->ib_phandle == 0)
		return;

	LIST_INSERT_HEAD(&i2c_busses, ib, ib_list);
}

struct i2c_controller *
i2c_bynode(int node)
{
	struct i2c_bus *ib;

	LIST_FOREACH(ib, &i2c_busses, ib_list) {
		if (ib->ib_node == node)
			return ib->ib_ic;
	}

	return NULL;
}

struct i2c_controller *
i2c_byphandle(uint32_t phandle)
{
	struct i2c_bus *ib;

	if (phandle == 0)
		return NULL;

	LIST_FOREACH(ib, &i2c_busses, ib_list) {
		if (ib->ib_phandle == phandle)
			return ib->ib_ic;
	}

	return NULL;
}
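
/*
 * Example (sketch, not built): resolving an i2c_controller from a
 * phandle reference in the device tree.  The "i2c-bus" property name
 * is illustrative, not taken from a specific binding.
 */
#if 0
struct i2c_controller *
example_find_i2c(int node)
{
	return i2c_byphandle(OF_getpropint(node, "i2c-bus", 0));
}
#endif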

/*
 * SFP support.
 */

LIST_HEAD(, sfp_device) sfp_devices =
	LIST_HEAD_INITIALIZER(sfp_devices);

void
sfp_register(struct sfp_device *sd)
{
	sd->sd_phandle = OF_getpropint(sd->sd_node, "phandle", 0);
	if (sd->sd_phandle == 0)
		return;

	LIST_INSERT_HEAD(&sfp_devices, sd, sd_list);
}

int
sfp_do_enable(uint32_t phandle, int enable)
{
	struct sfp_device *sd;

	if (phandle == 0)
		return ENXIO;

	LIST_FOREACH(sd, &sfp_devices, sd_list) {
		if (sd->sd_phandle == phandle)
			return sd->sd_enable(sd->sd_cookie, enable);
	}

	return ENXIO;
}

int
sfp_enable(uint32_t phandle)
{
	return sfp_do_enable(phandle, 1);
}

int
sfp_disable(uint32_t phandle)
{
	return sfp_do_enable(phandle, 0);
}

int
sfp_get_sffpage(uint32_t phandle, struct if_sffpage *sff)
{
	struct sfp_device *sd;

	if (phandle == 0)
		return ENXIO;

	LIST_FOREACH(sd, &sfp_devices, sd_list) {
		if (sd->sd_phandle == phandle)
			return sd->sd_get_sffpage(sd->sd_cookie, sff);
	}

	return ENXIO;
}

#define SFF8472_TCC_XCC			3 /* 10G Ethernet Compliance Codes */
#define SFF8472_TCC_XCC_10G_SR		(1 << 4)
#define SFF8472_TCC_XCC_10G_LR		(1 << 5)
#define SFF8472_TCC_XCC_10G_LRM		(1 << 6)
#define SFF8472_TCC_XCC_10G_ER		(1 << 7)
#define SFF8472_TCC_ECC			6 /* Ethernet Compliance Codes */
#define SFF8472_TCC_ECC_1000_SX		(1 << 0)
#define SFF8472_TCC_ECC_1000_LX		(1 << 1)
#define SFF8472_TCC_ECC_1000_CX		(1 << 2)
#define SFF8472_TCC_ECC_1000_T		(1 << 3)
#define SFF8472_TCC_SCT			8 /* SFP+ Cable Technology */
#define SFF8472_TCC_SCT_PASSIVE		(1 << 2)
#define SFF8472_TCC_SCT_ACTIVE		(1 << 3)

int
sfp_add_media(uint32_t phandle, struct mii_data *mii)
{
	struct if_sffpage sff;
	int error;

	memset(&sff, 0, sizeof(sff));
	sff.sff_addr = IFSFF_ADDR_EEPROM;
	sff.sff_page = 0;

	error = sfp_get_sffpage(phandle, &sff);
	if (error)
		return error;

	/* SFP */
	if (sff.sff_data[SFF8472_TCC_ECC] & SFF8472_TCC_ECC_1000_SX) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_SX, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_1000_SX | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_ECC] & SFF8472_TCC_ECC_1000_LX) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_LX, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_1000_LX | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_ECC] & SFF8472_TCC_ECC_1000_CX) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_CX, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_1000_CX | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_ECC] & SFF8472_TCC_ECC_1000_T) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_T, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_1000_T | IFM_FDX;
	}

	/* SFP+ */
	if (sff.sff_data[SFF8472_TCC_XCC] & SFF8472_TCC_XCC_10G_SR) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_10G_SR | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_XCC] & SFF8472_TCC_XCC_10G_LR) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_10G_LR | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_XCC] & SFF8472_TCC_XCC_10G_LRM) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_10G_LRM, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_10G_LRM | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_XCC] & SFF8472_TCC_XCC_10G_ER) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_10G_ER, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_10G_ER | IFM_FDX;
	}

	/* SFP+ DAC */
	if (sff.sff_data[SFF8472_TCC_SCT] & SFF8472_TCC_SCT_PASSIVE ||
	    sff.sff_data[SFF8472_TCC_SCT] & SFF8472_TCC_SCT_ACTIVE) {
		ifmedia_add(&mii->mii_media,
		    IFM_ETHER | IFM_10G_SFP_CU, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_10G_SFP_CU | IFM_FDX;
	}

	return 0;
}
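
/*
 * Example (sketch, not built): an Ethernet driver powering up the
 * module referenced by its "sfp" property and deriving the supported
 * media from the module EEPROM.
 */
#if 0
void
example_sfp_init(int node, struct mii_data *mii)
{
	uint32_t sfp;

	sfp = OF_getpropint(node, "sfp", 0);
	if (sfp == 0)
		return;

	sfp_enable(sfp);
	sfp_add_media(sfp, mii);
}
#endif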

/*
 * PWM support.
 */

LIST_HEAD(, pwm_device) pwm_devices =
	LIST_HEAD_INITIALIZER(pwm_devices);

void
pwm_register(struct pwm_device *pd)
{
	pd->pd_cells = OF_getpropint(pd->pd_node, "#pwm-cells", 0);
	pd->pd_phandle = OF_getpropint(pd->pd_node, "phandle", 0);
	if (pd->pd_phandle == 0)
		return;

	LIST_INSERT_HEAD(&pwm_devices, pd, pd_list);
}

int
pwm_init_state(uint32_t *cells, struct pwm_state *ps)
{
	struct pwm_device *pd;

	LIST_FOREACH(pd, &pwm_devices, pd_list) {
		if (pd->pd_phandle == cells[0]) {
			memset(ps, 0, sizeof(struct pwm_state));
			pd->pd_get_state(pd->pd_cookie, &cells[1], ps);
			ps->ps_pulse_width = 0;
			if (pd->pd_cells >= 2)
				ps->ps_period = cells[2];
			if (pd->pd_cells >= 3)
				ps->ps_flags = cells[3];
			return 0;
		}
	}

	return ENXIO;
}

int
pwm_get_state(uint32_t *cells, struct pwm_state *ps)
{
	struct pwm_device *pd;

	LIST_FOREACH(pd, &pwm_devices, pd_list) {
		if (pd->pd_phandle == cells[0])
			return pd->pd_get_state(pd->pd_cookie, &cells[1], ps);
	}

	return ENXIO;
}

int
pwm_set_state(uint32_t *cells, struct pwm_state *ps)
{
	struct pwm_device *pd;

	LIST_FOREACH(pd, &pwm_devices, pd_list) {
		if (pd->pd_phandle == cells[0])
			return pd->pd_set_state(pd->pd_cookie, &cells[1], ps);
	}

	return ENXIO;
}
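
/*
 * Example (sketch, not built): setting a 50% duty cycle on a PWM
 * referenced by a "pwms" property, e.g. for a backlight.  Assumes
 * struct pwm_state carries ps_period, ps_pulse_width and ps_enabled
 * as declared in ofw_misc.h.
 */
#if 0
int
example_pwm_half(uint32_t *cells)	/* cells point into "pwms" */
{
	struct pwm_state ps;

	if (pwm_init_state(cells, &ps))
		return ENXIO;

	ps.ps_pulse_width = ps.ps_period / 2;
	ps.ps_enabled = 1;
	return pwm_set_state(cells, &ps);
}
#endif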

/*
 * Non-volatile memory support.
 */

LIST_HEAD(, nvmem_device) nvmem_devices =
	LIST_HEAD_INITIALIZER(nvmem_devices);

struct nvmem_cell {
	uint32_t	nc_phandle;
	struct nvmem_device *nc_nd;
	bus_addr_t	nc_addr;
	bus_size_t	nc_size;
	uint32_t	nc_offset;
	uint32_t	nc_bitlen;

	LIST_ENTRY(nvmem_cell) nc_list;
};

LIST_HEAD(, nvmem_cell) nvmem_cells =
	LIST_HEAD_INITIALIZER(nvmem_cells);

void
nvmem_register_child(int node, struct nvmem_device *nd)
{
	struct nvmem_cell *nc;
	uint32_t phandle;
	uint32_t reg[2], bits[2] = {};

	phandle = OF_getpropint(node, "phandle", 0);
	if (phandle == 0)
		return;

	if (OF_getpropintarray(node, "reg", reg, sizeof(reg)) != sizeof(reg))
		return;

	OF_getpropintarray(node, "bits", bits, sizeof(bits));

	nc = malloc(sizeof(struct nvmem_cell), M_DEVBUF, M_WAITOK);
	nc->nc_phandle = phandle;
	nc->nc_nd = nd;
	nc->nc_addr = reg[0];
	nc->nc_size = reg[1];
	nc->nc_offset = bits[0];
	nc->nc_bitlen = bits[1];
	LIST_INSERT_HEAD(&nvmem_cells, nc, nc_list);
}

void
nvmem_register(struct nvmem_device *nd)
{
	int node;

	nd->nd_phandle = OF_getpropint(nd->nd_node, "phandle", 0);
	if (nd->nd_phandle)
		LIST_INSERT_HEAD(&nvmem_devices, nd, nd_list);

	for (node = OF_child(nd->nd_node); node; node = OF_peer(node))
		nvmem_register_child(node, nd);
}

int
nvmem_read(uint32_t phandle, bus_addr_t addr, void *data, bus_size_t size)
{
	struct nvmem_device *nd;

	if (phandle == 0)
		return ENXIO;

	LIST_FOREACH(nd, &nvmem_devices, nd_list) {
		if (nd->nd_phandle == phandle)
			return nd->nd_read(nd->nd_cookie, addr, data, size);
	}

	return ENXIO;
}

int
nvmem_read_cell(int node, const char *name, void *data, bus_size_t size)
{
	struct nvmem_device *nd;
	struct nvmem_cell *nc;
	uint32_t phandle, *phandles;
	uint32_t offset, bitlen;
	int id, len, first;

	id = OF_getindex(node, name, "nvmem-cell-names");
	if (id < 0)
		return ENXIO;

	len = OF_getproplen(node, "nvmem-cells");
	if (len <= 0)
		return ENXIO;

	phandles = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "nvmem-cells", phandles, len);
	phandle = phandles[id];
	free(phandles, M_TEMP, len);

	LIST_FOREACH(nc, &nvmem_cells, nc_list) {
		if (nc->nc_phandle == phandle)
			break;
	}
	if (nc == NULL)
		return ENXIO;

	nd = nc->nc_nd;
	if (nd->nd_read == NULL)
		return EACCES;

	/*
	 * Cells with a "bits" property are bit-granular: read the
	 * containing byte and shift the field into place.
	 */
	first = 1;
	offset = nc->nc_offset;
	bitlen = nc->nc_bitlen;
	while (bitlen > 0 && size > 0) {
		uint8_t *p = data;
		uint8_t mask, tmp;
		int error;

		error = nd->nd_read(nd->nd_cookie, nc->nc_addr, &tmp, 1);
		if (error)
			return error;

		if (bitlen >= 8)
			mask = 0xff;
		else
			mask = (1 << bitlen) - 1;

		if (!first) {
			*p++ |= (tmp << (8 - offset)) & (mask << (8 - offset));
			bitlen -= MIN(offset, bitlen);
			size--;
		}

		if (bitlen > 0 && size > 0) {
			*p = (tmp >> offset) & (mask >> offset);
			bitlen -= MIN(8 - offset, bitlen);
		}

		first = 0;
	}
	if (nc->nc_bitlen > 0)
		return 0;

	if (size > nc->nc_size)
		return EINVAL;

	return nd->nd_read(nd->nd_cookie, nc->nc_addr, data, size);
}

int
nvmem_write_cell(int node, const char *name, const void *data, bus_size_t size)
{
	struct nvmem_device *nd;
	struct nvmem_cell *nc;
	uint32_t phandle, *phandles;
	uint32_t offset, bitlen;
	int id, len, first;

	id = OF_getindex(node, name, "nvmem-cell-names");
	if (id < 0)
		return ENXIO;

	len = OF_getproplen(node, "nvmem-cells");
	if (len <= 0)
		return ENXIO;

	phandles = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "nvmem-cells", phandles, len);
	phandle = phandles[id];
	free(phandles, M_TEMP, len);

	LIST_FOREACH(nc, &nvmem_cells, nc_list) {
		if (nc->nc_phandle == phandle)
			break;
	}
	if (nc == NULL)
		return ENXIO;

	nd = nc->nc_nd;
	if (nd->nd_write == NULL)
		return EACCES;

	/*
	 * For bit-granular cells, read-modify-write the containing
	 * byte so that neighbouring fields are preserved.
	 */
	first = 1;
	offset = nc->nc_offset;
	bitlen = nc->nc_bitlen;
	while (bitlen > 0 && size > 0) {
		const uint8_t *p = data;
		uint8_t mask, tmp;
		int error;

		error = nd->nd_read(nd->nd_cookie, nc->nc_addr, &tmp, 1);
		if (error)
			return error;

		if (bitlen >= 8)
			mask = 0xff;
		else
			mask = (1 << bitlen) - 1;

		tmp &= ~(mask << offset);
		tmp |= (*p++ << offset) & (mask << offset);
		bitlen -= MIN(8 - offset, bitlen);
		size--;

		if (!first && bitlen > 0 && size > 0) {
			tmp &= ~(mask >> (8 - offset));
			tmp |= (*p >> (8 - offset)) & (mask >> (8 - offset));
			bitlen -= MIN(offset, bitlen);
		}

		error = nd->nd_write(nd->nd_cookie, nc->nc_addr, &tmp, 1);
		if (error)
			return error;

		first = 0;
	}
	if (nc->nc_bitlen > 0)
		return 0;

	if (size > nc->nc_size)
		return EINVAL;

	return nd->nd_write(nd->nd_cookie, nc->nc_addr, data, size);
}
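
/*
 * Example (sketch, not built): reading a factory-programmed MAC
 * address from an nvmem cell, assuming the node carries matching
 * "nvmem-cells" and "nvmem-cell-names" properties.
 */
#if 0
int
example_get_macaddr(int node, uint8_t *enaddr)
{
	return nvmem_read_cell(node, "mac-address", enaddr, 6);
}
#endif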

/* Port/endpoint interface support */

LIST_HEAD(, endpoint) endpoints =
	LIST_HEAD_INITIALIZER(endpoints);

void
endpoint_register(int node, struct device_port *dp, enum endpoint_type type)
{
	struct endpoint *ep;

	ep = malloc(sizeof(*ep), M_DEVBUF, M_WAITOK);
	ep->ep_node = node;
	ep->ep_phandle = OF_getpropint(node, "phandle", 0);
	ep->ep_reg = OF_getpropint(node, "reg", -1);
	ep->ep_port = dp;
	ep->ep_type = type;

	LIST_INSERT_HEAD(&endpoints, ep, ep_list);
	LIST_INSERT_HEAD(&dp->dp_endpoints, ep, ep_plist);
}

void
device_port_register(int node, struct device_ports *ports,
    enum endpoint_type type)
{
	struct device_port *dp;

	dp = malloc(sizeof(*dp), M_DEVBUF, M_WAITOK);
	dp->dp_node = node;
	dp->dp_phandle = OF_getpropint(node, "phandle", 0);
	dp->dp_reg = OF_getpropint(node, "reg", -1);
	dp->dp_ports = ports;
	LIST_INIT(&dp->dp_endpoints);
	for (node = OF_child(node); node; node = OF_peer(node))
		endpoint_register(node, dp, type);

	LIST_INSERT_HEAD(&ports->dp_ports, dp, dp_list);
}

void
device_ports_register(struct device_ports *ports,
    enum endpoint_type type)
{
	int node;

	LIST_INIT(&ports->dp_ports);

	node = OF_getnodebyname(ports->dp_node, "ports");
	if (node == 0) {
		node = OF_getnodebyname(ports->dp_node, "port");
		if (node == 0)
			return;

		device_port_register(node, ports, type);
		return;
	}

	for (node = OF_child(node); node; node = OF_peer(node))
		device_port_register(node, ports, type);
}

struct device_ports *
device_ports_byphandle(uint32_t phandle)
{
	struct endpoint *ep;

	if (phandle == 0)
		return NULL;

	LIST_FOREACH(ep, &endpoints, ep_list) {
		if (ep->ep_port->dp_phandle == phandle)
			return ep->ep_port->dp_ports;
	}

	return NULL;
}

struct endpoint *
endpoint_byphandle(uint32_t phandle)
{
	struct endpoint *ep;

	if (phandle == 0)
		return NULL;

	LIST_FOREACH(ep, &endpoints, ep_list) {
		if (ep->ep_phandle == phandle)
			return ep;
	}

	return NULL;
}

struct endpoint *
endpoint_byreg(struct device_ports *ports, uint32_t dp_reg, uint32_t ep_reg)
{
	struct device_port *dp;
	struct endpoint *ep;

	LIST_FOREACH(dp, &ports->dp_ports, dp_list) {
		if (dp->dp_reg != dp_reg)
			continue;
		LIST_FOREACH(ep, &dp->dp_endpoints, ep_plist) {
			if (ep->ep_reg != ep_reg)
				continue;
			return ep;
		}
	}

	return NULL;
}

struct endpoint *
endpoint_remote(struct endpoint *ep)
{
	struct endpoint *rep;
	uint32_t phandle;

	phandle = OF_getpropint(ep->ep_node, "remote-endpoint", 0);
	if (phandle == 0)
		return NULL;

	LIST_FOREACH(rep, &endpoints, ep_list) {
		if (rep->ep_phandle == phandle)
			return rep;
	}

	return NULL;
}

int
endpoint_activate(struct endpoint *ep, void *arg)
{
	struct device_ports *ports = ep->ep_port->dp_ports;
	return ports->dp_ep_activate(ports->dp_cookie, ep, arg);
}

void *
endpoint_get_cookie(struct endpoint *ep)
{
	struct device_ports *ports = ep->ep_port->dp_ports;
	return ports->dp_ep_get_cookie(ports->dp_cookie, ep);
}

int
device_port_activate(uint32_t phandle, void *arg)
{
	struct device_port *dp = NULL;
	struct endpoint *ep, *rep;
	int count;
	int error;

	if (phandle == 0)
		return ENXIO;

	LIST_FOREACH(ep, &endpoints, ep_list) {
		if (ep->ep_port->dp_phandle == phandle) {
			dp = ep->ep_port;
			break;
		}
	}
	if (dp == NULL)
		return ENXIO;

	count = 0;
	LIST_FOREACH(ep, &dp->dp_endpoints, ep_plist) {
		rep = endpoint_remote(ep);
		if (rep == NULL)
			continue;

		error = endpoint_activate(ep, arg);
		if (error)
			continue;
		error = endpoint_activate(rep, arg);
		if (error)
			continue;
		count++;
	}

	return count ? 0 : ENXIO;
}
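
/*
 * Example (sketch, not built): a provider registering its ports, in
 * the same shape dai_register() uses below.  The softc layout and
 * callback names are illustrative.
 */
#if 0
void
example_register_ports(struct example_softc *sc)
{
	sc->sc_ports.dp_node = sc->sc_node;
	sc->sc_ports.dp_cookie = sc;
	sc->sc_ports.dp_ep_activate = example_ep_activate;
	sc->sc_ports.dp_ep_get_cookie = example_ep_get_cookie;
	device_ports_register(&sc->sc_ports, EP_DAI_DEVICE);
}
#endif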

/* Digital audio interface support */

LIST_HEAD(, dai_device) dai_devices =
	LIST_HEAD_INITIALIZER(dai_devices);

void *
dai_ep_get_cookie(void *cookie, struct endpoint *ep)
{
	return cookie;
}

void
dai_register(struct dai_device *dd)
{
	dd->dd_phandle = OF_getpropint(dd->dd_node, "phandle", 0);
	if (dd->dd_phandle != 0)
		LIST_INSERT_HEAD(&dai_devices, dd, dd_list);

	dd->dd_ports.dp_node = dd->dd_node;
	dd->dd_ports.dp_cookie = dd;
	dd->dd_ports.dp_ep_get_cookie = dai_ep_get_cookie;
	device_ports_register(&dd->dd_ports, EP_DAI_DEVICE);
}

struct dai_device *
dai_byphandle(uint32_t phandle)
{
	struct dai_device *dd;

	if (phandle == 0)
		return NULL;

	LIST_FOREACH(dd, &dai_devices, dd_list) {
		if (dd->dd_phandle == phandle)
			return dd;
	}

	return NULL;
}

/* MII support */

LIST_HEAD(, mii_bus) mii_busses =
	LIST_HEAD_INITIALIZER(mii_busses);

void
mii_register(struct mii_bus *md)
{
	LIST_INSERT_HEAD(&mii_busses, md, md_list);
}

struct mii_bus *
mii_bynode(int node)
{
	struct mii_bus *md;

	LIST_FOREACH(md, &mii_busses, md_list) {
		if (md->md_node == node)
			return md;
	}

	return NULL;
}

struct mii_bus *
mii_byphandle(uint32_t phandle)
{
	int node;

	if (phandle == 0)
		return NULL;

	node = OF_getnodebyphandle(phandle);
	if (node == 0)
		return NULL;

	node = OF_parent(node);
	if (node == 0)
		return NULL;

	return mii_bynode(node);
}
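
/*
 * Example (sketch, not built): an Ethernet driver locating the MDIO
 * bus its PHY sits on.  mii_byphandle() resolves a PHY phandle to the
 * parent bus node, so a "phy-handle" reference maps to the mii_bus.
 */
#if 0
struct mii_bus *
example_find_mdio(int node)
{
	return mii_byphandle(OF_getpropint(node, "phy-handle", 0));
}
#endif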

/* IOMMU support */

LIST_HEAD(, iommu_device) iommu_devices =
	LIST_HEAD_INITIALIZER(iommu_devices);

void
iommu_device_register(struct iommu_device *id)
{
	id->id_phandle = OF_getpropint(id->id_node, "phandle", 0);
	if (id->id_phandle == 0)
		return;

	LIST_INSERT_HEAD(&iommu_devices, id, id_list);
}

bus_dma_tag_t
iommu_device_do_map(uint32_t phandle, uint32_t *cells, bus_dma_tag_t dmat)
{
	struct iommu_device *id;

	if (phandle == 0)
		return dmat;

	LIST_FOREACH(id, &iommu_devices, id_list) {
		if (id->id_phandle == phandle)
			return id->id_map(id->id_cookie, cells, dmat);
	}

	return dmat;
}

int
iommu_device_lookup(int node, uint32_t *phandle, uint32_t *sid)
{
	uint32_t *cell;
	uint32_t *map;
	int len, icells, ncells;
	int ret = 1;

	len = OF_getproplen(node, "iommus");
	if (len <= 0)
		return ret;

	map = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "iommus", map, len);

	/* Only the first "iommus" entry is used. */
	cell = map;
	ncells = len / sizeof(uint32_t);
	if (ncells > 1) {
		node = OF_getnodebyphandle(cell[0]);
		if (node == 0)
			goto out;

		icells = OF_getpropint(node, "#iommu-cells", 1);
		if (ncells < icells + 1)
			goto out;

		KASSERT(icells == 1);

		*phandle = cell[0];
		*sid = cell[1];
		ret = 0;
	}

out:
	free(map, M_TEMP, len);

	return ret;
}

int
iommu_device_lookup_pci(int node, uint32_t rid, uint32_t *phandle,
    uint32_t *sid)
{
	uint32_t sid_base;
	uint32_t *cell;
	uint32_t *map;
	uint32_t mask, rid_base;
	int len, length, icells, ncells;
	int ret = 1;

	len = OF_getproplen(node, "iommu-map");
	if (len <= 0)
		return ret;

	map = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "iommu-map", map, len);

	mask = OF_getpropint(node, "iommu-map-mask", 0xffff);
	rid = rid & mask;

	cell = map;
	ncells = len / sizeof(uint32_t);
	while (ncells > 1) {
		node = OF_getnodebyphandle(cell[1]);
		if (node == 0)
			goto out;

		icells = OF_getpropint(node, "#iommu-cells", 1);
		if (ncells < icells + 3)
			goto out;

		KASSERT(icells == 1);

		rid_base = cell[0];
		sid_base = cell[2];
		length = cell[3];
		if (rid >= rid_base && rid < rid_base + length) {
			*sid = sid_base + (rid - rid_base);
			*phandle = cell[1];
			ret = 0;
			break;
		}

		cell += 4;
		ncells -= 4;
	}

out:
	free(map, M_TEMP, len);

	return ret;
}

bus_dma_tag_t
iommu_device_map(int node, bus_dma_tag_t dmat)
{
	uint32_t phandle, sid;

	if (iommu_device_lookup(node, &phandle, &sid))
		return dmat;

	return iommu_device_do_map(phandle, &sid, dmat);
}

bus_dma_tag_t
iommu_device_map_pci(int node, uint32_t rid, bus_dma_tag_t dmat)
{
	uint32_t phandle, sid;

	if (iommu_device_lookup_pci(node, rid, &phandle, &sid))
		return dmat;

	return iommu_device_do_map(phandle, &sid, dmat);
}

void
iommu_device_do_reserve(uint32_t phandle, uint32_t *cells, bus_addr_t addr,
    bus_size_t size)
{
	struct iommu_device *id;

	if (phandle == 0)
		return;

	LIST_FOREACH(id, &iommu_devices, id_list) {
		if (id->id_phandle == phandle) {
			id->id_reserve(id->id_cookie, cells, addr, size);
			break;
		}
	}
}

void
iommu_reserve_region_pci(int node, uint32_t rid, bus_addr_t addr,
    bus_size_t size)
{
	uint32_t phandle, sid;

	if (iommu_device_lookup_pci(node, rid, &phandle, &sid))
		return;

	iommu_device_do_reserve(phandle, &sid, addr, size);
}
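
/*
 * Example (sketch, not built): wrapping a device's DMA tag at attach
 * time so that bus_dma(9) operations are translated by the IOMMU.
 * The fdt_attach_args fields are real; the softc member is
 * illustrative.
 */
#if 0
	sc->sc_dmat = iommu_device_map(faa->fa_node, faa->fa_dmat);
#endif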

/*
 * Mailbox support.
 */

struct mbox_channel {
	struct mbox_device	*mc_md;
	void			*mc_cookie;
};

LIST_HEAD(, mbox_device) mbox_devices =
	LIST_HEAD_INITIALIZER(mbox_devices);

void
mbox_register(struct mbox_device *md)
{
	md->md_cells = OF_getpropint(md->md_node, "#mbox-cells", 0);
	md->md_phandle = OF_getpropint(md->md_node, "phandle", 0);
	if (md->md_phandle == 0)
		return;

	LIST_INSERT_HEAD(&mbox_devices, md, md_list);
}

struct mbox_channel *
mbox_channel_cells(uint32_t *cells, struct mbox_client *client)
{
	struct mbox_device *md;
	struct mbox_channel *mc;
	uint32_t phandle = cells[0];
	void *cookie;

	LIST_FOREACH(md, &mbox_devices, md_list) {
		if (md->md_phandle == phandle)
			break;
	}

	if (md && md->md_channel) {
		cookie = md->md_channel(md->md_cookie, &cells[1], client);
		if (cookie) {
			mc = malloc(sizeof(*mc), M_DEVBUF, M_WAITOK);
			mc->mc_md = md;
			mc->mc_cookie = cookie;
			return mc;
		}
	}

	return NULL;
}

uint32_t *
mbox_next_mbox(uint32_t *cells)
{
	uint32_t phandle = cells[0];
	int node, ncells;

	node = OF_getnodebyphandle(phandle);
	if (node == 0)
		return NULL;

	ncells = OF_getpropint(node, "#mbox-cells", 0);
	return cells + ncells + 1;
}

struct mbox_channel *
mbox_channel_idx(int node, int idx, struct mbox_client *client)
{
	struct mbox_channel *mc = NULL;
	uint32_t *mboxes;
	uint32_t *mbox;
	int len;

	len = OF_getproplen(node, "mboxes");
	if (len <= 0)
		return NULL;

	mboxes = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "mboxes", mboxes, len);

	mbox = mboxes;
	while (mbox && mbox < mboxes + (len / sizeof(uint32_t))) {
		if (idx == 0) {
			mc = mbox_channel_cells(mbox, client);
			break;
		}
		mbox = mbox_next_mbox(mbox);
		idx--;
	}

	free(mboxes, M_TEMP, len);
	return mc;
}

struct mbox_channel *
mbox_channel(int node, const char *name, struct mbox_client *client)
{
	int idx;

	idx = OF_getindex(node, name, "mbox-names");
	if (idx == -1)
		return NULL;

	return mbox_channel_idx(node, idx, client);
}

int
mbox_send(struct mbox_channel *mc, const void *data, size_t len)
{
	struct mbox_device *md = mc->mc_md;

	if (md->md_send)
		return md->md_send(mc->mc_cookie, data, len);

	return ENXIO;
}

int
mbox_recv(struct mbox_channel *mc, void *data, size_t len)
{
	struct mbox_device *md = mc->mc_md;

	if (md->md_recv)
		return md->md_recv(mc->mc_cookie, data, len);

	return ENXIO;
}
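
/*
 * Example (sketch, not built): opening the channel named "tx" in
 * "mbox-names" and posting a message.  Passing a NULL client is
 * assumed to be acceptable for send-only channels here; a real
 * driver would supply receive callbacks via struct mbox_client.
 */
#if 0
int
example_mbox_ping(int node)
{
	struct mbox_channel *mc;
	uint32_t msg = 0xdeadbeef;

	mc = mbox_channel(node, "tx", NULL);
	if (mc == NULL)
		return ENXIO;

	return mbox_send(mc, &msg, sizeof(msg));
}
#endif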