xref: /openbsd-src/sys/dev/ofw/ofw_misc.c (revision 3374c67d44f9b75b98444cbf63020f777792342e)
/*	$OpenBSD: ofw_misc.c,v 1.38 2022/12/17 11:54:32 kettenis Exp $	*/
/*
 * Copyright (c) 2017-2021 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_gpio.h>
#include <dev/ofw/ofw_misc.h>
#include <dev/ofw/ofw_regulator.h>

/*
 * Register maps.
 */

struct regmap {
	int			rm_node;
	uint32_t		rm_phandle;
	bus_space_tag_t		rm_tag;
	bus_space_handle_t	rm_handle;
	bus_size_t		rm_size;

	LIST_ENTRY(regmap)	rm_list;
};

LIST_HEAD(, regmap) regmaps = LIST_HEAD_INITIALIZER(regmaps);

void
regmap_register(int node, bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t size)
{
	struct regmap *rm;

	rm = malloc(sizeof(struct regmap), M_DEVBUF, M_WAITOK);
	rm->rm_node = node;
	rm->rm_phandle = OF_getpropint(node, "phandle", 0);
	rm->rm_tag = tag;
	rm->rm_handle = handle;
	rm->rm_size = size;
	LIST_INSERT_HEAD(&regmaps, rm, rm_list);
}

struct regmap *
regmap_bycompatible(char *compatible)
{
	struct regmap *rm;

	LIST_FOREACH(rm, &regmaps, rm_list) {
		if (OF_is_compatible(rm->rm_node, compatible))
			return rm;
	}

	return NULL;
}

struct regmap *
regmap_bynode(int node)
{
	struct regmap *rm;

	LIST_FOREACH(rm, &regmaps, rm_list) {
		if (rm->rm_node == node)
			return rm;
	}

	return NULL;
}

struct regmap *
regmap_byphandle(uint32_t phandle)
{
	struct regmap *rm;

	if (phandle == 0)
		return NULL;

	LIST_FOREACH(rm, &regmaps, rm_list) {
		if (rm->rm_phandle == phandle)
			return rm;
	}

	return NULL;
}

void
regmap_write_4(struct regmap *rm, bus_size_t offset, uint32_t value)
{
	KASSERT(offset <= rm->rm_size - sizeof(uint32_t));
	bus_space_write_4(rm->rm_tag, rm->rm_handle, offset, value);
}

uint32_t
regmap_read_4(struct regmap *rm, bus_size_t offset)
{
	KASSERT(offset <= rm->rm_size - sizeof(uint32_t));
	return bus_space_read_4(rm->rm_tag, rm->rm_handle, offset);
}
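
/*
 * Example (illustrative, not part of the original file): a driver for
 * a device whose registers live inside a shared syscon region would
 * typically look the register map up by phandle and perform a
 * read-modify-write.  The property name "example,syscon" and the
 * register offset are made up.
 *
 *	struct regmap *rm;
 *	uint32_t phandle;
 *
 *	phandle = OF_getpropint(node, "example,syscon", 0);
 *	rm = regmap_byphandle(phandle);
 *	if (rm != NULL)
 *		regmap_write_4(rm, 0x10, regmap_read_4(rm, 0x10) | 0x1);
 */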

/*
 * PHY support.
 */

LIST_HEAD(, phy_device) phy_devices =
	LIST_HEAD_INITIALIZER(phy_devices);

void
phy_register(struct phy_device *pd)
{
	pd->pd_cells = OF_getpropint(pd->pd_node, "#phy-cells", 0);
	pd->pd_phandle = OF_getpropint(pd->pd_node, "phandle", 0);
	if (pd->pd_phandle == 0)
		return;

	LIST_INSERT_HEAD(&phy_devices, pd, pd_list);
}

int
phy_usb_nop_enable(int node)
{
	uint32_t vcc_supply;
	uint32_t *gpio;
	int len;

	vcc_supply = OF_getpropint(node, "vcc-supply", 0);
	if (vcc_supply)
		regulator_enable(vcc_supply);

	len = OF_getproplen(node, "reset-gpios");
	if (len <= 0)
		return 0;

	/* There should only be a single GPIO pin. */
	gpio = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "reset-gpios", gpio, len);

	gpio_controller_config_pin(gpio, GPIO_CONFIG_OUTPUT);
	gpio_controller_set_pin(gpio, 1);
	delay(10000);
	gpio_controller_set_pin(gpio, 0);

	free(gpio, M_TEMP, len);

	return 0;
}

int
phy_enable_cells(uint32_t *cells)
{
	struct phy_device *pd;
	uint32_t phandle = cells[0];
	int node;

	LIST_FOREACH(pd, &phy_devices, pd_list) {
		if (pd->pd_phandle == phandle)
			break;
	}

	if (pd && pd->pd_enable)
		return pd->pd_enable(pd->pd_cookie, &cells[1]);

	node = OF_getnodebyphandle(phandle);
	if (node == 0)
		return ENXIO;

	if (OF_is_compatible(node, "usb-nop-xceiv"))
		return phy_usb_nop_enable(node);

	return ENXIO;
}

uint32_t *
phy_next_phy(uint32_t *cells)
{
	uint32_t phandle = cells[0];
	int node, ncells;

	node = OF_getnodebyphandle(phandle);
	if (node == 0)
		return NULL;

	ncells = OF_getpropint(node, "#phy-cells", 0);
	return cells + ncells + 1;
}

int
phy_enable_idx(int node, int idx)
{
	uint32_t *phys;
	uint32_t *phy;
	int rv = -1;
	int len;

	len = OF_getproplen(node, "phys");
	if (len <= 0)
		return -1;

	phys = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "phys", phys, len);

	phy = phys;
	while (phy && phy < phys + (len / sizeof(uint32_t))) {
		if (idx <= 0)
			rv = phy_enable_cells(phy);
		if (idx == 0)
			break;
		phy = phy_next_phy(phy);
		idx--;
	}

	free(phys, M_TEMP, len);
	return rv;
}

int
phy_enable(int node, const char *name)
{
	int idx;

	idx = OF_getindex(node, name, "phy-names");
	if (idx == -1)
		return -1;

	return phy_enable_idx(node, idx);
}
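
/*
 * Example (illustrative, not part of the original file): a USB host
 * controller driver would typically enable the PHY attached to its
 * device tree node like this, where "usb" is the name the binding
 * uses in "phy-names"; "sc" is a hypothetical softc.
 *
 *	if (phy_enable(sc->sc_node, "usb") != 0)
 *		printf("%s: can't enable PHY\n", sc->sc_dev.dv_xname);
 */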

/*
 * I2C support.
 */

LIST_HEAD(, i2c_bus) i2c_busses =
	LIST_HEAD_INITIALIZER(i2c_busses);

void
i2c_register(struct i2c_bus *ib)
{
	ib->ib_phandle = OF_getpropint(ib->ib_node, "phandle", 0);
	if (ib->ib_phandle == 0)
		return;

	LIST_INSERT_HEAD(&i2c_busses, ib, ib_list);
}

struct i2c_controller *
i2c_bynode(int node)
{
	struct i2c_bus *ib;

	LIST_FOREACH(ib, &i2c_busses, ib_list) {
		if (ib->ib_node == node)
			return ib->ib_ic;
	}

	return NULL;
}

struct i2c_controller *
i2c_byphandle(uint32_t phandle)
{
	struct i2c_bus *ib;

	if (phandle == 0)
		return NULL;

	LIST_FOREACH(ib, &i2c_busses, ib_list) {
		if (ib->ib_phandle == phandle)
			return ib->ib_ic;
	}

	return NULL;
}
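
/*
 * Example (illustrative, not part of the original file): a driver
 * that needs to talk to a chip sitting on some other controller's
 * i2c bus could resolve the bus from a phandle property; the
 * property name "example,i2c-bus" is made up.
 *
 *	struct i2c_controller *ic;
 *
 *	ic = i2c_byphandle(OF_getpropint(node, "example,i2c-bus", 0));
 *	if (ic == NULL)
 *		return;
 */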

/*
 * SFP support.
 */

LIST_HEAD(, sfp_device) sfp_devices =
	LIST_HEAD_INITIALIZER(sfp_devices);

void
sfp_register(struct sfp_device *sd)
{
	sd->sd_phandle = OF_getpropint(sd->sd_node, "phandle", 0);
	if (sd->sd_phandle == 0)
		return;

	LIST_INSERT_HEAD(&sfp_devices, sd, sd_list);
}

int
sfp_do_enable(uint32_t phandle, int enable)
{
	struct sfp_device *sd;

	if (phandle == 0)
		return ENXIO;

	LIST_FOREACH(sd, &sfp_devices, sd_list) {
		if (sd->sd_phandle == phandle)
			return sd->sd_enable(sd->sd_cookie, enable);
	}

	return ENXIO;
}

int
sfp_enable(uint32_t phandle)
{
	return sfp_do_enable(phandle, 1);
}

int
sfp_disable(uint32_t phandle)
{
	return sfp_do_enable(phandle, 0);
}

int
sfp_get_sffpage(uint32_t phandle, struct if_sffpage *sff)
{
	struct sfp_device *sd;

	if (phandle == 0)
		return ENXIO;

	LIST_FOREACH(sd, &sfp_devices, sd_list) {
		if (sd->sd_phandle == phandle)
			return sd->sd_get_sffpage(sd->sd_cookie, sff);
	}

	return ENXIO;
}

#define SFF8472_TCC_XCC			3 /* 10G Ethernet Compliance Codes */
#define SFF8472_TCC_XCC_10G_SR		(1 << 4)
#define SFF8472_TCC_XCC_10G_LR		(1 << 5)
#define SFF8472_TCC_XCC_10G_LRM		(1 << 6)
#define SFF8472_TCC_XCC_10G_ER		(1 << 7)
#define SFF8472_TCC_ECC			6 /* Ethernet Compliance Codes */
#define SFF8472_TCC_ECC_1000_SX		(1 << 0)
#define SFF8472_TCC_ECC_1000_LX		(1 << 1)
#define SFF8472_TCC_ECC_1000_CX		(1 << 2)
#define SFF8472_TCC_ECC_1000_T		(1 << 3)
#define SFF8472_TCC_SCT			8 /* SFP+ Cable Technology */
#define SFF8472_TCC_SCT_PASSIVE		(1 << 2)
#define SFF8472_TCC_SCT_ACTIVE		(1 << 3)

int
sfp_add_media(uint32_t phandle, struct mii_data *mii)
{
	struct if_sffpage sff;
	int error;

	memset(&sff, 0, sizeof(sff));
	sff.sff_addr = IFSFF_ADDR_EEPROM;
	sff.sff_page = 0;

	error = sfp_get_sffpage(phandle, &sff);
	if (error)
		return error;

	/* SFP */
	if (sff.sff_data[SFF8472_TCC_ECC] & SFF8472_TCC_ECC_1000_SX) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_SX, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_1000_SX | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_ECC] & SFF8472_TCC_ECC_1000_LX) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_LX, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_1000_LX | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_ECC] & SFF8472_TCC_ECC_1000_CX) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_CX, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_1000_CX | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_ECC] & SFF8472_TCC_ECC_1000_T) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_T, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_1000_T | IFM_FDX;
	}

	/* SFP+ */
	if (sff.sff_data[SFF8472_TCC_XCC] & SFF8472_TCC_XCC_10G_SR) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_10G_SR | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_XCC] & SFF8472_TCC_XCC_10G_LR) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_10G_LR | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_XCC] & SFF8472_TCC_XCC_10G_LRM) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_10G_LRM, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_10G_LRM | IFM_FDX;
	}
	if (sff.sff_data[SFF8472_TCC_XCC] & SFF8472_TCC_XCC_10G_ER) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_10G_ER, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_10G_ER | IFM_FDX;
	}

	/* SFP+ DAC */
	if (sff.sff_data[SFF8472_TCC_SCT] & SFF8472_TCC_SCT_PASSIVE ||
	    sff.sff_data[SFF8472_TCC_SCT] & SFF8472_TCC_SCT_ACTIVE) {
		ifmedia_add(&mii->mii_media,
		    IFM_ETHER | IFM_10G_SFP_CU, 0, NULL);
		mii->mii_media_active = IFM_ETHER | IFM_10G_SFP_CU | IFM_FDX;
	}

	return 0;
}
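
/*
 * Example (illustrative, not part of the original file): a network
 * driver whose node carries an "sfp" property would power the module
 * up and let it contribute media types; "sc" is a hypothetical softc.
 *
 *	sc->sc_sfp = OF_getpropint(sc->sc_node, "sfp", 0);
 *	sfp_enable(sc->sc_sfp);
 *	sfp_add_media(sc->sc_sfp, &sc->sc_mii);
 */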

/*
 * PWM support.
 */

LIST_HEAD(, pwm_device) pwm_devices =
	LIST_HEAD_INITIALIZER(pwm_devices);

void
pwm_register(struct pwm_device *pd)
{
	pd->pd_cells = OF_getpropint(pd->pd_node, "#pwm-cells", 0);
	pd->pd_phandle = OF_getpropint(pd->pd_node, "phandle", 0);
	if (pd->pd_phandle == 0)
		return;

	LIST_INSERT_HEAD(&pwm_devices, pd, pd_list);
}

int
pwm_init_state(uint32_t *cells, struct pwm_state *ps)
{
	struct pwm_device *pd;

	LIST_FOREACH(pd, &pwm_devices, pd_list) {
		if (pd->pd_phandle == cells[0]) {
			memset(ps, 0, sizeof(struct pwm_state));
			pd->pd_get_state(pd->pd_cookie, &cells[1], ps);
			ps->ps_pulse_width = 0;
			if (pd->pd_cells >= 2)
				ps->ps_period = cells[2];
			if (pd->pd_cells >= 3)
				ps->ps_flags = cells[3];
			return 0;
		}
	}

	return ENXIO;
}

int
pwm_get_state(uint32_t *cells, struct pwm_state *ps)
{
	struct pwm_device *pd;

	LIST_FOREACH(pd, &pwm_devices, pd_list) {
		if (pd->pd_phandle == cells[0])
			return pd->pd_get_state(pd->pd_cookie, &cells[1], ps);
	}

	return ENXIO;
}

int
pwm_set_state(uint32_t *cells, struct pwm_state *ps)
{
	struct pwm_device *pd;

	LIST_FOREACH(pd, &pwm_devices, pd_list) {
		if (pd->pd_phandle == cells[0])
			return pd->pd_set_state(pd->pd_cookie, &cells[1], ps);
	}

	return ENXIO;
}
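
/*
 * Example (illustrative, not part of the original file): a backlight
 * driver that has already copied its "pwms" property into "cells"
 * could program a 50% duty cycle; ps_enabled is assumed to be part
 * of struct pwm_state as declared in ofw_misc.h.
 *
 *	struct pwm_state ps;
 *
 *	if (pwm_init_state(cells, &ps) == 0) {
 *		ps.ps_pulse_width = ps.ps_period / 2;
 *		ps.ps_enabled = 1;
 *		pwm_set_state(cells, &ps);
 *	}
 */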

/*
 * Non-volatile memory support.
 */

LIST_HEAD(, nvmem_device) nvmem_devices =
	LIST_HEAD_INITIALIZER(nvmem_devices);

struct nvmem_cell {
	uint32_t	nc_phandle;
	struct nvmem_device *nc_nd;
	bus_addr_t	nc_addr;
	bus_size_t	nc_size;
	uint32_t	nc_offset;
	uint32_t	nc_bitlen;

	LIST_ENTRY(nvmem_cell) nc_list;
};

LIST_HEAD(, nvmem_cell) nvmem_cells =
	LIST_HEAD_INITIALIZER(nvmem_cells);

void
nvmem_register_child(int node, struct nvmem_device *nd)
{
	struct nvmem_cell *nc;
	uint32_t phandle;
	uint32_t reg[2], bits[2] = {};

	phandle = OF_getpropint(node, "phandle", 0);
	if (phandle == 0)
		return;

	if (OF_getpropintarray(node, "reg", reg, sizeof(reg)) != sizeof(reg))
		return;

	OF_getpropintarray(node, "bits", bits, sizeof(bits));

	nc = malloc(sizeof(struct nvmem_cell), M_DEVBUF, M_WAITOK);
	nc->nc_phandle = phandle;
	nc->nc_nd = nd;
	nc->nc_addr = reg[0];
	nc->nc_size = reg[1];
	nc->nc_offset = bits[0];
	nc->nc_bitlen = bits[1];
	LIST_INSERT_HEAD(&nvmem_cells, nc, nc_list);
}

void
nvmem_register(struct nvmem_device *nd)
{
	int node;

	nd->nd_phandle = OF_getpropint(nd->nd_node, "phandle", 0);
	if (nd->nd_phandle)
		LIST_INSERT_HEAD(&nvmem_devices, nd, nd_list);

	for (node = OF_child(nd->nd_node); node; node = OF_peer(node))
		nvmem_register_child(node, nd);
}

int
nvmem_read(uint32_t phandle, bus_addr_t addr, void *data, bus_size_t size)
{
	struct nvmem_device *nd;

	if (phandle == 0)
		return ENXIO;

	LIST_FOREACH(nd, &nvmem_devices, nd_list) {
		if (nd->nd_phandle == phandle)
			return nd->nd_read(nd->nd_cookie, addr, data, size);
	}

	return ENXIO;
}

int
nvmem_read_cell(int node, const char *name, void *data, bus_size_t size)
{
	struct nvmem_device *nd;
	struct nvmem_cell *nc;
	uint8_t *p = data;
	bus_addr_t addr;
	uint32_t phandle, *phandles;
	uint32_t offset, bitlen;
	int id, len, first;

	id = OF_getindex(node, name, "nvmem-cell-names");
	if (id < 0)
		return ENXIO;

	len = OF_getproplen(node, "nvmem-cells");
	if (len <= 0)
		return ENXIO;

	phandles = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "nvmem-cells", phandles, len);
	phandle = phandles[id];
	free(phandles, M_TEMP, len);

	LIST_FOREACH(nc, &nvmem_cells, nc_list) {
		if (nc->nc_phandle == phandle)
			break;
	}
	if (nc == NULL)
		return ENXIO;

	nd = nc->nc_nd;
	if (nd->nd_read == NULL)
		return EACCES;

	/*
	 * For cells with a "bits" property, copy bit by bit: each
	 * backing byte is shifted down by the bit offset, so it
	 * contributes the high bits of the previous output byte and
	 * the low bits of the current one.
	 */
	first = 1;
	addr = nc->nc_addr + (nc->nc_offset / 8);
	offset = nc->nc_offset % 8;
	bitlen = nc->nc_bitlen;
	while (bitlen > 0 && size > 0) {
		uint8_t mask, tmp;
		int error;

		error = nd->nd_read(nd->nd_cookie, addr++, &tmp, 1);
		if (error)
			return error;

		if (bitlen >= 8)
			mask = 0xff;
		else
			mask = (1 << bitlen) - 1;

		if (!first) {
			*p++ |= (tmp << (8 - offset)) & (mask << (8 - offset));
			bitlen -= MIN(offset, bitlen);
			mask >>= offset;
			size--;
		}

		if (bitlen > 0 && size > 0) {
			*p = (tmp >> offset) & mask;
			bitlen -= MIN(8 - offset, bitlen);
		}

		first = 0;
	}
	if (nc->nc_bitlen > 0)
		return 0;

	if (size > nc->nc_size)
		return EINVAL;

	return nd->nd_read(nd->nd_cookie, nc->nc_addr, data, size);
}

int
nvmem_write_cell(int node, const char *name, const void *data, bus_size_t size)
{
	struct nvmem_device *nd;
	struct nvmem_cell *nc;
	const uint8_t *p = data;
	bus_addr_t addr;
	uint32_t phandle, *phandles;
	uint32_t offset, bitlen;
	int id, len, first;

	id = OF_getindex(node, name, "nvmem-cell-names");
	if (id < 0)
		return ENXIO;

	len = OF_getproplen(node, "nvmem-cells");
	if (len <= 0)
		return ENXIO;

	phandles = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "nvmem-cells", phandles, len);
	phandle = phandles[id];
	free(phandles, M_TEMP, len);

	LIST_FOREACH(nc, &nvmem_cells, nc_list) {
		if (nc->nc_phandle == phandle)
			break;
	}
	if (nc == NULL)
		return ENXIO;

	nd = nc->nc_nd;
	if (nd->nd_write == NULL)
		return EACCES;

	/*
	 * For cells with a "bits" property, read-modify-write each
	 * backing byte, splicing the new bits in at the cell's bit
	 * offset and leaving the surrounding bits untouched.
	 */
	first = 1;
	addr = nc->nc_addr + (nc->nc_offset / 8);
	offset = nc->nc_offset % 8;
	bitlen = nc->nc_bitlen;
	while (bitlen > 0 && size > 0) {
		uint8_t mask, tmp;
		int error;

		error = nd->nd_read(nd->nd_cookie, addr, &tmp, 1);
		if (error)
			return error;

		if (bitlen >= 8)
			mask = 0xff;
		else
			mask = (1 << bitlen) - 1;

		tmp &= ~(mask << offset);
		tmp |= (*p++ << offset) & (mask << offset);
		bitlen -= MIN(8 - offset, bitlen);
		size--;

		if (!first && bitlen > 0 && size > 0) {
			tmp &= ~(mask >> (8 - offset));
			tmp |= (*p >> (8 - offset)) & (mask >> (8 - offset));
			bitlen -= MIN(offset, bitlen);
		}

		error = nd->nd_write(nd->nd_cookie, addr++, &tmp, 1);
		if (error)
			return error;

		first = 0;
	}
	if (nc->nc_bitlen > 0)
		return 0;

	if (size > nc->nc_size)
		return EINVAL;

	return nd->nd_write(nd->nd_cookie, nc->nc_addr, data, size);
}
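
/*
 * Example (illustrative, not part of the original file): an ethernet
 * driver could pull its MAC address out of a "mac-address" nvmem
 * cell referenced by its node; "sc_lladdr" is a made-up softc field.
 *
 *	uint8_t enaddr[ETHER_ADDR_LEN];
 *
 *	if (nvmem_read_cell(node, "mac-address", enaddr,
 *	    sizeof(enaddr)) == 0)
 *		memcpy(sc->sc_lladdr, enaddr, sizeof(enaddr));
 */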

/* Port/endpoint interface support */

LIST_HEAD(, endpoint) endpoints =
	LIST_HEAD_INITIALIZER(endpoints);

void
endpoint_register(int node, struct device_port *dp, enum endpoint_type type)
{
	struct endpoint *ep;

	ep = malloc(sizeof(*ep), M_DEVBUF, M_WAITOK);
	ep->ep_node = node;
	ep->ep_phandle = OF_getpropint(node, "phandle", 0);
	ep->ep_reg = OF_getpropint(node, "reg", -1);
	ep->ep_port = dp;
	ep->ep_type = type;

	LIST_INSERT_HEAD(&endpoints, ep, ep_list);
	LIST_INSERT_HEAD(&dp->dp_endpoints, ep, ep_plist);
}

void
device_port_register(int node, struct device_ports *ports,
    enum endpoint_type type)
{
	struct device_port *dp;

	dp = malloc(sizeof(*dp), M_DEVBUF, M_WAITOK);
	dp->dp_node = node;
	dp->dp_phandle = OF_getpropint(node, "phandle", 0);
	dp->dp_reg = OF_getpropint(node, "reg", -1);
	dp->dp_ports = ports;
	LIST_INIT(&dp->dp_endpoints);
	for (node = OF_child(node); node; node = OF_peer(node))
		endpoint_register(node, dp, type);

	LIST_INSERT_HEAD(&ports->dp_ports, dp, dp_list);
}

void
device_ports_register(struct device_ports *ports,
    enum endpoint_type type)
{
	int node;

	LIST_INIT(&ports->dp_ports);

	node = OF_getnodebyname(ports->dp_node, "ports");
	if (node == 0) {
		node = OF_getnodebyname(ports->dp_node, "port");
		if (node == 0)
			return;

		device_port_register(node, ports, type);
		return;
	}

	for (node = OF_child(node); node; node = OF_peer(node))
		device_port_register(node, ports, type);
}

struct device_ports *
device_ports_byphandle(uint32_t phandle)
{
	struct endpoint *ep;

	if (phandle == 0)
		return NULL;

	LIST_FOREACH(ep, &endpoints, ep_list) {
		if (ep->ep_port->dp_phandle == phandle)
			return ep->ep_port->dp_ports;
	}

	return NULL;
}

struct endpoint *
endpoint_byphandle(uint32_t phandle)
{
	struct endpoint *ep;

	if (phandle == 0)
		return NULL;

	LIST_FOREACH(ep, &endpoints, ep_list) {
		if (ep->ep_phandle == phandle)
			return ep;
	}

	return NULL;
}

struct endpoint *
endpoint_byreg(struct device_ports *ports, uint32_t dp_reg, uint32_t ep_reg)
{
	struct device_port *dp;
	struct endpoint *ep;

	LIST_FOREACH(dp, &ports->dp_ports, dp_list) {
		if (dp->dp_reg != dp_reg)
			continue;
		LIST_FOREACH(ep, &dp->dp_endpoints, ep_plist) {
			if (ep->ep_reg != ep_reg)
				continue;
			return ep;
		}
	}

	return NULL;
}

struct endpoint *
endpoint_remote(struct endpoint *ep)
{
	struct endpoint *rep;
	uint32_t phandle;

	phandle = OF_getpropint(ep->ep_node, "remote-endpoint", 0);
	if (phandle == 0)
		return NULL;

	LIST_FOREACH(rep, &endpoints, ep_list) {
		if (rep->ep_phandle == phandle)
			return rep;
	}

	return NULL;
}

int
endpoint_activate(struct endpoint *ep, void *arg)
{
	struct device_ports *ports = ep->ep_port->dp_ports;
	return ports->dp_ep_activate(ports->dp_cookie, ep, arg);
}

void *
endpoint_get_cookie(struct endpoint *ep)
{
	struct device_ports *ports = ep->ep_port->dp_ports;
	return ports->dp_ep_get_cookie(ports->dp_cookie, ep);
}

int
device_port_activate(uint32_t phandle, void *arg)
{
	struct device_port *dp = NULL;
	struct endpoint *ep, *rep;
	int count;
	int error;

	if (phandle == 0)
		return ENXIO;

	LIST_FOREACH(ep, &endpoints, ep_list) {
		if (ep->ep_port->dp_phandle == phandle) {
			dp = ep->ep_port;
			break;
		}
	}
	if (dp == NULL)
		return ENXIO;

	count = 0;
	LIST_FOREACH(ep, &dp->dp_endpoints, ep_plist) {
		rep = endpoint_remote(ep);
		if (rep == NULL)
			continue;

		error = endpoint_activate(ep, arg);
		if (error)
			continue;
		error = endpoint_activate(rep, arg);
		if (error)
			continue;
		count++;
	}

	return count ? 0 : ENXIO;
}
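
/*
 * Example (illustrative, not part of the original file): a driver
 * that provides ports fills in a struct device_ports in its softc
 * and registers it; a peer can later activate both ends of every
 * connected endpoint of one port by phandle.  The names "sc" and
 * "mydev_ep_activate" are made up.
 *
 *	sc->sc_ports.dp_node = sc->sc_node;
 *	sc->sc_ports.dp_cookie = sc;
 *	sc->sc_ports.dp_ep_activate = mydev_ep_activate;
 *	device_ports_register(&sc->sc_ports, EP_DAI_DEVICE);
 *	...
 *	error = device_port_activate(port_phandle, arg);
 */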

/* Digital audio interface support */

LIST_HEAD(, dai_device) dai_devices =
	LIST_HEAD_INITIALIZER(dai_devices);

void *
dai_ep_get_cookie(void *cookie, struct endpoint *ep)
{
	return cookie;
}

void
dai_register(struct dai_device *dd)
{
	dd->dd_phandle = OF_getpropint(dd->dd_node, "phandle", 0);
	if (dd->dd_phandle != 0)
		LIST_INSERT_HEAD(&dai_devices, dd, dd_list);

	dd->dd_ports.dp_node = dd->dd_node;
	dd->dd_ports.dp_cookie = dd;
	dd->dd_ports.dp_ep_get_cookie = dai_ep_get_cookie;
	device_ports_register(&dd->dd_ports, EP_DAI_DEVICE);
}

struct dai_device *
dai_byphandle(uint32_t phandle)
{
	struct dai_device *dd;

	if (phandle == 0)
		return NULL;

	LIST_FOREACH(dd, &dai_devices, dd_list) {
		if (dd->dd_phandle == phandle)
			return dd;
	}

	return NULL;
}
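
/*
 * Example (illustrative, not part of the original file): an audio
 * card glue driver could resolve the DAI of its codec from a
 * "sound-dai" property, as used by simple-audio-card style bindings:
 *
 *	struct dai_device *dd;
 *
 *	dd = dai_byphandle(OF_getpropint(node, "sound-dai", 0));
 *	if (dd == NULL)
 *		return;
 */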

/* MII support */

LIST_HEAD(, mii_bus) mii_busses =
	LIST_HEAD_INITIALIZER(mii_busses);

void
mii_register(struct mii_bus *md)
{
	LIST_INSERT_HEAD(&mii_busses, md, md_list);
}

struct mii_bus *
mii_bynode(int node)
{
	struct mii_bus *md;

	LIST_FOREACH(md, &mii_busses, md_list) {
		if (md->md_node == node)
			return md;
	}

	return NULL;
}

struct mii_bus *
mii_byphandle(uint32_t phandle)
{
	int node;

	if (phandle == 0)
		return NULL;

	node = OF_getnodebyphandle(phandle);
	if (node == 0)
		return NULL;

	node = OF_parent(node);
	if (node == 0)
		return NULL;

	return mii_bynode(node);
}
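
/*
 * Example (illustrative, not part of the original file): an ethernet
 * driver whose PHY sits on another controller's MDIO bus can find
 * that bus through the phandle in its "phy-handle" property; note
 * that mii_byphandle() takes the PHY's phandle and returns the
 * parent bus.
 *
 *	struct mii_bus *md;
 *
 *	md = mii_byphandle(OF_getpropint(node, "phy-handle", 0));
 */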

/* IOMMU support */

LIST_HEAD(, iommu_device) iommu_devices =
	LIST_HEAD_INITIALIZER(iommu_devices);

void
iommu_device_register(struct iommu_device *id)
{
	id->id_phandle = OF_getpropint(id->id_node, "phandle", 0);
	if (id->id_phandle == 0)
		return;

	LIST_INSERT_HEAD(&iommu_devices, id, id_list);
}

bus_dma_tag_t
iommu_device_do_map(uint32_t phandle, uint32_t *cells, bus_dma_tag_t dmat)
{
	struct iommu_device *id;

	if (phandle == 0)
		return dmat;

	LIST_FOREACH(id, &iommu_devices, id_list) {
		if (id->id_phandle == phandle)
			return id->id_map(id->id_cookie, cells, dmat);
	}

	return dmat;
}

int
iommu_device_lookup(int node, uint32_t *phandle, uint32_t *cells)
{
	uint32_t *cell;
	uint32_t *map;
	int len, icells, ncells;
	int ret = 1;
	int i;

	len = OF_getproplen(node, "iommus");
	if (len <= 0)
		return ret;

	map = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "iommus", map, len);

	/* Only the first entry of the "iommus" property is used. */
	cell = map;
	ncells = len / sizeof(uint32_t);
	if (ncells > 1) {
		node = OF_getnodebyphandle(cell[0]);
		if (node == 0)
			goto out;

		icells = OF_getpropint(node, "#iommu-cells", 1);
		if (ncells < icells + 1)
			goto out;

		KASSERT(icells <= 2);

		*phandle = cell[0];
		for (i = 0; i < icells; i++)
			cells[i] = cell[1 + i];
		ret = 0;
	}

out:
	free(map, M_TEMP, len);

	return ret;
}

int
iommu_device_lookup_pci(int node, uint32_t rid, uint32_t *phandle,
    uint32_t *cells)
{
	uint32_t sid_base;
	uint32_t *cell;
	uint32_t *map;
	uint32_t mask, rid_base;
	int len, length, icells, ncells;
	int ret = 1;

	len = OF_getproplen(node, "iommu-map");
	if (len <= 0)
		return ret;

	map = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "iommu-map", map, len);

	mask = OF_getpropint(node, "iommu-map-mask", 0xffff);
	rid = rid & mask;

	cell = map;
	ncells = len / sizeof(uint32_t);
	while (ncells > 1) {
		node = OF_getnodebyphandle(cell[1]);
		if (node == 0)
			goto out;

		icells = OF_getpropint(node, "#iommu-cells", 1);
		if (ncells < icells + 3)
			goto out;

		KASSERT(icells == 1);

		rid_base = cell[0];
		sid_base = cell[2];
		length = cell[3];
		if (rid >= rid_base && rid < rid_base + length) {
			cells[0] = sid_base + (rid - rid_base);
			*phandle = cell[1];
			ret = 0;
			break;
		}

		cell += 4;
		ncells -= 4;
	}

out:
	free(map, M_TEMP, len);

	return ret;
}

bus_dma_tag_t
iommu_device_map(int node, bus_dma_tag_t dmat)
{
	uint32_t phandle, cells[2] = {0};

	if (iommu_device_lookup(node, &phandle, &cells[0]))
		return dmat;

	return iommu_device_do_map(phandle, &cells[0], dmat);
}

bus_dma_tag_t
iommu_device_map_pci(int node, uint32_t rid, bus_dma_tag_t dmat)
{
	uint32_t phandle, cells[2] = {0};

	if (iommu_device_lookup_pci(node, rid, &phandle, &cells[0]))
		return dmat;

	return iommu_device_do_map(phandle, &cells[0], dmat);
}

void
iommu_device_do_reserve(uint32_t phandle, uint32_t *cells, bus_addr_t addr,
    bus_size_t size)
{
	struct iommu_device *id;

	if (phandle == 0)
		return;

	LIST_FOREACH(id, &iommu_devices, id_list) {
		if (id->id_phandle == phandle) {
			id->id_reserve(id->id_cookie, cells, addr, size);
			break;
		}
	}
}

void
iommu_reserve_region_pci(int node, uint32_t rid, bus_addr_t addr,
    bus_size_t size)
{
	uint32_t phandle, cells[2] = {0};

	if (iommu_device_lookup_pci(node, rid, &phandle, &cells[0]))
		return;

	iommu_device_do_reserve(phandle, &cells[0], addr, size);
}
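
/*
 * Example (illustrative, not part of the original file): a bus
 * driver attaching an FDT child would swap in a DMA tag that
 * translates through the IOMMU when the child has an "iommus"
 * property; "faa" is a hypothetical struct fdt_attach_args.
 *
 *	faa->fa_dmat = iommu_device_map(faa->fa_node, faa->fa_dmat);
 */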

/*
 * Mailbox support.
 */

struct mbox_channel {
	struct mbox_device	*mc_md;
	void			*mc_cookie;
};

LIST_HEAD(, mbox_device) mbox_devices =
	LIST_HEAD_INITIALIZER(mbox_devices);

void
mbox_register(struct mbox_device *md)
{
	md->md_cells = OF_getpropint(md->md_node, "#mbox-cells", 0);
	md->md_phandle = OF_getpropint(md->md_node, "phandle", 0);
	if (md->md_phandle == 0)
		return;

	LIST_INSERT_HEAD(&mbox_devices, md, md_list);
}

struct mbox_channel *
mbox_channel_cells(uint32_t *cells, struct mbox_client *client)
{
	struct mbox_device *md;
	struct mbox_channel *mc;
	uint32_t phandle = cells[0];
	void *cookie;

	LIST_FOREACH(md, &mbox_devices, md_list) {
		if (md->md_phandle == phandle)
			break;
	}

	if (md && md->md_channel) {
		cookie = md->md_channel(md->md_cookie, &cells[1], client);
		if (cookie) {
			mc = malloc(sizeof(*mc), M_DEVBUF, M_WAITOK);
			mc->mc_md = md;
			mc->mc_cookie = cookie;
			return mc;
		}
	}

	return NULL;
}

uint32_t *
mbox_next_mbox(uint32_t *cells)
{
	uint32_t phandle = cells[0];
	int node, ncells;

	node = OF_getnodebyphandle(phandle);
	if (node == 0)
		return NULL;

	ncells = OF_getpropint(node, "#mbox-cells", 0);
	return cells + ncells + 1;
}

struct mbox_channel *
mbox_channel_idx(int node, int idx, struct mbox_client *client)
{
	struct mbox_channel *mc = NULL;
	uint32_t *mboxes;
	uint32_t *mbox;
	int len;

	len = OF_getproplen(node, "mboxes");
	if (len <= 0)
		return NULL;

	mboxes = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "mboxes", mboxes, len);

	mbox = mboxes;
	while (mbox && mbox < mboxes + (len / sizeof(uint32_t))) {
		if (idx == 0) {
			mc = mbox_channel_cells(mbox, client);
			break;
		}
		mbox = mbox_next_mbox(mbox);
		idx--;
	}

	free(mboxes, M_TEMP, len);
	return mc;
}

struct mbox_channel *
mbox_channel(int node, const char *name, struct mbox_client *client)
{
	int idx;

	idx = OF_getindex(node, name, "mbox-names");
	if (idx == -1)
		return NULL;

	return mbox_channel_idx(node, idx, client);
}

int
mbox_send(struct mbox_channel *mc, const void *data, size_t len)
{
	struct mbox_device *md = mc->mc_md;

	if (md->md_send)
		return md->md_send(mc->mc_cookie, data, len);

	return ENXIO;
}

int
mbox_recv(struct mbox_channel *mc, void *data, size_t len)
{
	struct mbox_device *md = mc->mc_md;

	if (md->md_recv)
		return md->md_recv(mc->mc_cookie, data, len);

	return ENXIO;
}
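
/*
 * Example (illustrative, not part of the original file): a driver
 * could claim the channel named "tx" from its "mboxes"/"mbox-names"
 * properties and post a message; struct my_msg and the NULL client
 * argument are made up for this sketch.
 *
 *	struct mbox_channel *mc;
 *	struct my_msg msg;
 *
 *	mc = mbox_channel(sc->sc_node, "tx", NULL);
 *	if (mc != NULL)
 *		mbox_send(mc, &msg, sizeof(msg));
 */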
1298