xref: /openbsd-src/sys/dev/fdt/qcpas.c (revision d5abdd01d7a5f24fb6f9b0aab446ef59a9e9067a)
1 /*	$OpenBSD: qcpas.c,v 1.1 2023/06/10 18:31:38 patrick Exp $	*/
2 /*
3  * Copyright (c) 2023 Patrick Wildt <patrick@blueri.se>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <sys/param.h>
19 #include <sys/systm.h>
20 #include <sys/device.h>
21 #include <sys/malloc.h>
22 #include <sys/atomic.h>
23 #include <sys/exec_elf.h>
24 #include <sys/task.h>
25 
26 #include <machine/apmvar.h>
27 #include <machine/bus.h>
28 #include <machine/fdt.h>
29 #include <uvm/uvm_extern.h>
30 
31 #include <dev/ofw/openfirm.h>
32 #include <dev/ofw/ofw_clock.h>
33 #include <dev/ofw/ofw_misc.h>
34 #include <dev/ofw/ofw_power.h>
35 #include <dev/ofw/fdt.h>
36 
37 #include "apm.h"
38 
39 #define MDT_TYPE_MASK				(7 << 24)
40 #define MDT_TYPE_HASH				(2 << 24)
41 #define MDT_RELOCATABLE				(1 << 27)
42 
43 #define HREAD4(sc, reg)							\
44 	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
45 #define HWRITE4(sc, reg, val)						\
46 	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
47 
/*
 * One bus_dma(9) allocation: a single-segment DMA map, its backing
 * segment, the allocation size and the kernel-virtual mapping.
 * Created by qcpas_dmamem_alloc(), torn down by qcpas_dmamem_free().
 */
struct qcpas_dmamem {
	bus_dmamap_t		tdm_map;	/* DMA map (exactly one segment) */
	bus_dma_segment_t	tdm_seg;	/* backing physical segment */
	size_t			tdm_size;	/* allocation size in bytes */
	caddr_t			tdm_kva;	/* kernel virtual address */
};
54 #define QCPAS_DMA_MAP(_tdm)	((_tdm)->tdm_map)
55 #define QCPAS_DMA_LEN(_tdm)	((_tdm)->tdm_size)
56 #define QCPAS_DMA_DVA(_tdm)	((_tdm)->tdm_map->dm_segs[0].ds_addr)
57 #define QCPAS_DMA_KVA(_tdm)	((void *)(_tdm)->tdm_kva)
58 
/*
 * Per-device softc.  The first part covers the PAS (Peripheral
 * Authentication Service) firmware boot; the second part holds the
 * GLINK shared-memory transport state used to talk to the booted
 * remote processor.
 */
struct qcpas_softc {
	struct device		sc_dev;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_dma_tag_t		sc_dmat;
	int			sc_node;	/* device tree node */

	void			*sc_ih[6];	/* wdog/fatal/ready/handover/stop-ack/shutdown-ack */

	paddr_t			sc_mem_phys;	/* "memory-region" carveout base */
	size_t			sc_mem_size;	/* carveout size */
	void			*sc_mem_region;	/* KVA mapping of the carveout */
	vaddr_t			sc_mem_reloc;	/* load base firmware paddrs are relative to */

	uint32_t		sc_pas_id;	/* PAS image id passed to SCM calls */
	char			*sc_load_state;	/* AOSS load-state name, or NULL */

	struct qcpas_dmamem	*sc_metadata;	/* mdt metadata handed to TrustZone */

	/* GLINK */
	volatile uint32_t	*sc_tx_tail;	/* shared-memory FIFO descriptors */
	volatile uint32_t	*sc_tx_head;
	volatile uint32_t	*sc_rx_tail;
	volatile uint32_t	*sc_rx_head;

	uint32_t		sc_tx_off;	/* bytes staged but not yet committed */
	uint32_t		sc_rx_off;	/* bytes consumed but not yet committed */

	uint8_t			*sc_tx_fifo;
	int			sc_tx_fifolen;
	uint8_t			*sc_rx_fifo;
	int			sc_rx_fifolen;
	void			*sc_glink_ih;

	struct mbox_channel	*sc_mc;		/* doorbell towards the remote */

	struct task		sc_glink_rx;	/* deferred RX processing */
	uint32_t		sc_glink_max_channel;	/* last local channel id handed out */
	TAILQ_HEAD(,qcpas_glink_channel) sc_glink_channels;
};
99 
100 int	qcpas_match(struct device *, void *, void *);
101 void	qcpas_attach(struct device *, struct device *, void *);
102 
/* autoconf(9) glue. */
const struct cfattach qcpas_ca = {
	sizeof (struct qcpas_softc), qcpas_match, qcpas_attach
};

struct cfdriver qcpas_cd = {
	NULL, "qcpas", DV_DULL
};
110 
111 void	qcpas_mountroot(struct device *);
112 int	qcpas_map_memory(struct qcpas_softc *);
113 int	qcpas_mdt_init(struct qcpas_softc *, u_char *, size_t);
114 void	qcpas_glink_attach(struct qcpas_softc *, int);
115 
116 struct qcpas_dmamem *
117 	qcpas_dmamem_alloc(struct qcpas_softc *, bus_size_t, bus_size_t);
118 void	qcpas_dmamem_free(struct qcpas_softc *, struct qcpas_dmamem *);
119 
120 void	qcpas_intr_establish(struct qcpas_softc *, int, char *, void *);
121 int	qcpas_intr_wdog(void *);
122 int	qcpas_intr_fatal(void *);
123 int	qcpas_intr_ready(void *);
124 int	qcpas_intr_handover(void *);
125 int	qcpas_intr_stop_ack(void *);
126 int	qcpas_intr_shutdown_ack(void *);
127 
128 int
129 qcpas_match(struct device *parent, void *match, void *aux)
130 {
131 	struct fdt_attach_args *faa = aux;
132 
133 	return OF_is_compatible(faa->fa_node, "qcom,sc8280xp-adsp-pas");
134 }
135 
/*
 * Map the device registers, record per-compatible PAS parameters and
 * hook up the remote-processor status interrupts.  The actual firmware
 * load is deferred to mountroot time, when the root filesystem (and
 * thus the firmware file) is available.
 */
void
qcpas_attach(struct device *parent, struct device *self, void *aux)
{
	struct qcpas_softc *sc = (struct qcpas_softc *)self;
	struct fdt_attach_args *faa = aux;

	if (faa->fa_nreg < 1) {
		printf(": no registers\n");
		return;
	}

	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
		printf(": can't map registers\n");
		return;
	}
	sc->sc_dmat = faa->fa_dmat;
	sc->sc_node = faa->fa_node;

	/*
	 * PAS image ids and AOSS load-state names are per-coprocessor;
	 * only the ADSP has a load-state to toggle here.
	 */
	if (OF_is_compatible(faa->fa_node, "qcom,sc8280xp-adsp-pas")) {
		sc->sc_pas_id = 1;
		sc->sc_load_state = "adsp";
	}
	if (OF_is_compatible(faa->fa_node, "qcom,sc8280xp-nsp0-pas")) {
		sc->sc_pas_id = 18;
	}
	if (OF_is_compatible(faa->fa_node, "qcom,sc8280xp-nsp1-pas")) {
		sc->sc_pas_id = 30;
	}

	/* Names must match the "interrupt-names" property entries. */
	qcpas_intr_establish(sc, 0, "wdog", qcpas_intr_wdog);
	qcpas_intr_establish(sc, 1, "fatal", qcpas_intr_fatal);
	qcpas_intr_establish(sc, 2, "ready", qcpas_intr_ready);
	qcpas_intr_establish(sc, 3, "handover", qcpas_intr_handover);
	qcpas_intr_establish(sc, 4, "stop-ack", qcpas_intr_stop_ack);
	qcpas_intr_establish(sc, 5, "shutdown-ack", qcpas_intr_shutdown_ack);

	printf("\n");

	config_mountroot(self, qcpas_mountroot);
}
178 
179 extern int qcaoss_send(char *, size_t);
180 
181 void
182 qcpas_mountroot(struct device *self)
183 {
184 	struct qcpas_softc *sc = (struct qcpas_softc *)self;
185 	char fwname[64];
186 	size_t fwlen;
187 	u_char *fw;
188 	int node, ret;
189 
190 	if (qcpas_map_memory(sc) != 0)
191 		return;
192 
193 	if (OF_getproplen(sc->sc_node, "firmware-name") <= 0)
194 		return;
195 	OF_getprop(sc->sc_node, "firmware-name", fwname, sizeof(fwname));
196 	fwname[sizeof(fwname) - 1] = '\0';
197 
198 	if (loadfirmware(fwname, &fw, &fwlen) != 0) {
199 		printf("%s: failed to load %s\n",
200 		    sc->sc_dev.dv_xname, fwname);
201 		return;
202 	}
203 
204 	if (sc->sc_load_state) {
205 		char buf[64];
206 		snprintf(buf, sizeof(buf),
207 		    "{class: image, res: load_state, name: %s, val: on}",
208 		    sc->sc_load_state);
209 		ret = qcaoss_send(buf, sizeof(buf));
210 		if (ret != 0) {
211 			printf("%s: failed to toggle load state\n",
212 			    sc->sc_dev.dv_xname);
213 			return;
214 		}
215 	}
216 
217 	power_domain_enable_all(sc->sc_node);
218 	clock_enable(sc->sc_node, "xo");
219 
220 	ret = qcpas_mdt_init(sc, fw, fwlen);
221 	free(fw, M_DEVBUF, fwlen);
222 	if (ret != 0) {
223 		printf("%s: failed to boot coprocessor\n",
224 		    sc->sc_dev.dv_xname);
225 		return;
226 	}
227 
228 	node = OF_getnodebyname(sc->sc_node, "glink-edge");
229 	if (node)
230 		qcpas_glink_attach(sc, node);
231 }
232 
/*
 * Look up the reserved-memory carveout referenced by "memory-region"
 * and map it into kernel virtual address space so the firmware
 * segments can be copied in.
 *
 * The "reg" property is read as four cells, i.e. this assumes
 * #address-cells = #size-cells = 2 in the reserved-memory node.
 * Returns 0 on success or an errno-style value.
 */
int
qcpas_map_memory(struct qcpas_softc *sc)
{
	uint32_t phandle, reg[4];
	size_t off;
	int node;

	phandle = OF_getpropint(sc->sc_node, "memory-region", 0);
	if (phandle == 0)
		return EINVAL;
	node = OF_getnodebyphandle(phandle);
	if (node == 0)
		return EINVAL;
	if (OF_getpropintarray(node, "reg", reg, sizeof(reg)) != sizeof(reg))
		return EINVAL;

	sc->sc_mem_phys = (uint64_t)reg[0] << 32 | reg[1];
	KASSERT((sc->sc_mem_phys & PAGE_MASK) == 0);
	sc->sc_mem_size = (uint64_t)reg[2] << 32 | reg[3];
	KASSERT((sc->sc_mem_size & PAGE_MASK) == 0);

	/* Reserve a VA range, then wire it to the carveout page by page. */
	sc->sc_mem_region = km_alloc(sc->sc_mem_size, &kv_any, &kp_none,
	    &kd_nowait);
	if (!sc->sc_mem_region)
		return ENOMEM;

	for (off = 0; off < sc->sc_mem_size; off += PAGE_SIZE) {
		/* Device memory: the coprocessor accesses it too. */
		pmap_kenter_cache((vaddr_t)sc->sc_mem_region + off,
		    sc->sc_mem_phys + off, PROT_READ | PROT_WRITE,
		    PMAP_CACHE_DEV_NGNRNE);
	}

	return 0;
}
267 
268 extern int qcscm_pas_init_image(uint32_t, paddr_t);
269 extern int qcscm_pas_mem_setup(uint32_t, paddr_t, size_t);
270 extern int qcscm_pas_auth_and_reset(uint32_t);
271 
/*
 * Boot a Qualcomm "mdt" firmware image: an ELF whose program headers
 * carry Qualcomm-specific bits in p_flags.  phdr[0] holds the image
 * metadata (it must not be PT_LOAD) and one segment is a hash segment
 * (MDT_TYPE_HASH).  Metadata + hash segment are handed to TrustZone
 * via qcscm_pas_init_image(); the loadable segments are copied into
 * the carveout and the coprocessor is started with
 * qcscm_pas_auth_and_reset().  We then wait for the "ready" interrupt
 * (qcpas_intr_ready() wakes us up on &sc).
 *
 * Returns 0 on success or an errno-style value.
 */
int
qcpas_mdt_init(struct qcpas_softc *sc, u_char *fw, size_t fwlen)
{
	Elf32_Ehdr *ehdr;
	Elf32_Phdr *phdr;
	paddr_t minpa = -1, maxpa = 0;
	int i, hashseg = 0, relocate = 0;
	int error;
	ssize_t off;

	ehdr = (Elf32_Ehdr *)fw;
	phdr = (Elf32_Phdr *)&ehdr[1];

	/* phdr[0] is the metadata segment; a PT_LOAD there is invalid. */
	if (ehdr->e_phnum < 2 || phdr[0].p_type == PT_LOAD)
		return EINVAL;

	/* First pass: find the hash segment and the load address range. */
	for (i = 0; i < ehdr->e_phnum; i++) {
		if ((phdr[i].p_flags & MDT_TYPE_MASK) == MDT_TYPE_HASH) {
			if (i > 0 && !hashseg)
				hashseg = i;
			continue;
		}
		if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0)
			continue;
		if (phdr[i].p_flags & MDT_RELOCATABLE)
			relocate = 1;
		if (phdr[i].p_paddr < minpa)
			minpa = phdr[i].p_paddr;
		if (phdr[i].p_paddr + phdr[i].p_memsz > maxpa)
			maxpa =
			    roundup(phdr[i].p_paddr + phdr[i].p_memsz,
			    PAGE_SIZE);
	}

	if (!hashseg)
		return EINVAL;

	/* Metadata for TZ: ELF header/phdrs followed by the hash segment. */
	sc->sc_metadata = qcpas_dmamem_alloc(sc, phdr[0].p_filesz +
	    phdr[hashseg].p_filesz, PAGE_SIZE);
	if (sc->sc_metadata == NULL)
		return EINVAL;

	memcpy(QCPAS_DMA_KVA(sc->sc_metadata), fw, phdr[0].p_filesz);
	if (phdr[0].p_filesz + phdr[hashseg].p_filesz == fwlen) {
		/* Hash directly follows the headers in the file. */
		memcpy(QCPAS_DMA_KVA(sc->sc_metadata) + phdr[0].p_filesz,
		    fw + phdr[0].p_filesz, phdr[hashseg].p_filesz);
	} else if (phdr[hashseg].p_offset + phdr[hashseg].p_filesz <= fwlen) {
		/* Hash segment lives at its stated file offset. */
		memcpy(QCPAS_DMA_KVA(sc->sc_metadata) + phdr[0].p_filesz,
		    fw + phdr[hashseg].p_offset, phdr[hashseg].p_filesz);
	} else {
		printf("%s: metadata split segment not supported\n",
		    sc->sc_dev.dv_xname);
		return EINVAL;
	}

	/* Make the metadata visible before TrustZone reads it. */
	membar_producer();

	if (qcscm_pas_init_image(sc->sc_pas_id,
	    QCPAS_DMA_DVA(sc->sc_metadata)) != 0) {
		printf("%s: init image failed\n", sc->sc_dev.dv_xname);
		qcpas_dmamem_free(sc, sc->sc_metadata);
		return EINVAL;
	}

	if (qcscm_pas_mem_setup(sc->sc_pas_id,
	    sc->sc_mem_phys, maxpa - minpa) != 0) {
		printf("%s: mem setup failed\n", sc->sc_dev.dv_xname);
		qcpas_dmamem_free(sc, sc->sc_metadata);
		return EINVAL;
	}

	/* Relocatable images are linked from minpa; others are absolute. */
	sc->sc_mem_reloc = relocate ? minpa : sc->sc_mem_phys;

	/* Second pass: copy loadable segments, zero-fill BSS tails. */
	for (i = 0; i < ehdr->e_phnum; i++) {
		if ((phdr[i].p_flags & MDT_TYPE_MASK) == MDT_TYPE_HASH ||
		    phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0)
			continue;
		off = phdr[i].p_paddr - sc->sc_mem_reloc;
		/*
		 * NOTE(review): sc_metadata is not freed on the EINVAL
		 * returns below, unlike the earlier error paths — but TZ
		 * may still reference it after init_image; confirm before
		 * changing.
		 */
		if (off < 0 || off + phdr[i].p_memsz > sc->sc_mem_size)
			return EINVAL;
		if (phdr[i].p_filesz > phdr[i].p_memsz)
			return EINVAL;

		if (phdr[i].p_filesz && phdr[i].p_offset < fwlen &&
		    phdr[i].p_offset + phdr[i].p_filesz <= fwlen) {
			memcpy(sc->sc_mem_region + off, fw + phdr[i].p_offset,
			    phdr[i].p_filesz);
		} else if (phdr[i].p_filesz) {
			printf("%s: firmware split segment not supported\n",
			    sc->sc_dev.dv_xname);
			return EINVAL;
		}

		if (phdr[i].p_memsz > phdr[i].p_filesz)
			memset(sc->sc_mem_region + off + phdr[i].p_filesz, 0,
			    phdr[i].p_memsz - phdr[i].p_filesz);
	}

	/* Segments must be visible before the coprocessor is released. */
	membar_producer();

	if (qcscm_pas_auth_and_reset(sc->sc_pas_id) != 0) {
		printf("%s: auth and reset failed\n", sc->sc_dev.dv_xname);
		qcpas_dmamem_free(sc, sc->sc_metadata);
		return EINVAL;
	}

	/* Wait up to 5s for qcpas_intr_ready() to wake us. */
	error = tsleep_nsec(sc, PWAIT, "qcpas", SEC_TO_NSEC(5));
	if (error) {
		printf("%s: failed to receive ready signal\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	/* XXX: free metadata ? */

	return 0;
}
389 
390 struct qcpas_dmamem *
391 qcpas_dmamem_alloc(struct qcpas_softc *sc, bus_size_t size, bus_size_t align)
392 {
393 	struct qcpas_dmamem *tdm;
394 	int nsegs;
395 
396 	tdm = malloc(sizeof(*tdm), M_DEVBUF, M_WAITOK | M_ZERO);
397 	tdm->tdm_size = size;
398 
399 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
400 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &tdm->tdm_map) != 0)
401 		goto tdmfree;
402 
403 	if (bus_dmamem_alloc_range(sc->sc_dmat, size, align, 0,
404 	    &tdm->tdm_seg, 1, &nsegs, BUS_DMA_WAITOK, 0, 0xffffffff) != 0)
405 		goto destroy;
406 
407 	if (bus_dmamem_map(sc->sc_dmat, &tdm->tdm_seg, nsegs, size,
408 	    &tdm->tdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
409 		goto free;
410 
411 	if (bus_dmamap_load(sc->sc_dmat, tdm->tdm_map, tdm->tdm_kva, size,
412 	    NULL, BUS_DMA_WAITOK) != 0)
413 		goto unmap;
414 
415 	bzero(tdm->tdm_kva, size);
416 
417 	return (tdm);
418 
419 unmap:
420 	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, size);
421 free:
422 	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
423 destroy:
424 	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
425 tdmfree:
426 	free(tdm, M_DEVBUF, 0);
427 
428 	return (NULL);
429 }
430 
431 void
432 qcpas_dmamem_free(struct qcpas_softc *sc, struct qcpas_dmamem *tdm)
433 {
434 	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, tdm->tdm_size);
435 	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
436 	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
437 	free(tdm, M_DEVBUF, 0);
438 }
439 
440 void
441 qcpas_intr_establish(struct qcpas_softc *sc, int i, char *name, void *handler)
442 {
443 	int idx;
444 
445 	idx = OF_getindex(sc->sc_node, name, "interrupt-names");
446 	if (idx >= 0)
447 		sc->sc_ih[i] =
448 		    fdt_intr_establish_idx(sc->sc_node, idx, IPL_BIO,
449 		    handler, sc, sc->sc_dev.dv_xname);
450 }
451 
/*
 * Remote watchdog-bite interrupt.  No-op placeholder: recovery is not
 * implemented; returns 0 (interrupt not claimed).
 */
int
qcpas_intr_wdog(void *cookie)
{
	return 0;
}
457 
/*
 * Remote fatal-error interrupt.  No-op placeholder: crash handling is
 * not implemented; returns 0 (interrupt not claimed).
 */
int
qcpas_intr_fatal(void *cookie)
{
	return 0;
}
463 
/*
 * "Ready" interrupt from the freshly booted coprocessor.  Wakes up the
 * tsleep_nsec() in qcpas_mdt_init() that waits on the softc.
 */
int
qcpas_intr_ready(void *cookie)
{
	struct qcpas_softc *sc = cookie;

	wakeup(sc);
	return 0;
}
472 
/*
 * "Handover" interrupt.  No-op placeholder; returns 0 (interrupt not
 * claimed).
 */
int
qcpas_intr_handover(void *cookie)
{
	return 0;
}
478 
/*
 * "Stop acknowledge" interrupt.  No-op placeholder; returns 0
 * (interrupt not claimed).
 */
int
qcpas_intr_stop_ack(void *cookie)
{
	return 0;
}
484 
/*
 * "Shutdown acknowledge" interrupt.  No-op placeholder; returns 0
 * (interrupt not claimed).
 */
int
qcpas_intr_shutdown_ack(void *cookie)
{
	return 0;
}
490 
491 /* GLINK */
492 
493 #define SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR	478
494 #define SMEM_GLINK_NATIVE_XPRT_FIFO_0		479
495 #define SMEM_GLINK_NATIVE_XPRT_FIFO_1		480
496 
/*
 * GLINK wire message header.  `cmd' is one of the GLINK_CMD_* values;
 * the meaning of param1/param2 depends on the command.  Variable
 * payload follows in `data'.
 */
struct glink_msg {
	uint16_t cmd;
	uint16_t param1;
	uint32_t param2;
	uint8_t data[];
} __packed;
503 
/* Wire format of one advertised intent: its size and intent id. */
struct qcpas_glink_intent_pair {
	uint32_t size;
	uint32_t iid;
} __packed;
508 
/* In-kernel bookkeeping for one RX/TX intent on a channel. */
struct qcpas_glink_intent {
	TAILQ_ENTRY(qcpas_glink_intent) it_q;
	uint32_t it_id;		/* intent id on the wire */
	uint32_t it_size;	/* buffer size the intent covers */
	int it_inuse;		/* set while a send is outstanding */
};
515 
/*
 * One open GLINK channel: local/remote channel ids, the protocol
 * handler bound at open time, and the lists of local (our RX buffers)
 * and remote (peer RX buffers) intents.
 */
struct qcpas_glink_channel {
	TAILQ_ENTRY(qcpas_glink_channel) ch_q;
	struct qcpas_softc *ch_sc;
	struct qcpas_glink_protocol *ch_proto;
	uint32_t ch_rcid;	/* remote channel id */
	uint32_t ch_lcid;	/* local channel id */
	uint32_t ch_max_intent;	/* last local intent id handed out */
	TAILQ_HEAD(,qcpas_glink_intent) ch_l_intents;
	TAILQ_HEAD(,qcpas_glink_intent) ch_r_intents;
};
526 
527 #define GLINK_CMD_VERSION		0
528 #define GLINK_CMD_VERSION_ACK		1
529 #define  GLINK_VERSION				1
530 #define  GLINK_FEATURE_INTENT_REUSE		(1 << 0)
531 #define GLINK_CMD_OPEN			2
532 #define GLINK_CMD_CLOSE			3
533 #define GLINK_CMD_OPEN_ACK		4
534 #define GLINK_CMD_INTENT		5
535 #define GLINK_CMD_RX_DONE		6
536 #define GLINK_CMD_RX_INTENT_REQ		7
537 #define GLINK_CMD_RX_INTENT_REQ_ACK	8
538 #define GLINK_CMD_TX_DATA		9
539 #define GLINK_CMD_CLOSE_ACK		11
540 #define GLINK_CMD_TX_DATA_CONT		12
541 #define GLINK_CMD_READ_NOTIF		13
542 #define GLINK_CMD_RX_DONE_W_REUSE	14
543 
544 void	qcpas_glink_recv(void *);
545 int	qcpas_glink_intr(void *);
546 
547 void	qcpas_glink_tx(struct qcpas_softc *, uint8_t *, int);
548 void	qcpas_glink_tx_commit(struct qcpas_softc *);
549 void	qcpas_glink_rx(struct qcpas_softc *, uint8_t *, int);
550 void	qcpas_glink_rx_commit(struct qcpas_softc *);
551 
552 void	qcpas_glink_send(void *, void *, int);
553 
554 extern int qcsmem_alloc(int, int, int);
555 extern void *qcsmem_get(int, int, int *);
556 
557 int	qcpas_pmic_rtr_init(void *);
558 int	qcpas_pmic_rtr_recv(void *, uint8_t *, int);
559 int	qcpas_pmic_rtr_apminfo(struct apm_power_info *);
560 
/*
 * Table of supported channel protocols, matched by the channel name
 * the remote sends in GLINK_CMD_OPEN (see qcpas_glink_recv_open()).
 */
struct qcpas_glink_protocol {
	char *name;				/* channel name on the wire */
	int (*init)(void *cookie);		/* called once at open */
	int (*recv)(void *cookie, uint8_t *buf, int len); /* RX payload */
} qcpas_glink_protocols[] = {
	{ "PMIC_RTR_ADSP_APPS", qcpas_pmic_rtr_init, qcpas_pmic_rtr_recv },
};
568 
/*
 * Set up the GLINK shared-memory transport for a "glink-edge" node:
 * allocate/look up the descriptor block and both FIFOs in qcsmem,
 * bind the mailbox doorbell and the RX interrupt.  After this the
 * peer is expected to start the conversation with GLINK_CMD_VERSION.
 */
void
qcpas_glink_attach(struct qcpas_softc *sc, int node)
{
	uint32_t remote;
	uint32_t *descs;
	int size;

	remote = OF_getpropint(node, "qcom,remote-pid", -1);
	if (remote == -1)
		return;

	/* Descriptor block (4 head/tail words x 2 directions) + TX FIFO. */
	if (qcsmem_alloc(remote, SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, 32) != 0 ||
	    qcsmem_alloc(remote, SMEM_GLINK_NATIVE_XPRT_FIFO_0, 16384) != 0)
		return;

	descs = qcsmem_get(remote, SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, &size);
	if (descs == NULL || size != 32)
		return;

	sc->sc_tx_tail = &descs[0];
	sc->sc_tx_head = &descs[1];
	sc->sc_rx_tail = &descs[2];
	sc->sc_rx_head = &descs[3];

	/* FIFO_0 is ours to write (TX); FIFO_1 is written by the peer (RX). */
	sc->sc_tx_fifo = qcsmem_get(remote, SMEM_GLINK_NATIVE_XPRT_FIFO_0,
	    &sc->sc_tx_fifolen);
	if (sc->sc_tx_fifo == NULL)
		return;
	sc->sc_rx_fifo = qcsmem_get(remote, SMEM_GLINK_NATIVE_XPRT_FIFO_1,
	    &sc->sc_rx_fifolen);
	if (sc->sc_rx_fifo == NULL)
		return;

	sc->sc_mc = mbox_channel_idx(node, 0, NULL);
	if (sc->sc_mc == NULL)
		return;

	TAILQ_INIT(&sc->sc_glink_channels);
	task_set(&sc->sc_glink_rx, qcpas_glink_recv, sc);

	sc->sc_glink_ih = fdt_intr_establish(node, IPL_BIO,
	    qcpas_glink_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_glink_ih == NULL)
		return;

	/* Expect peer to send initial message */
}
616 
617 void
618 qcpas_glink_rx(struct qcpas_softc *sc, uint8_t *buf, int len)
619 {
620 	uint32_t head, tail;
621 	int avail;
622 
623 	head = *sc->sc_rx_head;
624 	tail = *sc->sc_rx_tail + sc->sc_rx_off;
625 	if (tail >= sc->sc_rx_fifolen)
626 		tail -= sc->sc_rx_fifolen;
627 
628 	/* Checked by caller */
629 	KASSERT(head != tail);
630 
631 	if (head >= tail)
632 		avail = head - tail;
633 	else
634 		avail = (sc->sc_rx_fifolen - tail) + head;
635 
636 	/* Dumb, but should do. */
637 	KASSERT(avail >= len);
638 
639 	while (len > 0) {
640 		*buf = sc->sc_rx_fifo[tail];
641 		tail++;
642 		if (tail >= sc->sc_rx_fifolen)
643 			tail -= sc->sc_rx_fifolen;
644 		buf++;
645 		sc->sc_rx_off++;
646 		len--;
647 	}
648 }
649 
650 void
651 qcpas_glink_rx_commit(struct qcpas_softc *sc)
652 {
653 	uint32_t tail;
654 
655 	tail = *sc->sc_rx_tail + roundup(sc->sc_rx_off, 8);
656 	if (tail >= sc->sc_rx_fifolen)
657 		tail -= sc->sc_rx_fifolen;
658 
659 	membar_producer();
660 	*sc->sc_rx_tail = tail;
661 	sc->sc_rx_off = 0;
662 }
663 
664 void
665 qcpas_glink_tx(struct qcpas_softc *sc, uint8_t *buf, int len)
666 {
667 	uint32_t head, tail;
668 	int avail;
669 
670 	head = *sc->sc_tx_head + sc->sc_tx_off;
671 	if (head >= sc->sc_tx_fifolen)
672 		head -= sc->sc_tx_fifolen;
673 	tail = *sc->sc_tx_tail;
674 
675 	if (head < tail)
676 		avail = tail - head;
677 	else
678 		avail = (sc->sc_rx_fifolen - head) + tail;
679 
680 	/* Dumb, but should do. */
681 	KASSERT(avail >= len);
682 
683 	while (len > 0) {
684 		sc->sc_tx_fifo[head] = *buf;
685 		head++;
686 		if (head >= sc->sc_tx_fifolen)
687 			head -= sc->sc_tx_fifolen;
688 		buf++;
689 		sc->sc_tx_off++;
690 		len--;
691 	}
692 }
693 
694 void
695 qcpas_glink_tx_commit(struct qcpas_softc *sc)
696 {
697 	uint32_t head;
698 
699 	head = *sc->sc_tx_head + roundup(sc->sc_tx_off, 8);
700 	if (head >= sc->sc_tx_fifolen)
701 		head -= sc->sc_tx_fifolen;
702 
703 	membar_producer();
704 	*sc->sc_tx_head = head;
705 	sc->sc_tx_off = 0;
706 	mbox_send(sc->sc_mc, NULL, 0);
707 }
708 
709 void
710 qcpas_glink_send(void *cookie, void *buf, int len)
711 {
712 	struct qcpas_glink_channel *ch = cookie;
713 	struct qcpas_softc *sc = ch->ch_sc;
714 	struct qcpas_glink_intent *it;
715 	struct glink_msg msg;
716 	uint32_t chunk_size, left_size;
717 
718 	TAILQ_FOREACH(it, &ch->ch_r_intents, it_q) {
719 		if (!it->it_inuse)
720 			break;
721 		if (it->it_size < len)
722 			continue;
723 	}
724 	if (it == NULL) {
725 		printf("%s: all intents in use\n",
726 		    sc->sc_dev.dv_xname);
727 		return;
728 	}
729 	it->it_inuse = 1;
730 
731 	msg.cmd = GLINK_CMD_TX_DATA;
732 	msg.param1 = ch->ch_lcid;
733 	msg.param2 = it->it_id;
734 
735 	chunk_size = len;
736 	left_size = 0;
737 
738 	qcpas_glink_tx(sc, (char *)&msg, sizeof(msg));
739 	qcpas_glink_tx(sc, (char *)&chunk_size, sizeof(chunk_size));
740 	qcpas_glink_tx(sc, (char *)&left_size, sizeof(left_size));
741 	qcpas_glink_tx(sc, buf, len);
742 	qcpas_glink_tx_commit(sc);
743 }
744 
745 void
746 qcpas_glink_recv_version(struct qcpas_softc *sc, uint32_t version,
747     uint32_t features)
748 {
749 	struct glink_msg msg;
750 
751 	if (version != GLINK_VERSION) {
752 		printf("%s: unsupported glink version %u\n",
753 		    sc->sc_dev.dv_xname, version);
754 		return;
755 	}
756 
757 	msg.cmd = GLINK_CMD_VERSION_ACK;
758 	msg.param1 = GLINK_VERSION;
759 	msg.param2 = features & GLINK_FEATURE_INTENT_REUSE;
760 
761 	qcpas_glink_tx(sc, (char *)&msg, sizeof(msg));
762 	qcpas_glink_tx_commit(sc);
763 }
764 
/*
 * Handle GLINK_CMD_OPEN: the peer opens a channel whose NUL-terminated
 * name (namelen bytes) follows in the FIFO.  If we implement a
 * protocol for that name, ack the open, allocate channel state and
 * open our side of the channel in return.  Unknown names and protocol
 * init failures are silently dropped (no OPEN_ACK is sent).
 */
void
qcpas_glink_recv_open(struct qcpas_softc *sc, uint32_t rcid, uint32_t namelen)
{
	struct qcpas_glink_protocol *proto = NULL;
	struct qcpas_glink_channel *ch;
	struct glink_msg msg;
	char *name;
	int i, err;

	/* Drain the channel name even if we end up rejecting the open. */
	name = malloc(namelen, M_TEMP, M_WAITOK);
	qcpas_glink_rx(sc, name, namelen);
	qcpas_glink_rx_commit(sc);

	TAILQ_FOREACH(ch, &sc->sc_glink_channels, ch_q) {
		if (ch->ch_rcid == rcid) {
			printf("%s: duplicate open for %s\n",
			    sc->sc_dev.dv_xname, name);
			free(name, M_TEMP, namelen);
			return;
		}
	}

	for (i = 0; i < nitems(qcpas_glink_protocols); i++) {
		if (strcmp(qcpas_glink_protocols[i].name, name) != 0)
			continue;
		proto = &qcpas_glink_protocols[i];
		break;
	}
	if (proto == NULL) {
		free(name, M_TEMP, namelen);
		return;
	}

	/* Assume we can leave HW dangling if proto init fails */
	err = proto->init(NULL);
	if (err) {
		free(name, M_TEMP, namelen);
		return;
	}

	ch = malloc(sizeof(*ch), M_DEVBUF, M_WAITOK | M_ZERO);
	ch->ch_sc = sc;
	ch->ch_proto = proto;
	ch->ch_rcid = rcid;
	ch->ch_lcid = ++sc->sc_glink_max_channel;
	TAILQ_INIT(&ch->ch_l_intents);
	TAILQ_INIT(&ch->ch_r_intents);
	TAILQ_INSERT_TAIL(&sc->sc_glink_channels, ch, ch_q);

	/* Acknowledge the peer's open... */
	msg.cmd = GLINK_CMD_OPEN_ACK;
	msg.param1 = ch->ch_rcid;
	msg.param2 = 0;

	qcpas_glink_tx(sc, (char *)&msg, sizeof(msg));
	qcpas_glink_tx_commit(sc);

	/* ...and open our side, sending the name back (param2 = length). */
	msg.cmd = GLINK_CMD_OPEN;
	msg.param1 = ch->ch_lcid;
	msg.param2 = strlen(name) + 1;

	qcpas_glink_tx(sc, (char *)&msg, sizeof(msg));
	qcpas_glink_tx(sc, name, strlen(name) + 1);
	qcpas_glink_tx_commit(sc);

	free(name, M_TEMP, namelen);
}
831 
832 void
833 qcpas_glink_recv_open_ack(struct qcpas_softc *sc, uint32_t lcid)
834 {
835 	struct qcpas_glink_channel *ch;
836 	struct glink_msg msg;
837 	struct qcpas_glink_intent_pair intent;
838 	int i;
839 
840 	TAILQ_FOREACH(ch, &sc->sc_glink_channels, ch_q) {
841 		if (ch->ch_lcid == lcid)
842 			break;
843 	}
844 	if (ch == NULL) {
845 		printf("%s: unknown channel %u for OPEN_ACK\n",
846 		    sc->sc_dev.dv_xname, lcid);
847 		return;
848 	}
849 
850 	/* Respond with default intent now that channel is open */
851 	for (i = 0; i < 5; i++) {
852 		struct qcpas_glink_intent *it;
853 
854 		it = malloc(sizeof(*it), M_DEVBUF, M_WAITOK | M_ZERO);
855 		it->it_id = ++ch->ch_max_intent;
856 		it->it_size = 1024;
857 		TAILQ_INSERT_TAIL(&ch->ch_l_intents, it, it_q);
858 
859 		msg.cmd = GLINK_CMD_INTENT;
860 		msg.param1 = ch->ch_lcid;
861 		msg.param2 = 1;
862 		intent.size = it->it_size;
863 		intent.iid = it->it_id;
864 	}
865 
866 	qcpas_glink_tx(sc, (char *)&msg, sizeof(msg));
867 	qcpas_glink_tx(sc, (char *)&intent, sizeof(intent));
868 	qcpas_glink_tx_commit(sc);
869 }
870 
871 void
872 qcpas_glink_recv_intent(struct qcpas_softc *sc, uint32_t rcid, uint32_t count)
873 {
874 	struct qcpas_glink_intent_pair *intents;
875 	struct qcpas_glink_channel *ch;
876 	struct qcpas_glink_intent *it;
877 	int i;
878 
879 	intents = malloc(sizeof(*intents) * count, M_TEMP, M_WAITOK);
880 	qcpas_glink_rx(sc, (char *)intents, sizeof(*intents) * count);
881 	qcpas_glink_rx_commit(sc);
882 
883 	TAILQ_FOREACH(ch, &sc->sc_glink_channels, ch_q) {
884 		if (ch->ch_rcid == rcid)
885 			break;
886 	}
887 	if (ch == NULL) {
888 		printf("%s: unknown channel %u for INTENT\n",
889 		    sc->sc_dev.dv_xname, rcid);
890 		free(intents, M_TEMP, sizeof(*intents) * count);
891 		return;
892 	}
893 
894 	for (i = 0; i < count; i++) {
895 		it = malloc(sizeof(*it), M_DEVBUF, M_WAITOK | M_ZERO);
896 		it->it_id = intents[i].iid;
897 		it->it_size = intents[i].size;
898 		TAILQ_INSERT_TAIL(&ch->ch_r_intents, it, it_q);
899 	}
900 
901 	free(intents, M_TEMP, sizeof(*intents) * count);
902 }
903 
/*
 * Handle GLINK_CMD_TX_DATA: the peer sends payload into our local
 * intent `liid' on channel `rcid'.  The wire format is chunk_size and
 * left_size words followed by chunk_size payload bytes.  The payload
 * is handed to the channel's protocol recv() callback and the intent
 * is returned for reuse with RX_DONE_W_REUSE.  Chunked messages
 * (left_size != 0) are not supported.
 */
void
qcpas_glink_recv_tx_data(struct qcpas_softc *sc, uint32_t rcid, uint32_t liid)
{
	struct qcpas_glink_channel *ch;
	struct qcpas_glink_intent *it;
	struct glink_msg msg;
	uint32_t chunk_size, left_size;
	char *buf;

	qcpas_glink_rx(sc, (char *)&chunk_size, sizeof(chunk_size));
	qcpas_glink_rx(sc, (char *)&left_size, sizeof(left_size));
	qcpas_glink_rx_commit(sc);

	/* Always drain the payload, even for unknown channels/intents. */
	buf = malloc(chunk_size, M_TEMP, M_WAITOK);
	qcpas_glink_rx(sc, buf, chunk_size);
	qcpas_glink_rx_commit(sc);

	TAILQ_FOREACH(ch, &sc->sc_glink_channels, ch_q) {
		if (ch->ch_rcid == rcid)
			break;
	}
	if (ch == NULL) {
		printf("%s: unknown channel %u for TX_DATA\n",
		    sc->sc_dev.dv_xname, rcid);
		free(buf, M_TEMP, chunk_size);
		return;
	}

	TAILQ_FOREACH(it, &ch->ch_l_intents, it_q) {
		if (it->it_id == liid)
			break;
	}
	if (it == NULL) {
		printf("%s: unknown intent %u for TX_DATA\n",
		    sc->sc_dev.dv_xname, liid);
		free(buf, M_TEMP, chunk_size);
		return;
	}

	/* FIXME: handle message chunking */
	KASSERT(left_size == 0);

	ch->ch_proto->recv(ch, buf, chunk_size);
	free(buf, M_TEMP, chunk_size);

	if (!left_size) {
		/* Message complete: give the intent back for reuse. */
		msg.cmd = GLINK_CMD_RX_DONE_W_REUSE;
		msg.param1 = ch->ch_lcid;
		msg.param2 = it->it_id;

		qcpas_glink_tx(sc, (char *)&msg, sizeof(msg));
		qcpas_glink_tx_commit(sc);
	}
}
958 
959 void
960 qcpas_glink_recv_rx_done(struct qcpas_softc *sc, uint32_t rcid, uint32_t riid,
961     int reuse)
962 {
963 	struct qcpas_glink_channel *ch;
964 	struct qcpas_glink_intent *it;
965 
966 	TAILQ_FOREACH(ch, &sc->sc_glink_channels, ch_q) {
967 		if (ch->ch_rcid == rcid)
968 			break;
969 	}
970 	if (ch == NULL) {
971 		printf("%s: unknown channel %u for RX_DONE\n",
972 		    sc->sc_dev.dv_xname, rcid);
973 		return;
974 	}
975 
976 	TAILQ_FOREACH(it, &ch->ch_r_intents, it_q) {
977 		if (it->it_id == riid)
978 			break;
979 	}
980 	if (it == NULL) {
981 		printf("%s: unknown intent %u for RX_DONE\n",
982 		    sc->sc_dev.dv_xname, riid);
983 		return;
984 	}
985 
986 	/* FIXME: handle non-reuse */
987 	KASSERT(reuse);
988 
989 	KASSERT(it->it_inuse);
990 	it->it_inuse = 0;
991 }
992 
/*
 * Deferred RX worker (scheduled from qcpas_glink_intr()).  Drains the
 * RX FIFO one message at a time and dispatches on the command code.
 * An unknown command aborts processing, since the length of its
 * payload is unknown and the FIFO position would be lost.
 */
void
qcpas_glink_recv(void *cookie)
{
	struct qcpas_softc *sc = cookie;
	struct glink_msg msg;

	while (*sc->sc_rx_tail != *sc->sc_rx_head) {
		/* Order the head read before reading the FIFO contents. */
		membar_consumer();
		qcpas_glink_rx(sc, (uint8_t *)&msg, sizeof(msg));
		qcpas_glink_rx_commit(sc);

		switch (msg.cmd) {
		case GLINK_CMD_VERSION:
			qcpas_glink_recv_version(sc, msg.param1, msg.param2);
			break;
		case GLINK_CMD_OPEN:
			qcpas_glink_recv_open(sc, msg.param1, msg.param2);
			break;
		case GLINK_CMD_OPEN_ACK:
			qcpas_glink_recv_open_ack(sc, msg.param1);
			break;
		case GLINK_CMD_INTENT:
			qcpas_glink_recv_intent(sc, msg.param1, msg.param2);
			break;
		case GLINK_CMD_RX_INTENT_REQ:
			/* Nothing to do so far */
			break;
		case GLINK_CMD_TX_DATA:
			qcpas_glink_recv_tx_data(sc, msg.param1, msg.param2);
			break;
		case GLINK_CMD_RX_DONE:
			qcpas_glink_recv_rx_done(sc, msg.param1, msg.param2, 0);
			break;
		case GLINK_CMD_RX_DONE_W_REUSE:
			qcpas_glink_recv_rx_done(sc, msg.param1, msg.param2, 1);
			break;
		default:
			printf("%s: unknown cmd %u\n", __func__, msg.cmd);
			return;
		}
	}
}
1035 
/*
 * GLINK doorbell interrupt: defer the actual FIFO processing to the
 * system task queue and claim the interrupt.
 */
int
qcpas_glink_intr(void *cookie)
{
	struct qcpas_softc *sc = cookie;

	task_add(systq, &sc->sc_glink_rx);
	return 1;
}
1044 
1045 /* GLINK PMIC Router */
1046 
/*
 * Header shared by all PMIC GLINK router messages: owning subsystem,
 * message type and subsystem-specific opcode.
 */
struct pmic_glink_hdr {
	uint32_t owner;
#define PMIC_GLINK_OWNER_BATTMGR	32778
#define PMIC_GLINK_OWNER_USBC		32779
#define PMIC_GLINK_OWNER_USBC_PAN	32780
	uint32_t type;
#define PMIC_GLINK_TYPE_REQ_RESP	1
#define PMIC_GLINK_TYPE_NOTIFY		2
	uint32_t opcode;
};
1057 
1058 #define BATTMGR_OPCODE_BAT_STATUS		0x1
1059 #define BATTMGR_OPCODR_REQUEST_NOTIFICATION	0x4
1060 #define BATTMGR_OPCODE_NOTIF			0x7
1061 #define BATTMGR_OPCODE_BAT_INFO			0x9
1062 #define BATTMGR_OPCODE_BAT_DISCHARGE_TIME	0xc
1063 #define BATTMGR_OPCODE_BAT_CHARGE_TIME		0xd
1064 
1065 #define BATTMGR_NOTIF_BAT_PROPERTY		0x30
1066 #define BATTMGR_NOTIF_USB_PROPERTY		0x32
1067 #define BATTMGR_NOTIF_WLS_PROPERTY		0x34
1068 #define BATTMGR_NOTIF_BAT_STATUS		0x80
1069 #define BATTMGR_NOTIF_BAT_INFO			0x81
1070 
1071 #define BATTMGR_CHEMISTRY_LEN			4
1072 #define BATTMGR_STRING_LEN			128
1073 
/*
 * Payload of a BATTMGR_OPCODE_BAT_INFO response: static battery
 * properties.  Field layout is dictated by the firmware protocol, so
 * the misspelled `min_averae_interval_ms' must stay as-is to keep the
 * offsets (it mirrors max_average_interval_ms).
 */
struct battmgr_bat_info {
	uint32_t power_unit;
	uint32_t design_capacity;
	uint32_t last_full_capacity;
	uint32_t battery_tech;
	uint32_t design_voltage;
	uint32_t capacity_low;
	uint32_t capacity_warning;
	uint32_t cycle_count;
	uint32_t accuracy;
	uint32_t max_sample_time_ms;
	uint32_t min_sample_time_ms;
	uint32_t max_average_interval_ms;
	uint32_t min_averae_interval_ms;
	uint32_t capacity_granularity1;
	uint32_t capacity_granularity2;
	uint32_t swappable;
	uint32_t capabilities;
	char model_number[BATTMGR_STRING_LEN];
	char serial_number[BATTMGR_STRING_LEN];
	char battery_type[BATTMGR_STRING_LEN];
	char oem_info[BATTMGR_STRING_LEN];
	char battery_chemistry[BATTMGR_CHEMISTRY_LEN];
	char uid[BATTMGR_STRING_LEN];
	uint32_t critical_bias;
	uint8_t day;
	uint8_t month;
	uint16_t year;
	uint32_t battery_id;
};
1104 
/*
 * Payload of a BATTMGR_OPCODE_BAT_STATUS response: dynamic battery
 * state (charging state, capacity, rate, voltage, source).
 */
struct battmgr_bat_status {
	uint32_t battery_state;
#define BATTMGR_BAT_STATE_DISCHARGE	(1 << 0)
#define BATTMGR_BAT_STATE_CHARGING	(1 << 1)
#define BATTMGR_BAT_STATE_CRITICAL_LOW	(1 << 2)
	uint32_t capacity;
	uint32_t rate;
	uint32_t battery_voltage;
	uint32_t power_state;
	uint32_t charging_source;
#define BATTMGR_CHARGING_SOURCE_AC		1
#define BATTMGR_CHARGING_SOURCE_USB		2
#define BATTMGR_CHARGING_SOURCE_WIRELESS	3
	uint32_t temperature;
};
1120 
1121 void
1122 qcpas_pmic_rtr_battmgr_req_info(void *cookie)
1123 {
1124 	struct {
1125 		struct pmic_glink_hdr hdr;
1126 		uint32_t battery_id;
1127 	} msg;
1128 
1129 	msg.hdr.owner = PMIC_GLINK_OWNER_BATTMGR;
1130 	msg.hdr.type = PMIC_GLINK_TYPE_REQ_RESP;
1131 	msg.hdr.opcode = BATTMGR_OPCODE_BAT_INFO;
1132 	msg.battery_id = 0;
1133 	qcpas_glink_send(cookie, &msg, sizeof(msg));
1134 }
1135 
1136 void
1137 qcpas_pmic_rtr_battmgr_req_status(void *cookie)
1138 {
1139 	struct {
1140 		struct pmic_glink_hdr hdr;
1141 		uint32_t battery_id;
1142 	} msg;
1143 
1144 	msg.hdr.owner = PMIC_GLINK_OWNER_BATTMGR;
1145 	msg.hdr.type = PMIC_GLINK_TYPE_REQ_RESP;
1146 	msg.hdr.opcode = BATTMGR_OPCODE_BAT_STATUS;
1147 	msg.battery_id = 0;
1148 	qcpas_glink_send(cookie, &msg, sizeof(msg));
1149 }
1150 
#if NAPM > 0
/* Cached power state handed to apm(4) via qcpas_pmic_rtr_apminfo(). */
struct apm_power_info qcpas_pmic_rtr_apm_power_info;
/* last_full_capacity from the most recent BAT_INFO; 0 until one arrives. */
uint32_t qcpas_pmic_rtr_last_full_capacity;
#endif
1155 
/*
 * Initialize the pmic glink router: seed the cached apm(4) power info
 * with "unknown" values and register the apm info hook.
 */
int
qcpas_pmic_rtr_init(void *cookie)
{
#if NAPM > 0
	struct apm_power_info *pi = &qcpas_pmic_rtr_apm_power_info;

	/* Nothing is known about the battery until the first update. */
	pi->battery_state = APM_BATT_UNKNOWN;
	pi->ac_state = APM_AC_UNKNOWN;
	pi->battery_life = 0;
	pi->minutes_left = -1;

	apm_setinfohook(qcpas_pmic_rtr_apminfo);
#endif
	return 0;
}
1172 
1173 int
1174 qcpas_pmic_rtr_recv(void *cookie, uint8_t *buf, int len)
1175 {
1176 	struct pmic_glink_hdr hdr;
1177 	uint32_t notification;
1178 
1179 	if (len < sizeof(hdr)) {
1180 		printf("%s: pmic glink message too small\n",
1181 		    __func__);
1182 		return 0;
1183 	}
1184 
1185 	memcpy(&hdr, buf, sizeof(hdr));
1186 
1187 	switch (hdr.owner) {
1188 	case PMIC_GLINK_OWNER_BATTMGR:
1189 		switch (hdr.opcode) {
1190 		case BATTMGR_OPCODE_NOTIF:
1191 			if (len - sizeof(hdr) != sizeof(uint32_t)) {
1192 				printf("%s: invalid battgmr notification\n",
1193 				    __func__);
1194 				return 0;
1195 			}
1196 			memcpy(&notification, buf + sizeof(hdr),
1197 			    sizeof(uint32_t));
1198 			switch (notification) {
1199 			case BATTMGR_NOTIF_BAT_INFO:
1200 				qcpas_pmic_rtr_battmgr_req_info(cookie);
1201 				/* FALLTHROUGH */
1202 			case BATTMGR_NOTIF_BAT_STATUS:
1203 			case BATTMGR_NOTIF_BAT_PROPERTY:
1204 				qcpas_pmic_rtr_battmgr_req_status(cookie);
1205 				break;
1206 			default:
1207 				printf("%s: unknown battmgr notification"
1208 				    " 0x%02x\n", __func__, notification);
1209 				break;
1210 			}
1211 			break;
1212 		case BATTMGR_OPCODE_BAT_INFO: {
1213 			struct battmgr_bat_info *bat;
1214 			if (len - sizeof(hdr) != sizeof(*bat)) {
1215 				printf("%s: invalid battgmr bat info\n",
1216 				    __func__);
1217 				return 0;
1218 			}
1219 			bat = malloc(sizeof(*bat), M_TEMP, M_WAITOK);
1220 			memcpy((void *)bat, buf + sizeof(hdr), sizeof(*bat));
1221 #if NAPM > 0
1222 			qcpas_pmic_rtr_last_full_capacity =
1223 			    bat->last_full_capacity;
1224 #endif
1225 			free(bat, M_TEMP, sizeof(*bat));
1226 			break;
1227 		}
1228 		case BATTMGR_OPCODE_BAT_STATUS: {
1229 			struct battmgr_bat_status *bat;
1230 #if NAPM > 0
1231 			struct apm_power_info *info;
1232 #endif
1233 			if (len - sizeof(hdr) != sizeof(*bat)) {
1234 				printf("%s: invalid battgmr bat status\n",
1235 				    __func__);
1236 				return 0;
1237 			}
1238 #if NAPM > 0
1239 			/* Needs BAT_INFO fist */
1240 			if (!qcpas_pmic_rtr_last_full_capacity)
1241 				return 0;
1242 #endif
1243 			bat = malloc(sizeof(*bat), M_TEMP, M_WAITOK);
1244 			memcpy((void *)bat, buf + sizeof(hdr), sizeof(*bat));
1245 #if NAPM > 0
1246 			info = &qcpas_pmic_rtr_apm_power_info;
1247 			info->battery_life = ((bat->capacity * 100) /
1248 			    qcpas_pmic_rtr_last_full_capacity);
1249 			if (info->battery_life > 50)
1250 				info->battery_state = APM_BATT_HIGH;
1251 			else if (info->battery_life > 25)
1252 				info->battery_state = APM_BATT_LOW;
1253 			else
1254 				info->battery_state = APM_BATT_CRITICAL;
1255 			if (bat->battery_state & BATTMGR_BAT_STATE_CHARGING)
1256 				info->battery_state = APM_BATT_CHARGING;
1257 			else if (bat->battery_state & BATTMGR_BAT_STATE_CRITICAL_LOW)
1258 				info->battery_state = APM_BATT_CRITICAL;
1259 #endif
1260 			free(bat, M_TEMP, sizeof(*bat));
1261 			break;
1262 		}
1263 		default:
1264 			printf("%s: unknown battmgr opcode 0x%02x\n",
1265 			    __func__, hdr.opcode);
1266 			break;
1267 		}
1268 		break;
1269 	default:
1270 		printf("%s: unknown pmic glink owner 0x%04x\n",
1271 		    __func__, hdr.owner);
1272 		break;
1273 	}
1274 
1275 	return 0;
1276 }
1277 
#if NAPM > 0
/*
 * apm(4) info hook: hand back the most recently cached power state.
 */
int
qcpas_pmic_rtr_apminfo(struct apm_power_info *info)
{
	*info = qcpas_pmic_rtr_apm_power_info;

	return 0;
}
#endif
1287