1 /* $NetBSD: qcompas.c,v 1.1 2024/12/30 12:31:10 jmcneill Exp $ */
2 /*	$OpenBSD: qcpas.c,v 1.8 2024/11/08 21:13:34 landry Exp $	*/
3 /*
4  * Copyright (c) 2023 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/device.h>
22 #include <sys/kmem.h>
23 #include <sys/mutex.h>
24 #include <sys/condvar.h>
25 #include <sys/callout.h>
26 #include <sys/exec_elf.h>
27 
28 #include <dev/firmload.h>
29 #include <dev/sysmon/sysmonvar.h>
30 #include <dev/sysmon/sysmon_taskq.h>
31 
32 #include <dev/acpi/acpivar.h>
33 #include <dev/acpi/acpi_intr.h>
34 #include <dev/acpi/qcomipcc.h>
35 #include <dev/acpi/qcompep.h>
36 #include <dev/acpi/qcomscm.h>
37 #include <dev/acpi/qcomsmem.h>
38 #include <dev/acpi/qcomsmptp.h>
39 
40 #define DRIVER_NAME		"qcompas"
41 
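/*
 * Qualcomm "MDT" firmware images are ELF files whose program headers
 * carry extra Qualcomm-specific bits in p_flags: a type field marking
 * the hash segment (the signature/hash table verified by TrustZone)
 * and a flag marking the image as relocatable.
 */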
42 #define MDT_TYPE_MASK				(7 << 24)
43 #define MDT_TYPE_HASH				(2 << 24)
44 #define MDT_RELOCATABLE				(1 << 27)
45 
46 extern struct arm32_bus_dma_tag arm_generic_dma_tag;
47 
48 enum qcpas_batt_sensor {
49 	/* Battery sensors (must be first) */
50 	QCPAS_DVOLTAGE,
51 	QCPAS_VOLTAGE,
52 	QCPAS_DCAPACITY,
53 	QCPAS_LFCCAPACITY,
54 	QCPAS_CAPACITY,
55 	QCPAS_CHARGERATE,
56 	QCPAS_DISCHARGERATE,
57 	QCPAS_CHARGING,
58 	QCPAS_CHARGE_STATE,
59 	QCPAS_DCYCLES,
60 	QCPAS_TEMPERATURE,
61 	/* AC adapter sensors */
62 	QCPAS_ACADAPTER,
63 	/* Total number of sensors */
64 	QCPAS_NUM_SENSORS
65 };
66 
67 struct qcpas_dmamem {
68 	bus_dmamap_t		tdm_map;
69 	bus_dma_segment_t	tdm_seg;
70 	size_t			tdm_size;
71 	void			*tdm_kva;
72 };
73 #define QCPAS_DMA_MAP(_tdm)	((_tdm)->tdm_map)
74 #define QCPAS_DMA_LEN(_tdm)	((_tdm)->tdm_size)
75 #define QCPAS_DMA_DVA(_tdm)	((_tdm)->tdm_map->dm_segs[0].ds_addr)
76 #define QCPAS_DMA_KVA(_tdm)	((_tdm)->tdm_kva)
77 
78 struct qcpas_softc {
79 	device_t		sc_dev;
80 	bus_dma_tag_t		sc_dmat;
81 
82 	char			*sc_sub;
83 
84 	void			*sc_ih[5];
85 
86 	kmutex_t		sc_ready_lock;
87 	kcondvar_t		sc_ready_cv;
88 	bool			sc_ready;
89 
90 	paddr_t			sc_mem_phys[2];
91 	size_t			sc_mem_size[2];
92 	uint8_t			*sc_mem_region[2];
93 	vaddr_t			sc_mem_reloc[2];
94 
95 	const char		*sc_fwname;
96 	const char		*sc_dtb_fwname;
97 	uint32_t		sc_pas_id;
98 	uint32_t		sc_dtb_pas_id;
99 	uint32_t		sc_lite_pas_id;
100 	const char		*sc_load_state;
101 	uint32_t		sc_glink_remote_pid;
102 	uint32_t		sc_crash_reason;
103 
104 	struct qcpas_dmamem	*sc_metadata[2];
105 
106 	/* GLINK */
107 	volatile uint32_t	*sc_tx_tail;
108 	volatile uint32_t	*sc_tx_head;
109 	volatile uint32_t	*sc_rx_tail;
110 	volatile uint32_t	*sc_rx_head;
111 
112 	uint32_t		sc_tx_off;
113 	uint32_t		sc_rx_off;
114 
115 	uint8_t			*sc_tx_fifo;
116 	int			sc_tx_fifolen;
117 	uint8_t			*sc_rx_fifo;
118 	int			sc_rx_fifolen;
119 	void			*sc_glink_ih;
120 
121 	void			*sc_ipcc;
122 
123 	uint32_t		sc_glink_max_channel;
124 	TAILQ_HEAD(,qcpas_glink_channel) sc_glink_channels;
125 
126 	uint32_t		sc_warning_capacity;
127 	uint32_t		sc_low_capacity;
128 	uint32_t		sc_power_state;
129 	struct sysmon_envsys	*sc_sme;
130 	envsys_data_t		sc_sens[QCPAS_NUM_SENSORS];
131 	struct sysmon_envsys	*sc_sme_acadapter;
132 	struct sysmon_pswitch	sc_smpsw_acadapter;
133 	callout_t		sc_rtr_refresh;
134 };
135 
136 static int	qcpas_match(device_t, cfdata_t, void *);
137 static void	qcpas_attach(device_t, device_t, void *);
138 
139 CFATTACH_DECL_NEW(qcompas, sizeof(struct qcpas_softc),
140     qcpas_match, qcpas_attach, NULL, NULL);
141 
142 static void	qcpas_mountroot(device_t);
143 static void	qcpas_firmload(void *);
144 static int	qcpas_map_memory(struct qcpas_softc *);
145 static int	qcpas_mdt_init(struct qcpas_softc *, int, u_char *, size_t);
146 static void	qcpas_glink_attach(struct qcpas_softc *);
147 static void	qcpas_glink_recv(void *);
148 static void	qcpas_get_limits(struct sysmon_envsys *, envsys_data_t *,
149 				 sysmon_envsys_lim_t *, uint32_t *);
150 
151 static struct qcpas_dmamem *
152 		qcpas_dmamem_alloc(struct qcpas_softc *, bus_size_t, bus_size_t);
153 static void	qcpas_dmamem_free(struct qcpas_softc *, struct qcpas_dmamem *);
154 
155 static int	qcpas_intr_wdog(void *);
156 static int	qcpas_intr_fatal(void *);
157 static int	qcpas_intr_ready(void *);
158 static int	qcpas_intr_handover(void *);
159 static int	qcpas_intr_stop_ack(void *);
160 
161 struct qcpas_mem_region {
162 	bus_addr_t		start;
163 	bus_size_t		size;
164 };
165 
166 struct qcpas_data {
167 	bus_addr_t		reg_addr;
168 	bus_size_t		reg_size;
169 	uint32_t		pas_id;
170 	uint32_t		dtb_pas_id;
171 	uint32_t		lite_pas_id;
172 	const char		*load_state;
173 	uint32_t		glink_remote_pid;
174 	struct qcpas_mem_region	mem_region[2];
175 	const char		*fwname;
176 	const char		*dtb_fwname;
177 	uint32_t		crash_reason;
178 };
179 
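/*
 * Parameters for the audio DSP (ADSP) remoteproc on Snapdragon X Elite
 * machines (matched via "QCOM0C1B" below): PAS image IDs, reserved
 * memory carveouts, firmware file names and the AOSS load-state name.
 */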
180 static struct qcpas_data qcpas_x1e_data = {
181 	.reg_addr = 0x30000000,
182 	.reg_size = 0x100,
183 	.pas_id = 1,
184 	.dtb_pas_id = 36,
185 	.lite_pas_id = 31,
186 	.load_state = "adsp",
187 	.glink_remote_pid = 2,
188 	.mem_region = {
189 		[0] = { .start = 0x87e00000, .size = 0x3a00000 },
190 		[1] = { .start = 0x8b800000, .size = 0x80000 },
191 	},
192 	.fwname = "qcadsp8380.mbn",
193 	.dtb_fwname = "adsp_dtbs.elf",
194 	.crash_reason = 423,
195 };
196 
197 #define IPCC_CLIENT_LPASS		3
198 #define IPCC_MPROC_SIGNAL_GLINK_QMP	0
199 
200 static const struct device_compatible_entry compat_data[] = {
201 	{ .compat = "QCOM0C1B",		.data = &qcpas_x1e_data },
202 	DEVICE_COMPAT_EOL
203 };
204 
205 static int
206 qcpas_match(device_t parent, cfdata_t match, void *aux)
207 {
208 	struct acpi_attach_args *aa = aux;
209 
210 	return acpi_compatible_match(aa, compat_data);
211 }
212 
213 static void
214 qcpas_attach(device_t parent, device_t self, void *aux)
215 {
216 	struct qcpas_softc *sc = device_private(self);
217 	struct acpi_attach_args *aa = aux;
218 	const struct qcpas_data *data;
219 	struct acpi_resources res;
220 	ACPI_STATUS rv;
221 	int i;
222 
223 	rv = acpi_resource_parse(self, aa->aa_node->ad_handle, "_CRS", &res,
224 	    &acpi_resource_parse_ops_default);
225 	if (ACPI_FAILURE(rv)) {
226 		return;
227 	}
228 	acpi_resource_cleanup(&res);
229 
230 	data = acpi_compatible_lookup(aa, compat_data)->data;
231 
232 	sc->sc_dev = self;
233 	sc->sc_dmat = &arm_generic_dma_tag;
234 	mutex_init(&sc->sc_ready_lock, MUTEX_DEFAULT, IPL_VM);
235 	cv_init(&sc->sc_ready_cv, "qcpasrdy");
236 
237 	sc->sc_fwname = data->fwname;
238 	sc->sc_dtb_fwname = data->dtb_fwname;
239 	sc->sc_pas_id = data->pas_id;
240 	sc->sc_dtb_pas_id = data->dtb_pas_id;
241 	sc->sc_lite_pas_id = data->lite_pas_id;
242 	sc->sc_load_state = data->load_state;
243 	sc->sc_glink_remote_pid = data->glink_remote_pid;
244 	sc->sc_crash_reason = data->crash_reason;
245 	for (i = 0; i < __arraycount(sc->sc_mem_phys); i++) {
246 		sc->sc_mem_phys[i] = data->mem_region[i].start;
247 		KASSERT((sc->sc_mem_phys[i] & PAGE_MASK) == 0);
248 		sc->sc_mem_size[i] = data->mem_region[i].size;
249 		KASSERT((sc->sc_mem_size[i] & PAGE_MASK) == 0);
250 	}
251 
252 	rv = acpi_eval_string(aa->aa_node->ad_handle, "_SUB", &sc->sc_sub);
253 	if (ACPI_FAILURE(rv)) {
254 		aprint_error_dev(self, "failed to evaluate _SUB: %s\n",
255 		    AcpiFormatException(rv));
256 		return;
257 	}
258 	aprint_verbose_dev(self, "subsystem ID %s\n", sc->sc_sub);
259 
260 	sc->sc_ih[0] = acpi_intr_establish(self,
261 	    (uint64_t)(uintptr_t)aa->aa_node->ad_handle,
262 	    IPL_VM, false, qcpas_intr_wdog, sc, device_xname(self));
263 	sc->sc_ih[1] =
264 	    qcsmptp_intr_establish(0, qcpas_intr_fatal, sc);
265 	sc->sc_ih[2] =
266 	    qcsmptp_intr_establish(1, qcpas_intr_ready, sc);
267 	sc->sc_ih[3] =
268 	    qcsmptp_intr_establish(2, qcpas_intr_handover, sc);
269 	sc->sc_ih[4] =
270 	    qcsmptp_intr_establish(3, qcpas_intr_stop_ack, sc);
271 
272 	if (qcpas_map_memory(sc) != 0)
273 		return;
274 
275 	config_mountroot(self, qcpas_mountroot);
276 }
277 
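/*
 * Deferred firmware load, run from the sysmon task queue once the root
 * file system is mounted so that firmware(9) can read the image files.
 * This loads the main DSP image and, if configured, a DTB image from
 * the "qcompas/<_SUB>" firmware directory, shuts down any "lite"
 * firmware that may still be running, asks AOSS to turn the load state
 * on, boots the DSP via qcpas_mdt_init(), and finally attaches the
 * GLINK transport and the battery/AC adapter sensors.
 */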
278 static void
279 qcpas_firmload(void *arg)
280 {
281 	struct qcpas_softc *sc = arg;
282 	firmware_handle_t fwh = NULL, dtb_fwh = NULL;
283 	char fwname[128];
284 	size_t fwlen = 0, dtb_fwlen = 0;
285 	u_char *fw = NULL, *dtb_fw = NULL;
286 	int ret, error;
287 
288 	snprintf(fwname, sizeof(fwname), "%s/%s", sc->sc_sub, sc->sc_fwname);
289 	error = firmware_open(DRIVER_NAME, fwname, &fwh);
290 	if (error == 0) {
291 		fwlen = firmware_get_size(fwh);
292 		fw = fwlen ? firmware_malloc(fwlen) : NULL;
293 		error = fw == NULL ? ENOMEM :
294 			firmware_read(fwh, 0, fw, fwlen);
295 	}
296 	if (error) {
297 		device_printf(sc->sc_dev, "failed to load %s/%s: %d\n",
298 		    DRIVER_NAME, fwname, error);
299 		goto cleanup;
300 	}
301 	aprint_normal_dev(sc->sc_dev, "loading %s/%s\n", DRIVER_NAME, fwname);
302 
303 	if (sc->sc_lite_pas_id) {
304 		if (qcscm_pas_shutdown(sc->sc_lite_pas_id)) {
305 			device_printf(sc->sc_dev,
306 			    "failed to shutdown lite firmware\n");
307 		}
308 	}
309 
310 	if (sc->sc_dtb_pas_id) {
311 		snprintf(fwname, sizeof(fwname), "%s/%s", sc->sc_sub,
312 		    sc->sc_dtb_fwname);
313 		error = firmware_open(DRIVER_NAME, fwname, &dtb_fwh);
314 		if (error == 0) {
315 			dtb_fwlen = firmware_get_size(dtb_fwh);
316 			dtb_fw = dtb_fwlen ? firmware_malloc(dtb_fwlen) : NULL;
317 			error = dtb_fw == NULL ? ENOMEM :
318 				firmware_read(dtb_fwh, 0, dtb_fw, dtb_fwlen);
319 		}
320 		if (error) {
321 			device_printf(sc->sc_dev, "failed to load %s/%s: %d\n",
322 			    DRIVER_NAME, fwname, error);
323 			goto cleanup;
324 		}
325 		aprint_normal_dev(sc->sc_dev, "loading %s/%s\n", DRIVER_NAME, fwname);
326 	}
327 
328 	if (sc->sc_load_state) {
329 		char buf[64];
330 		snprintf(buf, sizeof(buf),
331 		    "{class: image, res: load_state, name: %s, val: on}",
332 		    sc->sc_load_state);
333 		ret = qcaoss_send(buf, sizeof(buf));
334 		if (ret != 0) {
335 			device_printf(sc->sc_dev, "failed to toggle load state\n");
336 			goto cleanup;
337 		}
338 	}
339 
340 	if (sc->sc_dtb_pas_id) {
341 		qcpas_mdt_init(sc, sc->sc_dtb_pas_id, dtb_fw, dtb_fwlen);
342 	}
343 
344 	ret = qcpas_mdt_init(sc, sc->sc_pas_id, fw, fwlen);
345 	if (ret != 0) {
346 		device_printf(sc->sc_dev, "failed to boot coprocessor\n");
347 		goto cleanup;
348 	}
349 
350 	qcpas_glink_attach(sc);
351 
352 	/* Battery sensors */
353 	sc->sc_sme = sysmon_envsys_create();
354 	sc->sc_sme->sme_name = "battery";
355 	sc->sc_sme->sme_cookie = sc;
356 	sc->sc_sme->sme_flags = SME_DISABLE_REFRESH;
357 	sc->sc_sme->sme_class = SME_CLASS_BATTERY;
358 	sc->sc_sme->sme_get_limits = qcpas_get_limits;
359 
360 	/* AC adapter sensors */
361 	sc->sc_sme_acadapter = sysmon_envsys_create();
362 	sc->sc_sme_acadapter->sme_name = "charger";
363 	sc->sc_sme_acadapter->sme_cookie = sc;
364 	sc->sc_sme_acadapter->sme_flags = SME_DISABLE_REFRESH;
365 	sc->sc_sme_acadapter->sme_class = SME_CLASS_ACADAPTER;
366 
367 #define INIT_SENSOR(sme, idx, unit, str)				\
368 	do {								\
369 		strlcpy(sc->sc_sens[idx].desc, str,			\
370 		    sizeof(sc->sc_sens[0].desc));			\
371 		sc->sc_sens[idx].units = unit;				\
372 		sc->sc_sens[idx].state = ENVSYS_SINVALID;		\
373 		sysmon_envsys_sensor_attach(sme,			\
374 		    &sc->sc_sens[idx]);					\
375 	} while (0)
376 
377 	INIT_SENSOR(sc->sc_sme, QCPAS_DVOLTAGE, ENVSYS_SVOLTS_DC, "design voltage");
378 	INIT_SENSOR(sc->sc_sme, QCPAS_VOLTAGE, ENVSYS_SVOLTS_DC, "voltage");
379 	INIT_SENSOR(sc->sc_sme, QCPAS_DCAPACITY, ENVSYS_SWATTHOUR, "design cap");
380 	INIT_SENSOR(sc->sc_sme, QCPAS_LFCCAPACITY, ENVSYS_SWATTHOUR, "last full cap");
381 	INIT_SENSOR(sc->sc_sme, QCPAS_CAPACITY, ENVSYS_SWATTHOUR, "charge");
382 	INIT_SENSOR(sc->sc_sme, QCPAS_CHARGERATE, ENVSYS_SWATTS, "charge rate");
383 	INIT_SENSOR(sc->sc_sme, QCPAS_DISCHARGERATE, ENVSYS_SWATTS, "discharge rate");
384 	INIT_SENSOR(sc->sc_sme, QCPAS_CHARGING, ENVSYS_BATTERY_CHARGE, "charging");
385 	INIT_SENSOR(sc->sc_sme, QCPAS_CHARGE_STATE, ENVSYS_BATTERY_CAPACITY, "charge state");
386 	INIT_SENSOR(sc->sc_sme, QCPAS_DCYCLES, ENVSYS_INTEGER, "discharge cycles");
387 	INIT_SENSOR(sc->sc_sme, QCPAS_TEMPERATURE, ENVSYS_STEMP, "temperature");
388 	INIT_SENSOR(sc->sc_sme_acadapter, QCPAS_ACADAPTER, ENVSYS_INDICATOR, "connected");
389 
390 #undef INIT_SENSOR
391 
392 	sc->sc_sens[QCPAS_CHARGE_STATE].value_cur =
393 	    ENVSYS_BATTERY_CAPACITY_NORMAL;
394 	sc->sc_sens[QCPAS_CAPACITY].flags |=
395 	    ENVSYS_FPERCENT | ENVSYS_FVALID_MAX | ENVSYS_FMONLIMITS;
396 	sc->sc_sens[QCPAS_CHARGE_STATE].flags |=
397 	    ENVSYS_FMONSTCHANGED;
398 
399 	sc->sc_sens[QCPAS_VOLTAGE].flags = ENVSYS_FMONNOTSUPP;
400 	sc->sc_sens[QCPAS_CHARGERATE].flags = ENVSYS_FMONNOTSUPP;
401 	sc->sc_sens[QCPAS_DISCHARGERATE].flags = ENVSYS_FMONNOTSUPP;
402 	sc->sc_sens[QCPAS_DCAPACITY].flags = ENVSYS_FMONNOTSUPP;
403 	sc->sc_sens[QCPAS_LFCCAPACITY].flags = ENVSYS_FMONNOTSUPP;
404 	sc->sc_sens[QCPAS_DVOLTAGE].flags = ENVSYS_FMONNOTSUPP;
405 
406 	sc->sc_sens[QCPAS_CHARGERATE].flags |= ENVSYS_FHAS_ENTROPY;
407 	sc->sc_sens[QCPAS_DISCHARGERATE].flags |= ENVSYS_FHAS_ENTROPY;
408 
409 	sysmon_envsys_register(sc->sc_sme);
410 	sysmon_envsys_register(sc->sc_sme_acadapter);
411 
412 	sc->sc_smpsw_acadapter.smpsw_name = "acpiacad0";
413 	sc->sc_smpsw_acadapter.smpsw_type = PSWITCH_TYPE_ACADAPTER;
414 	sysmon_pswitch_register(&sc->sc_smpsw_acadapter);
415 
416 cleanup:
417 	if (dtb_fw != NULL) {
418 		firmware_free(dtb_fw, dtb_fwlen);
419 	}
420 	if (fw != NULL) {
421 		firmware_free(fw, fwlen);
422 	}
423 	if (dtb_fwh != NULL) {
424 		firmware_close(dtb_fwh);
425 	}
426 	if (fwh != NULL) {
427 		firmware_close(fwh);
428 	}
429 }
430 
431 static void
432 qcpas_mountroot(device_t self)
433 {
434 	struct qcpas_softc *sc = device_private(self);
435 
436 	sysmon_task_queue_sched(0, qcpas_firmload, sc);
437 }
438 
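/*
 * Map the reserved firmware carveouts into kernel VA (write-combined)
 * so that qcpas_mdt_init() can copy the ELF load segments into them.
 */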
439 static int
440 qcpas_map_memory(struct qcpas_softc *sc)
441 {
442 	int i;
443 
444 	for (i = 0; i < __arraycount(sc->sc_mem_phys); i++) {
445 		paddr_t pa, epa;
446 		vaddr_t va;
447 
448 		if (sc->sc_mem_size[i] == 0)
449 			break;
450 
451 		va = uvm_km_alloc(kernel_map, sc->sc_mem_size[i], 0, UVM_KMF_VAONLY);
452 		KASSERT(va != 0);
453 		sc->sc_mem_region[i] = (void *)va;
454 
455 		for (pa = sc->sc_mem_phys[i], epa = sc->sc_mem_phys[i] + sc->sc_mem_size[i];
456 		     pa < epa;
457 		     pa += PAGE_SIZE, va += PAGE_SIZE) {
458 			pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE, PMAP_WRITE_COMBINE);
459 		}
460 		pmap_update(pmap_kernel());
461 	}
462 
463 	return 0;
464 }
465 
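/*
 * Boot one PAS image:
 *  - copy the ELF header and the Qualcomm hash segment into a DMA
 *    buffer and pass it to TrustZone (qcscm_pas_init_image()),
 *  - for relocatable images, describe the carveout to TrustZone
 *    (qcscm_pas_mem_setup()),
 *  - copy all PT_LOAD segments into the mapped carveout and zero the
 *    trailing BSS of each segment,
 *  - authenticate the image and release the DSP from reset
 *    (qcscm_pas_auth_and_reset()),
 *  - for the main image, wait up to 5 seconds for the "ready"
 *    interrupt from the DSP.
 */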
466 static int
467 qcpas_mdt_init(struct qcpas_softc *sc, int pas_id, u_char *fw, size_t fwlen)
468 {
469 	Elf32_Ehdr *ehdr;
470 	Elf32_Phdr *phdr;
471 	paddr_t minpa = -1, maxpa = 0;
472 	int i, hashseg = 0, relocate = 0;
473 	uint8_t *metadata;
474 	int error;
475 	ssize_t off;
476 	int idx;
477 
478 	if (pas_id == sc->sc_dtb_pas_id)
479 		idx = 1;
480 	else
481 		idx = 0;
482 
483 	ehdr = (Elf32_Ehdr *)fw;
484 	phdr = (Elf32_Phdr *)&ehdr[1];
485 
486 	if (ehdr->e_phnum < 2 || phdr[0].p_type == PT_LOAD)
487 		return EINVAL;
488 
489 	for (i = 0; i < ehdr->e_phnum; i++) {
490 		if ((phdr[i].p_flags & MDT_TYPE_MASK) == MDT_TYPE_HASH) {
491 			if (i > 0 && !hashseg)
492 				hashseg = i;
493 			continue;
494 		}
495 		if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0)
496 			continue;
497 		if (phdr[i].p_flags & MDT_RELOCATABLE)
498 			relocate = 1;
499 		if (phdr[i].p_paddr < minpa)
500 			minpa = phdr[i].p_paddr;
501 		if (phdr[i].p_paddr + phdr[i].p_memsz > maxpa)
502 			maxpa =
503 			    roundup(phdr[i].p_paddr + phdr[i].p_memsz,
504 			    PAGE_SIZE);
505 	}
506 
507 	if (!hashseg)
508 		return EINVAL;
509 
510 	if (sc->sc_metadata[idx] == NULL) {
511 		sc->sc_metadata[idx] = qcpas_dmamem_alloc(sc, phdr[0].p_filesz +
512 		    phdr[hashseg].p_filesz, PAGE_SIZE);
513 		if (sc->sc_metadata[idx] == NULL) {
514 			return EINVAL;
515 		}
516 	}
517 
518 	metadata = QCPAS_DMA_KVA(sc->sc_metadata[idx]);
519 
520 	memcpy(metadata, fw, phdr[0].p_filesz);
521 	if (phdr[0].p_filesz + phdr[hashseg].p_filesz == fwlen) {
522 		memcpy(metadata + phdr[0].p_filesz,
523 		    fw + phdr[0].p_filesz, phdr[hashseg].p_filesz);
524 	} else if (phdr[hashseg].p_offset + phdr[hashseg].p_filesz <= fwlen) {
525 		memcpy(metadata + phdr[0].p_filesz,
526 		    fw + phdr[hashseg].p_offset, phdr[hashseg].p_filesz);
527 	} else {
528 		device_printf(sc->sc_dev, "metadata split segment not supported\n");
529 		return EINVAL;
530 	}
531 
532 	cpu_drain_writebuf();
533 
534 	error = qcscm_pas_init_image(pas_id,
535 	    QCPAS_DMA_DVA(sc->sc_metadata[idx]));
536 	if (error != 0) {
537 		device_printf(sc->sc_dev, "init image failed: %d\n", error);
538 		qcpas_dmamem_free(sc, sc->sc_metadata[idx]);
539 		return error;
540 	}
541 
542 	if (relocate) {
543 		if (qcscm_pas_mem_setup(pas_id,
544 		    sc->sc_mem_phys[idx], maxpa - minpa) != 0) {
545 			device_printf(sc->sc_dev, "mem setup failed\n");
546 			qcpas_dmamem_free(sc, sc->sc_metadata[idx]);
547 			return EINVAL;
548 		}
549 	}
550 
551 	sc->sc_mem_reloc[idx] = relocate ? minpa : sc->sc_mem_phys[idx];
552 
553 	for (i = 0; i < ehdr->e_phnum; i++) {
554 		if ((phdr[i].p_flags & MDT_TYPE_MASK) == MDT_TYPE_HASH ||
555 		    phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0)
556 			continue;
557 		off = phdr[i].p_paddr - sc->sc_mem_reloc[idx];
558 		if (off < 0 || off + phdr[i].p_memsz > sc->sc_mem_size[idx])
559 			return EINVAL;
560 		if (phdr[i].p_filesz > phdr[i].p_memsz)
561 			return EINVAL;
562 
563 		if (phdr[i].p_filesz && phdr[i].p_offset < fwlen &&
564 		    phdr[i].p_offset + phdr[i].p_filesz <= fwlen) {
565 			memcpy(sc->sc_mem_region[idx] + off,
566 			    fw + phdr[i].p_offset, phdr[i].p_filesz);
567 		} else if (phdr[i].p_filesz) {
568 			device_printf(sc->sc_dev, "firmware split segment not supported\n");
569 			return EINVAL;
570 		}
571 
572 		if (phdr[i].p_memsz > phdr[i].p_filesz)
573 			memset(sc->sc_mem_region[idx] + off + phdr[i].p_filesz,
574 			    0, phdr[i].p_memsz - phdr[i].p_filesz);
575 	}
576 
577 	cpu_drain_writebuf();
578 
579 	if (qcscm_pas_auth_and_reset(pas_id) != 0) {
580 		device_printf(sc->sc_dev, "auth and reset failed\n");
581 		qcpas_dmamem_free(sc, sc->sc_metadata[idx]);
582 		return EINVAL;
583 	}
584 
585 	if (pas_id == sc->sc_dtb_pas_id)
586 		return 0;
587 
588 	mutex_enter(&sc->sc_ready_lock);
589 	while (!sc->sc_ready) {
590 		error = cv_timedwait(&sc->sc_ready_cv, &sc->sc_ready_lock,
591 		    hz * 5);
592 		if (error == EWOULDBLOCK) {
593 			break;
594 		}
595 	}
596 	mutex_exit(&sc->sc_ready_lock);
597 	if (!sc->sc_ready) {
598 		device_printf(sc->sc_dev, "timeout waiting for ready signal\n");
599 		return ETIMEDOUT;
600 	}
601 
602 	/* XXX: free metadata ? */
603 
604 	return 0;
605 }
606 
607 static struct qcpas_dmamem *
608 qcpas_dmamem_alloc(struct qcpas_softc *sc, bus_size_t size, bus_size_t align)
609 {
610 	struct qcpas_dmamem *tdm;
611 	int nsegs;
612 
613 	tdm = kmem_zalloc(sizeof(*tdm), KM_SLEEP);
614 	tdm->tdm_size = size;
615 
616 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
617 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &tdm->tdm_map) != 0)
618 		goto tdmfree;
619 
620 	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0,
621 	    &tdm->tdm_seg, 1, &nsegs, BUS_DMA_WAITOK) != 0)
622 		goto destroy;
623 
624 	if (bus_dmamem_map(sc->sc_dmat, &tdm->tdm_seg, nsegs, size,
625 	    &tdm->tdm_kva, BUS_DMA_WAITOK | BUS_DMA_PREFETCHABLE) != 0)
626 		goto free;
627 
628 	if (bus_dmamap_load(sc->sc_dmat, tdm->tdm_map, tdm->tdm_kva, size,
629 	    NULL, BUS_DMA_WAITOK) != 0)
630 		goto unmap;
631 
632 	memset(tdm->tdm_kva, 0, size);
633 
634 	return (tdm);
635 
636 unmap:
637 	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, size);
638 free:
639 	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
640 destroy:
641 	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
642 tdmfree:
643 	kmem_free(tdm, sizeof(*tdm));
644 
645 	return (NULL);
646 }
647 
648 static void
649 qcpas_dmamem_free(struct qcpas_softc *sc, struct qcpas_dmamem *tdm)
650 {
651 	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, tdm->tdm_size);
652 	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
653 	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
654 	kmem_free(tdm, sizeof(*tdm));
655 }
656 
657 static void
658 qcpas_report_crash(struct qcpas_softc *sc, const char *source)
659 {
660 	char *msg;
661 	int size;
662 
663 	msg = qcsmem_get(-1, sc->sc_crash_reason, &size);
664 	if (msg == NULL || size <= 0) {
665 		device_printf(sc->sc_dev, "%s\n", source);
666 	} else {
667 		device_printf(sc->sc_dev, "%s: \"%s\"\n", source, msg);
668 	}
669 }
670 
671 static int
672 qcpas_intr_wdog(void *cookie)
673 {
674 	struct qcpas_softc *sc = cookie;
675 
676 	qcpas_report_crash(sc, "watchdog");
677 
678 	return 0;
679 }
680 
681 static int
682 qcpas_intr_fatal(void *cookie)
683 {
684 	struct qcpas_softc *sc = cookie;
685 
686 	qcpas_report_crash(sc, "fatal error");
687 
688 	return 0;
689 }
690 
691 static int
692 qcpas_intr_ready(void *cookie)
693 {
694 	struct qcpas_softc *sc = cookie;
695 
696 	aprint_debug_dev(sc->sc_dev, "%s\n", __func__);
697 
698 	mutex_enter(&sc->sc_ready_lock);
699 	sc->sc_ready = true;
700 	cv_broadcast(&sc->sc_ready_cv);
701 	mutex_exit(&sc->sc_ready_lock);
702 
703 	return 0;
704 }
705 
706 static int
707 qcpas_intr_handover(void *cookie)
708 {
709 	struct qcpas_softc *sc = cookie;
710 
711 	aprint_debug_dev(sc->sc_dev, "%s\n", __func__);
712 
713 	return 0;
714 }
715 
716 static int
717 qcpas_intr_stop_ack(void *cookie)
718 {
719 	struct qcpas_softc *sc = cookie;
720 
721 	aprint_debug_dev(sc->sc_dev, "%s\n", __func__);
722 
723 	return 0;
724 }
725 
726 /* GLINK */
727 
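/*
 * GLINK transport over shared memory (SMEM): a 32-byte descriptor item
 * holds the tx tail/head and rx tail/head ring indices, FIFO_0 is the
 * locally allocated transmit ring and FIFO_1 is the remote's transmit
 * (our receive) ring.  The remote is kicked through an IPCC doorbell
 * after every commit; incoming doorbells are handled by
 * qcpas_glink_intr() and processed in qcpas_glink_recv().
 */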
728 #define SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR	478
729 #define SMEM_GLINK_NATIVE_XPRT_FIFO_0		479
730 #define SMEM_GLINK_NATIVE_XPRT_FIFO_1		480
731 
732 struct glink_msg {
733 	uint16_t cmd;
734 	uint16_t param1;
735 	uint32_t param2;
736 	uint8_t data[];
737 } __packed;
738 
739 struct qcpas_glink_intent_pair {
740 	uint32_t size;
741 	uint32_t iid;
742 } __packed;
743 
744 struct qcpas_glink_intent {
745 	TAILQ_ENTRY(qcpas_glink_intent) it_q;
746 	uint32_t it_id;
747 	uint32_t it_size;
748 	int it_inuse;
749 };
750 
751 struct qcpas_glink_channel {
752 	TAILQ_ENTRY(qcpas_glink_channel) ch_q;
753 	struct qcpas_softc *ch_sc;
754 	struct qcpas_glink_protocol *ch_proto;
755 	uint32_t ch_rcid;
756 	uint32_t ch_lcid;
757 	uint32_t ch_max_intent;
758 	TAILQ_HEAD(,qcpas_glink_intent) ch_l_intents;
759 	TAILQ_HEAD(,qcpas_glink_intent) ch_r_intents;
760 };
761 
762 #define GLINK_CMD_VERSION		0
763 #define GLINK_CMD_VERSION_ACK		1
764 #define  GLINK_VERSION				1
765 #define  GLINK_FEATURE_INTENT_REUSE		(1 << 0)
766 #define GLINK_CMD_OPEN			2
767 #define GLINK_CMD_CLOSE			3
768 #define GLINK_CMD_OPEN_ACK		4
769 #define GLINK_CMD_INTENT		5
770 #define GLINK_CMD_RX_DONE		6
771 #define GLINK_CMD_RX_INTENT_REQ		7
772 #define GLINK_CMD_RX_INTENT_REQ_ACK	8
773 #define GLINK_CMD_TX_DATA		9
774 #define GLINK_CMD_CLOSE_ACK		11
775 #define GLINK_CMD_TX_DATA_CONT		12
776 #define GLINK_CMD_READ_NOTIF		13
777 #define GLINK_CMD_RX_DONE_W_REUSE	14
778 
779 static int	qcpas_glink_intr(void *);
780 
781 static void	qcpas_glink_tx(struct qcpas_softc *, uint8_t *, int);
782 static void	qcpas_glink_tx_commit(struct qcpas_softc *);
783 static void	qcpas_glink_rx(struct qcpas_softc *, uint8_t *, int);
784 static void	qcpas_glink_rx_commit(struct qcpas_softc *);
785 
786 static void	qcpas_glink_send(void *, void *, int);
787 
788 static int	qcpas_pmic_rtr_init(void *);
789 static int	qcpas_pmic_rtr_recv(void *, uint8_t *, int);
790 
791 struct qcpas_glink_protocol {
792 	const char *name;
793 	int (*init)(void *cookie);
794 	int (*recv)(void *cookie, uint8_t *buf, int len);
795 } qcpas_glink_protocols[] = {
796 	{ "PMIC_RTR_ADSP_APPS", qcpas_pmic_rtr_init, qcpas_pmic_rtr_recv },
797 };
798 
799 static void
800 qcpas_glink_attach(struct qcpas_softc *sc)
801 {
802 	uint32_t remote = sc->sc_glink_remote_pid;
803 	uint32_t *descs;
804 	int size;
805 
806 	if (qcsmem_alloc(remote, SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, 32) != 0 ||
807 	    qcsmem_alloc(remote, SMEM_GLINK_NATIVE_XPRT_FIFO_0, 16384) != 0)
808 		return;
809 
810 	descs = qcsmem_get(remote, SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, &size);
811 	if (descs == NULL || size != 32)
812 		return;
813 
814 	sc->sc_tx_tail = &descs[0];
815 	sc->sc_tx_head = &descs[1];
816 	sc->sc_rx_tail = &descs[2];
817 	sc->sc_rx_head = &descs[3];
818 
819 	sc->sc_tx_fifo = qcsmem_get(remote, SMEM_GLINK_NATIVE_XPRT_FIFO_0,
820 	    &sc->sc_tx_fifolen);
821 	if (sc->sc_tx_fifo == NULL)
822 		return;
823 	sc->sc_rx_fifo = qcsmem_get(remote, SMEM_GLINK_NATIVE_XPRT_FIFO_1,
824 	    &sc->sc_rx_fifolen);
825 	if (sc->sc_rx_fifo == NULL)
826 		return;
827 
828 	sc->sc_ipcc = qcipcc_channel(IPCC_CLIENT_LPASS,
829 	    IPCC_MPROC_SIGNAL_GLINK_QMP);
830 	if (sc->sc_ipcc == NULL)
831 		return;
832 
833 	TAILQ_INIT(&sc->sc_glink_channels);
834 
835 	sc->sc_glink_ih = qcipcc_intr_establish(IPCC_CLIENT_LPASS,
836 	    IPCC_MPROC_SIGNAL_GLINK_QMP, IPL_VM, qcpas_glink_intr, sc);
837 	if (sc->sc_glink_ih == NULL)
838 		return;
839 
840 	/* Expect peer to send initial message */
841 }
842 
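/*
 * Copy bytes out of the receive ring without publishing progress; the
 * consumed amount accumulates in sc_rx_off until qcpas_glink_rx_commit()
 * rounds it up to 8 bytes (commands are 8-byte aligned) and advances
 * the shared tail index.
 */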
843 static void
844 qcpas_glink_rx(struct qcpas_softc *sc, uint8_t *buf, int len)
845 {
846 	uint32_t head, tail;
847 	int avail;
848 
849 	head = *sc->sc_rx_head;
850 	tail = *sc->sc_rx_tail + sc->sc_rx_off;
851 	if (tail >= sc->sc_rx_fifolen)
852 		tail -= sc->sc_rx_fifolen;
853 
854 	/* Checked by caller */
855 	KASSERT(head != tail);
856 
857 	if (head >= tail)
858 		avail = head - tail;
859 	else
860 		avail = (sc->sc_rx_fifolen - tail) + head;
861 
862 	/* Dumb, but should do. */
863 	KASSERT(avail >= len);
864 
865 	while (len > 0) {
866 		*buf = sc->sc_rx_fifo[tail];
867 		tail++;
868 		if (tail >= sc->sc_rx_fifolen)
869 			tail -= sc->sc_rx_fifolen;
870 		buf++;
871 		sc->sc_rx_off++;
872 		len--;
873 	}
874 }
875 
876 static void
877 qcpas_glink_rx_commit(struct qcpas_softc *sc)
878 {
879 	uint32_t tail;
880 
881 	tail = *sc->sc_rx_tail + roundup(sc->sc_rx_off, 8);
882 	if (tail >= sc->sc_rx_fifolen)
883 		tail -= sc->sc_rx_fifolen;
884 
885 	membar_producer();
886 	*sc->sc_rx_tail = tail;
887 	sc->sc_rx_off = 0;
888 }
889 
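/*
 * Mirror image of the receive path: bytes are staged into the transmit
 * ring past the published head, and qcpas_glink_tx_commit() publishes
 * them (again 8-byte aligned) and rings the IPCC doorbell.
 */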
890 static void
891 qcpas_glink_tx(struct qcpas_softc *sc, uint8_t *buf, int len)
892 {
893 	uint32_t head, tail;
894 	int avail;
895 
896 	head = *sc->sc_tx_head + sc->sc_tx_off;
897 	if (head >= sc->sc_tx_fifolen)
898 		head -= sc->sc_tx_fifolen;
899 	tail = *sc->sc_tx_tail;
900 
901 	if (head < tail)
902 		avail = tail - head;
903 	else
904 		avail = (sc->sc_tx_fifolen - head) + tail;
905 
906 	/* Dumb, but should do. */
907 	KASSERT(avail >= len);
908 
909 	while (len > 0) {
910 		sc->sc_tx_fifo[head] = *buf;
911 		head++;
912 		if (head >= sc->sc_tx_fifolen)
913 			head -= sc->sc_tx_fifolen;
914 		buf++;
915 		sc->sc_tx_off++;
916 		len--;
917 	}
918 }
919 
920 static void
921 qcpas_glink_tx_commit(struct qcpas_softc *sc)
922 {
923 	uint32_t head;
924 
925 	head = *sc->sc_tx_head + roundup(sc->sc_tx_off, 8);
926 	if (head >= sc->sc_tx_fifolen)
927 		head -= sc->sc_tx_fifolen;
928 
929 	membar_producer();
930 	*sc->sc_tx_head = head;
931 	sc->sc_tx_off = 0;
932 	qcipcc_send(sc->sc_ipcc);
933 }
934 
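/*
 * Send a message on a channel: claim a free receive intent advertised
 * by the remote, then transmit a TX_DATA command followed by the chunk
 * size, the remaining size (always 0, chunking is not implemented) and
 * the payload.  The intent is released when the remote acknowledges it
 * with RX_DONE(_W_REUSE).
 */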
935 static void
936 qcpas_glink_send(void *cookie, void *buf, int len)
937 {
938 	struct qcpas_glink_channel *ch = cookie;
939 	struct qcpas_softc *sc = ch->ch_sc;
940 	struct qcpas_glink_intent *it;
941 	struct glink_msg msg;
942 	uint32_t chunk_size, left_size;
943 
944 	TAILQ_FOREACH(it, &ch->ch_r_intents, it_q) {
945 		if (it->it_inuse)
946 			continue;
947 		if (it->it_size >= len)
948 			break;
949 	}
950 	if (it == NULL) {
951 		device_printf(sc->sc_dev, "all intents in use\n");
952 		return;
953 	}
954 	it->it_inuse = 1;
955 
956 	msg.cmd = GLINK_CMD_TX_DATA;
957 	msg.param1 = ch->ch_lcid;
958 	msg.param2 = it->it_id;
959 
960 	chunk_size = len;
961 	left_size = 0;
962 
963 	qcpas_glink_tx(sc, (char *)&msg, sizeof(msg));
964 	qcpas_glink_tx(sc, (char *)&chunk_size, sizeof(chunk_size));
965 	qcpas_glink_tx(sc, (char *)&left_size, sizeof(left_size));
966 	qcpas_glink_tx(sc, buf, len);
967 	qcpas_glink_tx_commit(sc);
968 }
969 
970 static void
971 qcpas_glink_recv_version(struct qcpas_softc *sc, uint32_t ver,
972     uint32_t features)
973 {
974 	struct glink_msg msg;
975 
976 	if (ver != GLINK_VERSION) {
977 		device_printf(sc->sc_dev,
978 		    "unsupported glink version %u\n", ver);
979 		return;
980 	}
981 
982 	msg.cmd = GLINK_CMD_VERSION_ACK;
983 	msg.param1 = GLINK_VERSION;
984 	msg.param2 = features & GLINK_FEATURE_INTENT_REUSE;
985 
986 	qcpas_glink_tx(sc, (char *)&msg, sizeof(msg));
987 	qcpas_glink_tx_commit(sc);
988 }
989 
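/*
 * The remote opens a channel by name.  If the name matches one of the
 * entries in qcpas_glink_protocols, allocate a local channel id,
 * initialize the protocol, acknowledge the remote's OPEN and open our
 * side of the channel in return.
 */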
990 static void
991 qcpas_glink_recv_open(struct qcpas_softc *sc, uint32_t rcid, uint32_t namelen)
992 {
993 	struct qcpas_glink_protocol *proto = NULL;
994 	struct qcpas_glink_channel *ch;
995 	struct glink_msg msg;
996 	char *name;
997 	int i, err;
998 
999 	name = kmem_zalloc(namelen, KM_SLEEP);
1000 	qcpas_glink_rx(sc, name, namelen);
1001 	qcpas_glink_rx_commit(sc);
1002 
1003 	TAILQ_FOREACH(ch, &sc->sc_glink_channels, ch_q) {
1004 		if (ch->ch_rcid == rcid) {
1005 			device_printf(sc->sc_dev, "duplicate open for %s\n",
1006 			    name);
1007 			kmem_free(name, namelen);
1008 			return;
1009 		}
1010 	}
1011 
1012 	for (i = 0; i < __arraycount(qcpas_glink_protocols); i++) {
1013 		if (strcmp(qcpas_glink_protocols[i].name, name) != 0)
1014 			continue;
1015 		proto = &qcpas_glink_protocols[i];
1016 		break;
1017 	}
1018 	if (proto == NULL) {
1019 		kmem_free(name, namelen);
1020 		return;
1021 	}
1022 
1023 	ch = kmem_zalloc(sizeof(*ch), KM_SLEEP);
1024 	ch->ch_sc = sc;
1025 	ch->ch_proto = proto;
1026 	ch->ch_rcid = rcid;
1027 	ch->ch_lcid = ++sc->sc_glink_max_channel;
1028 	TAILQ_INIT(&ch->ch_l_intents);
1029 	TAILQ_INIT(&ch->ch_r_intents);
1030 	TAILQ_INSERT_TAIL(&sc->sc_glink_channels, ch, ch_q);
1031 
1032 	/* Assume we can leave HW dangling if proto init fails */
1033 	err = proto->init(ch);
1034 	if (err) {
1035 		TAILQ_REMOVE(&sc->sc_glink_channels, ch, ch_q);
1036 		kmem_free(ch, sizeof(*ch));
1037 		kmem_free(name, namelen);
1038 		return;
1039 	}
1040 
1041 	msg.cmd = GLINK_CMD_OPEN_ACK;
1042 	msg.param1 = ch->ch_rcid;
1043 	msg.param2 = 0;
1044 
1045 	qcpas_glink_tx(sc, (char *)&msg, sizeof(msg));
1046 	qcpas_glink_tx_commit(sc);
1047 
1048 	msg.cmd = GLINK_CMD_OPEN;
1049 	msg.param1 = ch->ch_lcid;
1050 	msg.param2 = strlen(name) + 1;
1051 
1052 	qcpas_glink_tx(sc, (char *)&msg, sizeof(msg));
1053 	qcpas_glink_tx(sc, name, strlen(name) + 1);
1054 	qcpas_glink_tx_commit(sc);
1055 
1056 	kmem_free(name, namelen);
1057 }
1058 
1059 static void
1060 qcpas_glink_recv_open_ack(struct qcpas_softc *sc, uint32_t lcid)
1061 {
1062 	struct qcpas_glink_channel *ch;
1063 	struct glink_msg msg;
1064 	struct qcpas_glink_intent_pair intent;
1065 	int i;
1066 
1067 	TAILQ_FOREACH(ch, &sc->sc_glink_channels, ch_q) {
1068 		if (ch->ch_lcid == lcid)
1069 			break;
1070 	}
1071 	if (ch == NULL) {
1072 		device_printf(sc->sc_dev, "unknown channel %u for OPEN_ACK\n",
1073 		    lcid);
1074 		return;
1075 	}
1076 
1077 	/* Respond with default intent now that channel is open */
1078 	for (i = 0; i < 5; i++) {
1079 		struct qcpas_glink_intent *it;
1080 
1081 		it = kmem_zalloc(sizeof(*it), KM_SLEEP);
1082 		it->it_id = ++ch->ch_max_intent;
1083 		it->it_size = 1024;
1084 		TAILQ_INSERT_TAIL(&ch->ch_l_intents, it, it_q);
1085 
1086 		msg.cmd = GLINK_CMD_INTENT;
1087 		msg.param1 = ch->ch_lcid;
1088 		msg.param2 = 1;
1089 		intent.size = it->it_size;
1090 		intent.iid = it->it_id;
1091 	}
1092 
1093 		qcpas_glink_tx(sc, (char *)&msg, sizeof(msg));
1094 		qcpas_glink_tx(sc, (char *)&intent, sizeof(intent));
1095 		qcpas_glink_tx_commit(sc);
1096 	}
1097 
1098 static void
1099 qcpas_glink_recv_intent(struct qcpas_softc *sc, uint32_t rcid, uint32_t count)
1100 {
1101 	struct qcpas_glink_intent_pair *intents;
1102 	struct qcpas_glink_channel *ch;
1103 	struct qcpas_glink_intent *it;
1104 	int i;
1105 
1106 	intents = kmem_zalloc(sizeof(*intents) * count, KM_SLEEP);
1107 	qcpas_glink_rx(sc, (char *)intents, sizeof(*intents) * count);
1108 	qcpas_glink_rx_commit(sc);
1109 
1110 	TAILQ_FOREACH(ch, &sc->sc_glink_channels, ch_q) {
1111 		if (ch->ch_rcid == rcid)
1112 			break;
1113 	}
1114 	if (ch == NULL) {
1115 		device_printf(sc->sc_dev, "unknown channel %u for INTENT\n",
1116 		    rcid);
1117 		kmem_free(intents, sizeof(*intents) * count);
1118 		return;
1119 	}
1120 
1121 	for (i = 0; i < count; i++) {
1122 		it = kmem_zalloc(sizeof(*it), KM_SLEEP);
1123 		it->it_id = intents[i].iid;
1124 		it->it_size = intents[i].size;
1125 		TAILQ_INSERT_TAIL(&ch->ch_r_intents, it, it_q);
1126 	}
1127 
1128 	kmem_free(intents, sizeof(*intents) * count);
1129 }
1130 
1131 static void
1132 qcpas_glink_recv_tx_data(struct qcpas_softc *sc, uint32_t rcid, uint32_t liid)
1133 {
1134 	struct qcpas_glink_channel *ch;
1135 	struct qcpas_glink_intent *it;
1136 	struct glink_msg msg;
1137 	uint32_t chunk_size, left_size;
1138 	char *buf;
1139 
1140 	qcpas_glink_rx(sc, (char *)&chunk_size, sizeof(chunk_size));
1141 	qcpas_glink_rx(sc, (char *)&left_size, sizeof(left_size));
1142 	qcpas_glink_rx_commit(sc);
1143 
1144 	buf = kmem_zalloc(chunk_size, KM_SLEEP);
1145 	qcpas_glink_rx(sc, buf, chunk_size);
1146 	qcpas_glink_rx_commit(sc);
1147 
1148 	TAILQ_FOREACH(ch, &sc->sc_glink_channels, ch_q) {
1149 		if (ch->ch_rcid == rcid)
1150 			break;
1151 	}
1152 	if (ch == NULL) {
1153 		device_printf(sc->sc_dev, "unknown channel %u for TX_DATA\n",
1154 		    rcid);
1155 		kmem_free(buf, chunk_size);
1156 		return;
1157 	}
1158 
1159 	TAILQ_FOREACH(it, &ch->ch_l_intents, it_q) {
1160 		if (it->it_id == liid)
1161 			break;
1162 	}
1163 	if (it == NULL) {
1164 		device_printf(sc->sc_dev, "unknown intent %u for TX_DATA\n",
1165 		    liid);
1166 		kmem_free(buf, chunk_size);
1167 		return;
1168 	}
1169 
1170 	/* FIXME: handle message chunking */
1171 	KASSERT(left_size == 0);
1172 
1173 	ch->ch_proto->recv(ch, buf, chunk_size);
1174 	kmem_free(buf, chunk_size);
1175 
1176 	if (!left_size) {
1177 		msg.cmd = GLINK_CMD_RX_DONE_W_REUSE;
1178 		msg.param1 = ch->ch_lcid;
1179 		msg.param2 = it->it_id;
1180 
1181 		qcpas_glink_tx(sc, (char *)&msg, sizeof(msg));
1182 		qcpas_glink_tx_commit(sc);
1183 	}
1184 }
1185 
1186 static void
1187 qcpas_glink_recv_rx_done(struct qcpas_softc *sc, uint32_t rcid, uint32_t riid,
1188     int reuse)
1189 {
1190 	struct qcpas_glink_channel *ch;
1191 	struct qcpas_glink_intent *it;
1192 
1193 	TAILQ_FOREACH(ch, &sc->sc_glink_channels, ch_q) {
1194 		if (ch->ch_rcid == rcid)
1195 			break;
1196 	}
1197 	if (ch == NULL) {
1198 		device_printf(sc->sc_dev, "unknown channel %u for RX_DONE\n",
1199 		    rcid);
1200 		return;
1201 	}
1202 
1203 	TAILQ_FOREACH(it, &ch->ch_r_intents, it_q) {
1204 		if (it->it_id == riid)
1205 			break;
1206 	}
1207 	if (it == NULL) {
1208 		device_printf(sc->sc_dev, "unknown intent %u for RX_DONE\n",
1209 		    riid);
1210 		return;
1211 	}
1212 
1213 	/* FIXME: handle non-reuse */
1214 	KASSERT(reuse);
1215 
1216 	KASSERT(it->it_inuse);
1217 	it->it_inuse = 0;
1218 }
1219 
1220 static void
1221 qcpas_glink_recv(void *arg)
1222 {
1223 	struct qcpas_softc *sc = arg;
1224 	struct glink_msg msg;
1225 
1226 	while (*sc->sc_rx_tail != *sc->sc_rx_head) {
1227 		membar_consumer();
1228 		qcpas_glink_rx(sc, (uint8_t *)&msg, sizeof(msg));
1229 		qcpas_glink_rx_commit(sc);
1230 
1231 		switch (msg.cmd) {
1232 		case GLINK_CMD_VERSION:
1233 			qcpas_glink_recv_version(sc, msg.param1, msg.param2);
1234 			break;
1235 		case GLINK_CMD_OPEN:
1236 			qcpas_glink_recv_open(sc, msg.param1, msg.param2);
1237 			break;
1238 		case GLINK_CMD_OPEN_ACK:
1239 			qcpas_glink_recv_open_ack(sc, msg.param1);
1240 			break;
1241 		case GLINK_CMD_INTENT:
1242 			qcpas_glink_recv_intent(sc, msg.param1, msg.param2);
1243 			break;
1244 		case GLINK_CMD_RX_INTENT_REQ:
1245 			/* Nothing to do so far */
1246 			break;
1247 		case GLINK_CMD_TX_DATA:
1248 			qcpas_glink_recv_tx_data(sc, msg.param1, msg.param2);
1249 			break;
1250 		case GLINK_CMD_RX_DONE:
1251 			qcpas_glink_recv_rx_done(sc, msg.param1, msg.param2, 0);
1252 			break;
1253 		case GLINK_CMD_RX_DONE_W_REUSE:
1254 			qcpas_glink_recv_rx_done(sc, msg.param1, msg.param2, 1);
1255 			break;
1256 		default:
1257 			device_printf(sc->sc_dev, "unknown cmd %u\n", msg.cmd);
1258 			return;
1259 		}
1260 	}
1261 }
1262 
1263 static int
1264 qcpas_glink_intr(void *cookie)
1265 {
1266 	struct qcpas_softc *sc = cookie;
1267 
1268 	sysmon_task_queue_sched(0, qcpas_glink_recv, sc);
1269 
1270 	return 1;
1271 }
1272 
1273 /* GLINK PMIC Router */
1274 
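/*
 * The "PMIC_RTR_ADSP_APPS" channel carries PMIC GLINK messages from the
 * battery manager running on the DSP.  Battery info and status are
 * requested when a notification arrives and additionally polled every
 * five seconds from a callout; the replies feed the envsys sensors and
 * the AC adapter power switch registered in qcpas_firmload().
 */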
1275 struct pmic_glink_hdr {
1276 	uint32_t owner;
1277 #define PMIC_GLINK_OWNER_BATTMGR	32778
1278 #define PMIC_GLINK_OWNER_USBC		32779
1279 #define PMIC_GLINK_OWNER_USBC_PAN	32780
1280 	uint32_t type;
1281 #define PMIC_GLINK_TYPE_REQ_RESP	1
1282 #define PMIC_GLINK_TYPE_NOTIFY		2
1283 	uint32_t opcode;
1284 };
1285 
1286 #define BATTMGR_OPCODE_BAT_STATUS		0x1
1287 #define BATTMGR_OPCODE_REQUEST_NOTIFICATION	0x4
1288 #define BATTMGR_OPCODE_NOTIF			0x7
1289 #define BATTMGR_OPCODE_BAT_INFO			0x9
1290 #define BATTMGR_OPCODE_BAT_DISCHARGE_TIME	0xc
1291 #define BATTMGR_OPCODE_BAT_CHARGE_TIME		0xd
1292 
1293 #define BATTMGR_NOTIF_BAT_PROPERTY		0x30
1294 #define BATTMGR_NOTIF_USB_PROPERTY		0x32
1295 #define BATTMGR_NOTIF_WLS_PROPERTY		0x34
1296 #define BATTMGR_NOTIF_BAT_STATUS		0x80
1297 #define BATTMGR_NOTIF_BAT_INFO			0x81
1298 
1299 #define BATTMGR_CHEMISTRY_LEN			4
1300 #define BATTMGR_STRING_LEN			128
1301 
1302 struct battmgr_bat_info {
1303 	uint32_t power_unit;
1304 	uint32_t design_capacity;
1305 	uint32_t last_full_capacity;
1306 	uint32_t battery_tech;
1307 	uint32_t design_voltage;
1308 	uint32_t capacity_low;
1309 	uint32_t capacity_warning;
1310 	uint32_t cycle_count;
1311 	uint32_t accuracy;
1312 	uint32_t max_sample_time_ms;
1313 	uint32_t min_sample_time_ms;
1314 	uint32_t max_average_interval_ms;
1315 	uint32_t min_average_interval_ms;
1316 	uint32_t capacity_granularity1;
1317 	uint32_t capacity_granularity2;
1318 	uint32_t swappable;
1319 	uint32_t capabilities;
1320 	char model_number[BATTMGR_STRING_LEN];
1321 	char serial_number[BATTMGR_STRING_LEN];
1322 	char battery_type[BATTMGR_STRING_LEN];
1323 	char oem_info[BATTMGR_STRING_LEN];
1324 	char battery_chemistry[BATTMGR_CHEMISTRY_LEN];
1325 	char uid[BATTMGR_STRING_LEN];
1326 	uint32_t critical_bias;
1327 	uint8_t day;
1328 	uint8_t month;
1329 	uint16_t year;
1330 	uint32_t battery_id;
1331 };
1332 
1333 struct battmgr_bat_status {
1334 	uint32_t battery_state;
1335 #define BATTMGR_BAT_STATE_DISCHARGE	(1 << 0)
1336 #define BATTMGR_BAT_STATE_CHARGING	(1 << 1)
1337 #define BATTMGR_BAT_STATE_CRITICAL_LOW	(1 << 2)
1338 	uint32_t capacity;
1339 	int32_t rate;
1340 	uint32_t battery_voltage;
1341 	uint32_t power_state;
1342 #define BATTMGR_PWR_STATE_AC_ON			(1 << 0)
1343 	uint32_t charging_source;
1344 #define BATTMGR_CHARGING_SOURCE_AC		1
1345 #define BATTMGR_CHARGING_SOURCE_USB		2
1346 #define BATTMGR_CHARGING_SOURCE_WIRELESS	3
1347 	uint32_t temperature;
1348 };
1349 
1350 static void	qcpas_pmic_rtr_refresh(void *);
1351 static void	qcpas_pmic_rtr_bat_info(struct qcpas_softc *,
1352 		    struct battmgr_bat_info *);
1353 static void	qcpas_pmic_rtr_bat_status(struct qcpas_softc *,
1354 		    struct battmgr_bat_status *);
1355 
1356 static void
1357 qcpas_pmic_rtr_battmgr_req_info(void *cookie)
1358 {
1359 	struct {
1360 		struct pmic_glink_hdr hdr;
1361 		uint32_t battery_id;
1362 	} msg;
1363 
1364 	msg.hdr.owner = PMIC_GLINK_OWNER_BATTMGR;
1365 	msg.hdr.type = PMIC_GLINK_TYPE_REQ_RESP;
1366 	msg.hdr.opcode = BATTMGR_OPCODE_BAT_INFO;
1367 	msg.battery_id = 0;
1368 	qcpas_glink_send(cookie, &msg, sizeof(msg));
1369 }
1370 
1371 static void
1372 qcpas_pmic_rtr_battmgr_req_status(void *cookie)
1373 {
1374 	struct {
1375 		struct pmic_glink_hdr hdr;
1376 		uint32_t battery_id;
1377 	} msg;
1378 
1379 	msg.hdr.owner = PMIC_GLINK_OWNER_BATTMGR;
1380 	msg.hdr.type = PMIC_GLINK_TYPE_REQ_RESP;
1381 	msg.hdr.opcode = BATTMGR_OPCODE_BAT_STATUS;
1382 	msg.battery_id = 0;
1383 	qcpas_glink_send(cookie, &msg, sizeof(msg));
1384 }
1385 
1386 static int
1387 qcpas_pmic_rtr_init(void *cookie)
1388 {
1389 	struct qcpas_glink_channel *ch = cookie;
1390 	struct qcpas_softc *sc = ch->ch_sc;
1391 
1392 	callout_init(&sc->sc_rtr_refresh, 0);
1393 	callout_setfunc(&sc->sc_rtr_refresh, qcpas_pmic_rtr_refresh, ch);
1394 
1395 	callout_schedule(&sc->sc_rtr_refresh, hz * 5);
1396 
1397 	return 0;
1398 }
1399 
1400 static int
1401 qcpas_pmic_rtr_recv(void *cookie, uint8_t *buf, int len)
1402 {
1403 	struct qcpas_glink_channel *ch = cookie;
1404 	struct qcpas_softc *sc = ch->ch_sc;
1405 	struct pmic_glink_hdr hdr;
1406 	uint32_t notification;
1407 
1408 	if (len < sizeof(hdr)) {
1409 		device_printf(sc->sc_dev, "pmic glink message too small\n");
1410 		return 0;
1411 	}
1412 
1413 	memcpy(&hdr, buf, sizeof(hdr));
1414 
1415 	switch (hdr.owner) {
1416 	case PMIC_GLINK_OWNER_BATTMGR:
1417 		switch (hdr.opcode) {
1418 		case BATTMGR_OPCODE_NOTIF:
1419 			if (len - sizeof(hdr) != sizeof(uint32_t)) {
1420 				device_printf(sc->sc_dev,
1421 				    "invalid battmgr notification\n");
1422 				return 0;
1423 			}
1424 			memcpy(&notification, buf + sizeof(hdr),
1425 			    sizeof(uint32_t));
1426 			switch (notification) {
1427 			case BATTMGR_NOTIF_BAT_INFO:
1428 				qcpas_pmic_rtr_battmgr_req_info(cookie);
1429 				/* FALLTHROUGH */
1430 			case BATTMGR_NOTIF_BAT_STATUS:
1431 			case BATTMGR_NOTIF_BAT_PROPERTY:
1432 				qcpas_pmic_rtr_battmgr_req_status(cookie);
1433 				break;
1434 			default:
1435 				aprint_debug_dev(sc->sc_dev,
1436 				    "unknown battmgr notification 0x%02x\n",
1437 				    notification);
1438 				break;
1439 			}
1440 			break;
1441 		case BATTMGR_OPCODE_BAT_INFO: {
1442 			struct battmgr_bat_info *bat;
1443 			if (len - sizeof(hdr) < sizeof(*bat)) {
1444 				device_printf(sc->sc_dev,
1445 				    "invalid battmgr bat info\n");
1446 				return 0;
1447 			}
1448 			bat = kmem_alloc(sizeof(*bat), KM_SLEEP);
1449 			memcpy(bat, buf + sizeof(hdr), sizeof(*bat));
1450 			qcpas_pmic_rtr_bat_info(sc, bat);
1451 			kmem_free(bat, sizeof(*bat));
1452 			break;
1453 		}
1454 		case BATTMGR_OPCODE_BAT_STATUS: {
1455 			struct battmgr_bat_status *bat;
1456 			if (len - sizeof(hdr) != sizeof(*bat)) {
1457 				device_printf(sc->sc_dev,
1458 				    "invalid battmgr bat status\n");
1459 				return 0;
1460 			}
1461 			bat = kmem_alloc(sizeof(*bat), KM_SLEEP);
1462 			memcpy(bat, buf + sizeof(hdr), sizeof(*bat));
1463 			qcpas_pmic_rtr_bat_status(sc, bat);
1464 			kmem_free(bat, sizeof(*bat));
1465 			break;
1466 		}
1467 		default:
1468 			device_printf(sc->sc_dev,
1469 			    "unknown battmgr opcode 0x%02x\n",
1470 			    hdr.opcode);
1471 			break;
1472 		}
1473 		break;
1474 	default:
1475 		device_printf(sc->sc_dev,
1476 		    "unknown pmic glink owner 0x%04x\n",
1477 		    hdr.owner);
1478 		break;
1479 	}
1480 
1481 	return 0;
1482 }
1483 
1484 static void
1485 qcpas_pmic_rtr_refresh(void *arg)
1486 {
1487 	struct qcpas_glink_channel *ch = arg;
1488 	struct qcpas_softc *sc = ch->ch_sc;
1489 
1490 	qcpas_pmic_rtr_battmgr_req_status(ch);
1491 
1492 	callout_schedule(&sc->sc_rtr_refresh, hz * 5);
1493 }
1494 
1495 static void
1496 qcpas_pmic_rtr_bat_info(struct qcpas_softc *sc, struct battmgr_bat_info *bat)
1497 {
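	/*
	 * The battery manager reports capacities in mWh and voltages in
	 * mV; scale by 1000 to the micro-units sysmon_envsys expects.
	 */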
1498 	sc->sc_warning_capacity = bat->capacity_warning;
1499 	sc->sc_low_capacity = bat->capacity_low;
1500 
1501 	sc->sc_sens[QCPAS_DCAPACITY].value_cur =
1502 	    bat->design_capacity * 1000;
1503 	sc->sc_sens[QCPAS_DCAPACITY].state = ENVSYS_SVALID;
1504 
1505 	sc->sc_sens[QCPAS_LFCCAPACITY].value_cur =
1506 	    bat->last_full_capacity * 1000;
1507 	sc->sc_sens[QCPAS_LFCCAPACITY].state = ENVSYS_SVALID;
1508 
1509 	sc->sc_sens[QCPAS_DVOLTAGE].value_cur =
1510 	    bat->design_voltage * 1000;
1511 	sc->sc_sens[QCPAS_DVOLTAGE].state = ENVSYS_SVALID;
1512 
1513 	sc->sc_sens[QCPAS_DCYCLES].value_cur =
1514 	    bat->cycle_count;
1515 	sc->sc_sens[QCPAS_DCYCLES].state = ENVSYS_SVALID;
1516 
1517 	sc->sc_sens[QCPAS_CAPACITY].value_max =
1518 	    bat->last_full_capacity * 1000;
1519 	sysmon_envsys_update_limits(sc->sc_sme,
1520 	    &sc->sc_sens[QCPAS_CAPACITY]);
1521 }
1522 
1523 static void
1524 qcpas_pmic_rtr_bat_status(struct qcpas_softc *sc,
1525     struct battmgr_bat_status *bat)
1526 {
1527 	sc->sc_sens[QCPAS_CHARGING].value_cur =
1528 	    (bat->battery_state & BATTMGR_BAT_STATE_CHARGING) != 0;
1529 	sc->sc_sens[QCPAS_CHARGING].state = ENVSYS_SVALID;
1530 	if ((bat->battery_state & BATTMGR_BAT_STATE_CHARGING) != 0) {
1531 		sc->sc_sens[QCPAS_CHARGERATE].value_cur =
1532 		    abs(bat->rate) * 1000;
1533 		sc->sc_sens[QCPAS_CHARGERATE].state = ENVSYS_SVALID;
1534 		sc->sc_sens[QCPAS_DISCHARGERATE].state = ENVSYS_SINVALID;
1535 	} else if ((bat->battery_state & BATTMGR_BAT_STATE_DISCHARGE) != 0) {
1536 		sc->sc_sens[QCPAS_CHARGERATE].state = ENVSYS_SINVALID;
1537 		sc->sc_sens[QCPAS_DISCHARGERATE].value_cur =
1538 		    abs(bat->rate) * 1000;
1539 		sc->sc_sens[QCPAS_DISCHARGERATE].state = ENVSYS_SVALID;
1540 	} else {
1541 		sc->sc_sens[QCPAS_DISCHARGERATE].state = ENVSYS_SINVALID;
1542 		sc->sc_sens[QCPAS_CHARGERATE].state = ENVSYS_SINVALID;
1543 	}
1544 
1545 	sc->sc_sens[QCPAS_VOLTAGE].value_cur =
1546 	    bat->battery_voltage * 1000;
1547 	sc->sc_sens[QCPAS_VOLTAGE].state = ENVSYS_SVALID;
1548 
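	/* tenths of a degree Celsius -> microkelvin */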
1549 	sc->sc_sens[QCPAS_TEMPERATURE].value_cur =
1550 	    (bat->temperature * 10000) + 273150000;
1551 	sc->sc_sens[QCPAS_TEMPERATURE].state = ENVSYS_SVALID;
1552 
1553 	sc->sc_sens[QCPAS_CAPACITY].value_cur =
1554 	    bat->capacity * 1000;
1555 	sc->sc_sens[QCPAS_CAPACITY].state = ENVSYS_SVALID;
1556 
1557 	sc->sc_sens[QCPAS_CHARGE_STATE].value_cur =
1558 	    ENVSYS_BATTERY_CAPACITY_NORMAL;
1559 	sc->sc_sens[QCPAS_CHARGE_STATE].state = ENVSYS_SVALID;
1560 
1561 	if (bat->capacity < sc->sc_warning_capacity) {
1562 		sc->sc_sens[QCPAS_CAPACITY].state = ENVSYS_SWARNUNDER;
1563 		sc->sc_sens[QCPAS_CHARGE_STATE].value_cur =
1564 		    ENVSYS_BATTERY_CAPACITY_WARNING;
1565 	}
1566 
1567 	if (bat->capacity < sc->sc_low_capacity) {
1568 		sc->sc_sens[QCPAS_CAPACITY].state = ENVSYS_SCRITUNDER;
1569 		sc->sc_sens[QCPAS_CHARGE_STATE].value_cur =
1570 		    ENVSYS_BATTERY_CAPACITY_LOW;
1571 	}
1572 
1573 	if ((bat->battery_state & BATTMGR_BAT_STATE_CRITICAL_LOW) != 0) {
1574 		sc->sc_sens[QCPAS_CAPACITY].state = ENVSYS_SCRITICAL;
1575 		sc->sc_sens[QCPAS_CHARGE_STATE].value_cur =
1576 		    ENVSYS_BATTERY_CAPACITY_CRITICAL;
1577 	}
1578 
1579 	if ((bat->power_state & BATTMGR_PWR_STATE_AC_ON) !=
1580 	    (sc->sc_power_state & BATTMGR_PWR_STATE_AC_ON)) {
1581 		sysmon_pswitch_event(&sc->sc_smpsw_acadapter,
1582 		    (bat->power_state & BATTMGR_PWR_STATE_AC_ON) != 0 ?
1583 		    PSWITCH_EVENT_PRESSED : PSWITCH_EVENT_RELEASED);
1584 
1585 		aprint_debug_dev(sc->sc_dev, "AC adapter %sconnected\n",
1586 		    (bat->power_state & BATTMGR_PWR_STATE_AC_ON) == 0 ?
1587 		    "not " : "");
1588 	}
1589 
1590 	sc->sc_power_state = bat->power_state;
1591 	sc->sc_sens[QCPAS_ACADAPTER].value_cur =
1592 	    (bat->power_state & BATTMGR_PWR_STATE_AC_ON) != 0;
1593 	sc->sc_sens[QCPAS_ACADAPTER].state = ENVSYS_SVALID;
1594 }
1595 
1596 static void
1597 qcpas_get_limits(struct sysmon_envsys *sme, envsys_data_t *edata,
1598     sysmon_envsys_lim_t *limits, uint32_t *props)
1599 {
1600 	struct qcpas_softc *sc = sme->sme_cookie;
1601 
1602 	if (edata->sensor != QCPAS_CAPACITY) {
1603 		return;
1604 	}
1605 
1606 	limits->sel_critmin = sc->sc_low_capacity * 1000;
1607 	limits->sel_warnmin = sc->sc_warning_capacity * 1000;
1608 
1609 	*props |= PROP_BATTCAP | PROP_BATTWARN | PROP_DRIVER_LIMITS;
1610 }
1611