/* $NetBSD: qcomsmem.c,v 1.1 2024/12/30 12:31:10 jmcneill Exp $ */
/*	$OpenBSD: qcsmem.c,v 1.1 2023/05/19 21:13:49 patrick Exp $	*/
/*
 * Copyright (c) 2023 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kmem.h>

#include <dev/acpi/acpivar.h>
#include <dev/acpi/qcomsmem.h>

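/*
 * Qualcomm Shared Memory (SMEM) driver.
 *
 * SMEM is a region of memory shared between the application processor
 * and the remote processors (modem, ADSP, etc.) on Qualcomm SoCs.
 * Items are allocated either from per-host "private" partitions,
 * described by a partition table in the last page of the region, or
 * from a legacy global heap whose table of contents lives in the SMEM
 * header.  Allocation by concurrent processors is serialized with a
 * hardware mutex (QCMTX) register block.
 */
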
#define QCSMEM_ITEM_FIXED	8
#define QCSMEM_ITEM_COUNT	512
#define QCSMEM_HOST_COUNT	15

struct qcsmem_proc_comm {
	uint32_t command;
	uint32_t status;
	uint32_t params[2];
};

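/*
 * Entry in the table of contents of the legacy global heap.  aux_base
 * holds the physical base of the SMEM region the item lives in; the
 * low two bits are masked off before use.
 */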
struct qcsmem_global_entry {
	uint32_t allocated;
	uint32_t offset;
	uint32_t size;
	uint32_t aux_base;
#define QCSMEM_GLOBAL_ENTRY_AUX_BASE_MASK	0xfffffffc
};

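/*
 * Header at the very start of the SMEM region.  version[] tracks
 * per-master protocol versions; the master SBL entry encodes the
 * layout version in its upper 16 bits.  free_offset, available and
 * the TOC are only meaningful for the legacy global heap layout.
 */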
struct qcsmem_header {
	struct qcsmem_proc_comm proc_comm[4];
	uint32_t version[32];
#define QCSMEM_HEADER_VERSION_MASTER_SBL_IDX	7
#define QCSMEM_HEADER_VERSION_GLOBAL_HEAP	11
#define QCSMEM_HEADER_VERSION_GLOBAL_PART	12
	uint32_t initialized;
	uint32_t free_offset;
	uint32_t available;
	uint32_t reserved;
	struct qcsmem_global_entry toc[QCSMEM_ITEM_COUNT];
};

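/*
 * The partition table lives in the last page of the SMEM region and
 * describes the private partitions.  Each entry names the pair of
 * hosts that share the partition; the pair (QCSMEM_GLOBAL_HOST,
 * QCSMEM_GLOBAL_HOST) marks the global partition.
 */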
struct qcsmem_ptable_entry {
	uint32_t offset;
	uint32_t size;
	uint32_t flags;
	uint16_t host[2];
#define QCSMEM_LOCAL_HOST			0
#define QCSMEM_GLOBAL_HOST			0xfffe
	uint32_t cacheline;
	uint32_t reserved[7];
};

struct qcsmem_ptable {
	uint32_t magic;
#define QCSMEM_PTABLE_MAGIC	0x434f5424
	uint32_t version;
#define QCSMEM_PTABLE_VERSION	1
	uint32_t num_entries;
	uint32_t reserved[5];
	struct qcsmem_ptable_entry entry[];
};

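/*
 * Header at the start of each private partition.  Uncached entries
 * grow upwards from just after this header towards
 * offset_free_uncached; cached entries grow downwards from the end of
 * the partition towards offset_free_cached.
 */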
struct qcsmem_partition_header {
	uint32_t magic;
#define QCSMEM_PART_HDR_MAGIC	0x54525024
	uint16_t host[2];
	uint32_t size;
	uint32_t offset_free_uncached;
	uint32_t offset_free_cached;
	uint32_t reserved[3];
};

struct qcsmem_partition {
	struct qcsmem_partition_header *phdr;
	size_t cacheline;
	size_t size;
};

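/*
 * Each item in a private partition is preceded by this header.  The
 * canary catches corruption by a misbehaving peer; the padding fields
 * account for alignment of the header and the 8-byte rounding of the
 * payload.
 */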
struct qcsmem_private_entry {
	uint16_t canary;
#define QCSMEM_PRIV_ENTRY_CANARY	0xa5a5
	uint16_t item;
	uint32_t size;
	uint16_t padding_data;
	uint16_t padding_hdr;
	uint32_t reserved;
};

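/*
 * Optional info block placed directly after the partition table
 * entries; when present (magic matches) it overrides the default
 * item count.
 */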
struct qcsmem_info {
	uint32_t magic;
#define QCSMEM_INFO_MAGIC	0x49494953
	uint32_t size;
	uint32_t base_addr;
	uint32_t reserved;
	uint32_t num_items;
};

struct qcsmem_softc {
	device_t		sc_dev;
	bus_space_tag_t		sc_iot;
	void			*sc_smem;
	bus_space_handle_t	sc_mtx_ioh;

	bus_addr_t		sc_aux_base;
	bus_size_t		sc_aux_size;

	int			sc_item_count;
	struct qcsmem_partition	sc_global_partition;
	struct qcsmem_partition	sc_partitions[QCSMEM_HOST_COUNT];
};

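/*
 * Hardware mutex register layout: one 32-bit lock register per 4 KiB
 * page.  A processor claims a lock by writing its ID and reading it
 * back; reading back a different value means another processor won
 * the race.  Writing 0 releases the lock.
 */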
#define QCMTX_OFF(idx)		((idx) * 0x1000)
#define QCMTX_NUM_LOCKS		32
#define QCMTX_APPS_PROC_ID	1

#define MTXREAD4(sc, reg)						\
	bus_space_read_4((sc)->sc_iot, (sc)->sc_mtx_ioh, (reg))
#define MTXWRITE4(sc, reg, val)						\
	bus_space_write_4((sc)->sc_iot, (sc)->sc_mtx_ioh, (reg), (val))

struct qcsmem_softc *qcsmem_sc;

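/*
 * The SMEM region and mutex block addresses are hardcoded for the
 * Snapdragon X Elite (X1E) platform rather than taken from ACPI
 * resources.
 */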
#define QCSMEM_X1E_BASE		0xffe00000
#define QCSMEM_X1E_SIZE		0x200000

#define QCMTX_X1E_BASE		0x01f40000
#define QCMTX_X1E_SIZE		0x20000

#define QCSMEM_X1E_LOCK_IDX	3

static const struct device_compatible_entry compat_data[] = {
	{ .compat = "QCOM0C84" },
	DEVICE_COMPAT_EOL
};

static int	qcsmem_match(device_t, cfdata_t, void *);
static void	qcsmem_attach(device_t, device_t, void *);
static int	qcmtx_lock(struct qcsmem_softc *, u_int, u_int);
static void	qcmtx_unlock(struct qcsmem_softc *, u_int);

CFATTACH_DECL_NEW(qcomsmem, sizeof(struct qcsmem_softc),
    qcsmem_match, qcsmem_attach, NULL, NULL);

static int
qcsmem_match(device_t parent, cfdata_t match, void *aux)
{
	struct acpi_attach_args *aa = aux;

	return acpi_compatible_match(aa, compat_data);
}

static void
qcsmem_attach(device_t parent, device_t self, void *aux)
{
	struct qcsmem_softc *sc = device_private(self);
	struct acpi_attach_args *aa = aux;
	struct qcsmem_header *header;
	struct qcsmem_ptable *ptable;
	struct qcsmem_ptable_entry *pte;
	struct qcsmem_info *info;
	struct qcsmem_partition *part;
	struct qcsmem_partition_header *phdr;
	uintptr_t smem_va;
	uint32_t hdr_version;
	int i;

	sc->sc_dev = self;
	sc->sc_iot = aa->aa_memt;
	sc->sc_smem = AcpiOsMapMemory(QCSMEM_X1E_BASE, QCSMEM_X1E_SIZE);
	KASSERT(sc->sc_smem != NULL);

	sc->sc_aux_base = QCSMEM_X1E_BASE;
	sc->sc_aux_size = QCSMEM_X1E_SIZE;

	if (bus_space_map(sc->sc_iot, QCMTX_X1E_BASE,
	    QCMTX_X1E_SIZE, 0, &sc->sc_mtx_ioh)) {
		aprint_error(": can't map mutex registers\n");
		return;
	}

	smem_va = (uintptr_t)sc->sc_smem;

	ptable = (void *)(smem_va + sc->sc_aux_size - PAGE_SIZE);
	if (ptable->magic != QCSMEM_PTABLE_MAGIC ||
	    ptable->version != QCSMEM_PTABLE_VERSION) {
		aprint_error(": unsupported ptable 0x%x/0x%x\n",
		    ptable->magic, ptable->version);
		return;
	}

	header = (void *)smem_va;
	hdr_version =
	    header->version[QCSMEM_HEADER_VERSION_MASTER_SBL_IDX] >> 16;
	if (hdr_version != QCSMEM_HEADER_VERSION_GLOBAL_PART) {
		aprint_error(": unsupported header 0x%x\n", hdr_version);
		return;
	}

	for (i = 0; i < ptable->num_entries; i++) {
		pte = &ptable->entry[i];
		if (!pte->offset || !pte->size)
			continue;
		if (pte->host[0] == QCSMEM_GLOBAL_HOST &&
		    pte->host[1] == QCSMEM_GLOBAL_HOST)
			part = &sc->sc_global_partition;
		else if (pte->host[0] == QCSMEM_LOCAL_HOST &&
		    pte->host[1] < QCSMEM_HOST_COUNT)
			part = &sc->sc_partitions[pte->host[1]];
		else if (pte->host[1] == QCSMEM_LOCAL_HOST &&
		    pte->host[0] < QCSMEM_HOST_COUNT)
			part = &sc->sc_partitions[pte->host[0]];
		else
			continue;
		if (part->phdr != NULL)
			continue;
		phdr = (void *)(smem_va + pte->offset);
		if (phdr->magic != QCSMEM_PART_HDR_MAGIC) {
			aprint_error(": unsupported partition 0x%x\n",
			    phdr->magic);
			return;
		}
		if (pte->host[0] != phdr->host[0] ||
		    pte->host[1] != phdr->host[1]) {
			aprint_error(": bad hosts 0x%x/0x%x+0x%x/0x%x\n",
			    pte->host[0], phdr->host[0],
			    pte->host[1], phdr->host[1]);
			return;
		}
		if (pte->size != phdr->size) {
			aprint_error(": bad size 0x%x/0x%x\n",
			    pte->size, phdr->size);
			return;
		}
		if (phdr->offset_free_uncached > phdr->size) {
			aprint_error(": bad size 0x%x > 0x%x\n",
			    phdr->offset_free_uncached, phdr->size);
			return;
		}
		part->phdr = phdr;
		part->size = pte->size;
		part->cacheline = pte->cacheline;
	}
	if (sc->sc_global_partition.phdr == NULL) {
		aprint_error(": could not find global partition\n");
		return;
	}

	sc->sc_item_count = QCSMEM_ITEM_COUNT;
	info = (struct qcsmem_info *)&ptable->entry[ptable->num_entries];
	if (info->magic == QCSMEM_INFO_MAGIC)
		sc->sc_item_count = info->num_items;

	aprint_naive("\n");
	aprint_normal("\n");

	qcsmem_sc = sc;
}

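/*
 * Allocate an item from a private partition.  Walk the uncached entry
 * list; if the item already exists the allocation succeeds trivially,
 * otherwise append a new entry, provided it does not collide with the
 * cached area growing down from the end of the partition.
 */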
static int
qcsmem_alloc_private(struct qcsmem_softc *sc, struct qcsmem_partition *part,
    int item, int size)
{
	struct qcsmem_private_entry *entry, *last;
	struct qcsmem_partition_header *phdr = part->phdr;
	uintptr_t phdr_va = (uintptr_t)phdr;

	entry = (void *)&phdr[1];
	last = (void *)(phdr_va + phdr->offset_free_uncached);

	if ((void *)last > (void *)(phdr_va + part->size))
		return EINVAL;

	while (entry < last) {
		if (entry->canary != QCSMEM_PRIV_ENTRY_CANARY) {
			device_printf(sc->sc_dev, "invalid canary\n");
			return EINVAL;
		}

		if (entry->item == item)
			return 0;

		entry = (void *)((uintptr_t)&entry[1] + entry->padding_hdr +
		    entry->size);
	}

	if ((void *)entry > (void *)(phdr_va + part->size))
		return EINVAL;

	if ((uintptr_t)&entry[1] + roundup(size, 8) >
	    phdr_va + phdr->offset_free_cached)
		return EINVAL;

	entry->canary = QCSMEM_PRIV_ENTRY_CANARY;
	entry->item = item;
	entry->size = roundup(size, 8);
	entry->padding_data = entry->size - size;
	entry->padding_hdr = 0;
	membar_producer();

	phdr->offset_free_uncached += sizeof(*entry) + entry->size;

	return 0;
}

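/*
 * Allocate an item from the legacy global heap by claiming its TOC
 * entry.  The write barrier ensures remote processors never observe
 * allocated == 1 before offset and size are valid.
 */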
static int
qcsmem_alloc_global(struct qcsmem_softc *sc, int item, int size)
{
	struct qcsmem_header *header;
	struct qcsmem_global_entry *entry;

	header = (void *)sc->sc_smem;
	entry = &header->toc[item];
	if (entry->allocated)
		return 0;

	size = roundup(size, 8);
	if (size > header->available)
		return EINVAL;

	entry->offset = header->free_offset;
	entry->size = size;
	membar_producer();
	entry->allocated = 1;

	header->free_offset += size;
	header->available -= size;

	return 0;
}

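/*
 * Public interface: allocate SMEM item 'item' of 'size' bytes, shared
 * with remote processor 'host'.  Items below QCSMEM_ITEM_FIXED are
 * preallocated by firmware and may not be claimed here.  A consumer
 * would typically pair this with qcsmem_get(), e.g. (hypothetical
 * sketch, not an actual caller in this file):
 *
 *	int size;
 *	void *p = NULL;
 *
 *	if (qcsmem_alloc(host, item, len) == 0)
 *		p = qcsmem_get(host, item, &size);
 */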
int
qcsmem_alloc(int host, int item, int size)
{
	struct qcsmem_softc *sc = qcsmem_sc;
	struct qcsmem_partition *part;
	int ret;

	if (sc == NULL)
		return ENXIO;

	if (item < QCSMEM_ITEM_FIXED)
		return EPERM;

	if (item >= sc->sc_item_count)
		return ENXIO;

	ret = qcmtx_lock(sc, QCSMEM_X1E_LOCK_IDX, 1000);
	if (ret)
		return ret;

	if (host >= 0 && host < QCSMEM_HOST_COUNT &&
	    sc->sc_partitions[host].phdr != NULL) {
		part = &sc->sc_partitions[host];
		ret = qcsmem_alloc_private(sc, part, item, size);
	} else if (sc->sc_global_partition.phdr != NULL) {
		part = &sc->sc_global_partition;
		ret = qcsmem_alloc_private(sc, part, item, size);
	} else {
		ret = qcsmem_alloc_global(sc, item, size);
	}

	qcmtx_unlock(sc, QCSMEM_X1E_LOCK_IDX);

	return ret;
}

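/*
 * Look up an item in a private partition.  Search the uncached
 * entries from the front of the partition first, then the cached
 * entries from the back.  A cached entry's header sits above its
 * payload, so the payload address is the header address minus the
 * payload size.
 */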
static void *
qcsmem_get_private(struct qcsmem_softc *sc, struct qcsmem_partition *part,
    int item, int *size)
{
	struct qcsmem_private_entry *entry, *last;
	struct qcsmem_partition_header *phdr = part->phdr;
	uintptr_t phdr_va = (uintptr_t)phdr;

	entry = (void *)&phdr[1];
	last = (void *)(phdr_va + phdr->offset_free_uncached);

	while (entry < last) {
		if (entry->canary != QCSMEM_PRIV_ENTRY_CANARY) {
			device_printf(sc->sc_dev, "invalid canary\n");
			return NULL;
		}

		if (entry->item == item) {
			if (size != NULL) {
				if (entry->size > part->size ||
				    entry->padding_data > entry->size)
					return NULL;
				*size = entry->size - entry->padding_data;
			}

			return (void *)((uintptr_t)&entry[1] +
			    entry->padding_hdr);
		}

		entry = (void *)((uintptr_t)&entry[1] + entry->padding_hdr +
		    entry->size);
	}

	if ((uintptr_t)entry > phdr_va + part->size)
		return NULL;

	entry = (void *)(phdr_va + phdr->size -
	    roundup(sizeof(*entry), part->cacheline));
	last = (void *)(phdr_va + phdr->offset_free_cached);

	if ((uintptr_t)entry < phdr_va ||
	    (uintptr_t)last > phdr_va + part->size)
		return NULL;

	while (entry > last) {
		if (entry->canary != QCSMEM_PRIV_ENTRY_CANARY) {
			device_printf(sc->sc_dev, "invalid canary\n");
			return NULL;
		}

		if (entry->item == item) {
			if (size != NULL) {
				if (entry->size > part->size ||
				    entry->padding_data > entry->size)
					return NULL;
				*size = entry->size - entry->padding_data;
			}

			return (void *)((uintptr_t)entry - entry->size);
		}

		entry = (void *)((uintptr_t)entry - entry->size -
		    roundup(sizeof(*entry), part->cacheline));
	}

	if ((uintptr_t)entry < phdr_va)
		return NULL;

	return NULL;
}

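/*
 * Look up an item in the legacy global heap, sanity-checking that the
 * TOC entry stays within the SMEM region we mapped.
 */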
static void *
qcsmem_get_global(struct qcsmem_softc *sc, int item, int *size)
{
	struct qcsmem_header *header;
	struct qcsmem_global_entry *entry;
	uint32_t aux_base;

	header = (void *)sc->sc_smem;
	entry = &header->toc[item];
	if (!entry->allocated)
		return NULL;

	aux_base = entry->aux_base & QCSMEM_GLOBAL_ENTRY_AUX_BASE_MASK;
	if (aux_base != 0 && aux_base != sc->sc_aux_base)
		return NULL;

	if (entry->size + entry->offset > sc->sc_aux_size)
		return NULL;

	if (size != NULL)
		*size = entry->size;

	return (void *)((uintptr_t)sc->sc_smem + entry->offset);
}

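/*
 * Public interface: return a pointer to SMEM item 'item' shared with
 * 'host', and optionally its usable size.  The pointer references the
 * live shared region; no copy is made.
 */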
void *
qcsmem_get(int host, int item, int *size)
{
	struct qcsmem_softc *sc = qcsmem_sc;
	struct qcsmem_partition *part;
	void *p = NULL;
	int ret;

	if (sc == NULL)
		return NULL;

	if (item >= sc->sc_item_count)
		return NULL;

	ret = qcmtx_lock(sc, QCSMEM_X1E_LOCK_IDX, 1000);
	if (ret)
		return NULL;

	if (host >= 0 &&
	    host < QCSMEM_HOST_COUNT &&
	    sc->sc_partitions[host].phdr != NULL) {
		part = &sc->sc_partitions[host];
		p = qcsmem_get_private(sc, part, item, size);
	} else if (sc->sc_global_partition.phdr != NULL) {
		part = &sc->sc_global_partition;
		p = qcsmem_get_private(sc, part, item, size);
	} else {
		p = qcsmem_get_global(sc, item, size);
	}

	qcmtx_unlock(sc, QCSMEM_X1E_LOCK_IDX);
	return p;
}

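/*
 * memset() replacement for the shared region.  The volatile accesses
 * force fixed-width stores; the likely motivation is that the region
 * may be mapped with device-type attributes, where unaligned or
 * overlong accesses can fault.  The 64-bit path is an optimization
 * for the common zero-fill case.
 */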
void
qcsmem_memset(void *ptr, uint8_t val, size_t len)
{
	if (len % 8 == 0 && val == 0) {
		volatile uint64_t *p = ptr;
		size_t n;

		for (n = 0; n < len; n += 8) {
			/* n counts bytes; index by 64-bit words. */
			p[n / 8] = val;
		}
	} else {
		volatile uint8_t *p = ptr;
		size_t n;

		for (n = 0; n < len; n++) {
			p[n] = val;
		}
	}
}

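/*
 * Try-lock/unlock primitive for the hardware mutex.  Locking is a
 * single attempt: write our processor ID, read it back, and report
 * EAGAIN if another processor holds the lock.  qcmtx_lock() turns
 * this into a polled lock with a millisecond-granularity timeout.
 */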
static int
qcmtx_dolockunlock(struct qcsmem_softc *sc, u_int idx, int lock)
{
	if (idx >= QCMTX_NUM_LOCKS)
		return ENXIO;

	if (lock) {
		MTXWRITE4(sc, QCMTX_OFF(idx), QCMTX_APPS_PROC_ID);
		if (MTXREAD4(sc, QCMTX_OFF(idx)) !=
		    QCMTX_APPS_PROC_ID)
			return EAGAIN;
		KASSERT(MTXREAD4(sc, QCMTX_OFF(idx)) == QCMTX_APPS_PROC_ID);
	} else {
		KASSERT(MTXREAD4(sc, QCMTX_OFF(idx)) == QCMTX_APPS_PROC_ID);
		MTXWRITE4(sc, QCMTX_OFF(idx), 0);
	}

	return 0;
}

static int
qcmtx_lock(struct qcsmem_softc *sc, u_int idx, u_int timeout_ms)
{
	int rv = EINVAL;
	u_int n;

	for (n = 0; n < timeout_ms; n++) {
		rv = qcmtx_dolockunlock(sc, idx, 1);
		if (rv != EAGAIN) {
			break;
		}
		delay(1000);
	}

	return rv;
}

static void
qcmtx_unlock(struct qcsmem_softc *sc, u_int idx)
{
	qcmtx_dolockunlock(sc, idx, 0);
}