/* $NetBSD: qcomsmem.c,v 1.1 2024/12/30 12:31:10 jmcneill Exp $ */
/*	$OpenBSD: qcsmem.c,v 1.1 2023/05/19 21:13:49 patrick Exp $	*/
/*
 * Copyright (c) 2023 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kmem.h>

#include <dev/acpi/acpivar.h>
#include <dev/acpi/qcomsmem.h>

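/*
 * Driver for the Qualcomm shared memory (SMEM) region, a carveout of
 * physical memory used to exchange data between the application
 * processor and the remote processors in the SoC.  The layout handled
 * below follows the OpenBSD qcsmem driver this file derives from: a
 * legacy global header with a table of contents at the start of the
 * region, a partition table at its end, and per-host "private"
 * partitions containing canary-guarded, size-prefixed items.
 */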
#define QCSMEM_ITEM_FIXED	8
#define QCSMEM_ITEM_COUNT	512
#define QCSMEM_HOST_COUNT	15

struct qcsmem_proc_comm {
	uint32_t command;
	uint32_t status;
	uint32_t params[2];
};

struct qcsmem_global_entry {
	uint32_t allocated;
	uint32_t offset;
	uint32_t size;
	uint32_t aux_base;
#define QCSMEM_GLOBAL_ENTRY_AUX_BASE_MASK	0xfffffffc
};

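/*
 * Legacy global header at the start of the SMEM region.  The first
 * QCSMEM_ITEM_FIXED entries of the table of contents are reserved;
 * version[QCSMEM_HEADER_VERSION_MASTER_SBL_IDX] carries the layout
 * version in its upper 16 bits.
 */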
struct qcsmem_header {
	struct qcsmem_proc_comm proc_comm[4];
	uint32_t version[32];
#define QCSMEM_HEADER_VERSION_MASTER_SBL_IDX	7
#define QCSMEM_HEADER_VERSION_GLOBAL_HEAP	11
#define QCSMEM_HEADER_VERSION_GLOBAL_PART	12
	uint32_t initialized;
	uint32_t free_offset;
	uint32_t available;
	uint32_t reserved;
	struct qcsmem_global_entry toc[QCSMEM_ITEM_COUNT];
};

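/*
 * Partition table, located in the last page of the SMEM region.  Each
 * entry describes one partition and the pair of hosts sharing it; a
 * host id of QCSMEM_GLOBAL_HOST in both slots marks the global
 * partition.
 */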
struct qcsmem_ptable_entry {
	uint32_t offset;
	uint32_t size;
	uint32_t flags;
	uint16_t host[2];
#define QCSMEM_LOCAL_HOST			0
#define QCSMEM_GLOBAL_HOST			0xfffe
	uint32_t cacheline;
	uint32_t reserved[7];
};

struct qcsmem_ptable {
	uint32_t magic;
#define QCSMEM_PTABLE_MAGIC	0x434f5424
	uint32_t version;
#define QCSMEM_PTABLE_VERSION	1
	uint32_t num_entries;
	uint32_t reserved[5];
	struct qcsmem_ptable_entry entry[];
};

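/*
 * Each partition starts with this header.  Uncached items grow upwards
 * from just after the header towards offset_free_uncached; cached
 * items grow downwards from the end of the partition towards
 * offset_free_cached.  Both kinds are described by a
 * qcsmem_private_entry guarded by a canary.
 */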
struct qcsmem_partition_header {
	uint32_t magic;
#define QCSMEM_PART_HDR_MAGIC	0x54525024
	uint16_t host[2];
	uint32_t size;
	uint32_t offset_free_uncached;
	uint32_t offset_free_cached;
	uint32_t reserved[3];
};

struct qcsmem_partition {
	struct qcsmem_partition_header *phdr;
	size_t cacheline;
	size_t size;
};

struct qcsmem_private_entry {
	uint16_t canary;
#define QCSMEM_PRIV_ENTRY_CANARY	0xa5a5
	uint16_t item;
	uint32_t size;
	uint16_t padding_data;
	uint16_t padding_hdr;
	uint32_t reserved;
};

struct qcsmem_info {
	uint32_t magic;
#define QCSMEM_INFO_MAGIC	0x49494953
	uint32_t size;
	uint32_t base_addr;
	uint32_t reserved;
	uint32_t num_items;
};

struct qcsmem_softc {
	device_t		sc_dev;
	bus_space_tag_t		sc_iot;
	void			*sc_smem;
	bus_space_handle_t	sc_mtx_ioh;

	bus_addr_t		sc_aux_base;
	bus_size_t		sc_aux_size;

	int			sc_item_count;
	struct qcsmem_partition	sc_global_partition;
	struct qcsmem_partition	sc_partitions[QCSMEM_HOST_COUNT];
};

#define QCMTX_OFF(idx)		((idx) * 0x1000)
#define QCMTX_NUM_LOCKS		32
#define QCMTX_APPS_PROC_ID	1

#define MTXREAD4(sc, reg)						\
	bus_space_read_4((sc)->sc_iot, (sc)->sc_mtx_ioh, (reg))
#define MTXWRITE4(sc, reg, val)						\
	bus_space_write_4((sc)->sc_iot, (sc)->sc_mtx_ioh, (reg), (val))

struct qcsmem_softc *qcsmem_sc;

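/*
 * Fixed physical addresses of the SMEM carveout and of the hardware
 * mutex registers on Snapdragon X Elite (X1E) platforms, plus the
 * mutex index used to serialize access to the shared memory heap.
 */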
#define QCSMEM_X1E_BASE		0xffe00000
#define QCSMEM_X1E_SIZE		0x200000

#define QCMTX_X1E_BASE		0x01f40000
#define QCMTX_X1E_SIZE		0x20000

#define QCSMEM_X1E_LOCK_IDX	3

static const struct device_compatible_entry compat_data[] = {
	{ .compat = "QCOM0C84" },
	DEVICE_COMPAT_EOL
};

static int	qcsmem_match(device_t, cfdata_t, void *);
static void	qcsmem_attach(device_t, device_t, void *);
static int	qcmtx_lock(struct qcsmem_softc *, u_int, u_int);
static void	qcmtx_unlock(struct qcsmem_softc *, u_int);

CFATTACH_DECL_NEW(qcomsmem, sizeof(struct qcsmem_softc),
    qcsmem_match, qcsmem_attach, NULL, NULL);

static int
qcsmem_match(device_t parent, cfdata_t match, void *aux)
{
	struct acpi_attach_args *aa = aux;

	return acpi_compatible_match(aa, compat_data);
}

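/*
 * Map the SMEM region and the hardware mutex registers, validate the
 * partition table and the global header version, then record every
 * partition visible to this host.  On success the softc is published
 * through qcsmem_sc for use by qcsmem_alloc()/qcsmem_get().
 */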
static void
qcsmem_attach(device_t parent, device_t self, void *aux)
{
	struct qcsmem_softc *sc = device_private(self);
	struct acpi_attach_args *aa = aux;
	struct qcsmem_header *header;
	struct qcsmem_ptable *ptable;
	struct qcsmem_ptable_entry *pte;
	struct qcsmem_info *info;
	struct qcsmem_partition *part;
	struct qcsmem_partition_header *phdr;
	uintptr_t smem_va;
	uint32_t hdr_version;
	int i;

	sc->sc_dev = self;
	sc->sc_iot = aa->aa_memt;
	sc->sc_smem = AcpiOsMapMemory(QCSMEM_X1E_BASE, QCSMEM_X1E_SIZE);
	KASSERT(sc->sc_smem != NULL);

	sc->sc_aux_base = QCSMEM_X1E_BASE;
	sc->sc_aux_size = QCSMEM_X1E_SIZE;

	if (bus_space_map(sc->sc_iot, QCMTX_X1E_BASE,
	    QCMTX_X1E_SIZE, 0, &sc->sc_mtx_ioh)) {
		aprint_error(": can't map mutex registers\n");
		return;
	}

	smem_va = (uintptr_t)sc->sc_smem;

	ptable = (void *)(smem_va + sc->sc_aux_size - PAGE_SIZE);
	if (ptable->magic != QCSMEM_PTABLE_MAGIC ||
	    ptable->version != QCSMEM_PTABLE_VERSION) {
		aprint_error(": unsupported ptable 0x%x/0x%x\n",
		    ptable->magic, ptable->version);
		return;
	}

	header = (void *)smem_va;
	hdr_version = header->version[QCSMEM_HEADER_VERSION_MASTER_SBL_IDX] >> 16;
	if (hdr_version != QCSMEM_HEADER_VERSION_GLOBAL_PART) {
		aprint_error(": unsupported header 0x%x\n", hdr_version);
		return;
	}

	for (i = 0; i < ptable->num_entries; i++) {
		pte = &ptable->entry[i];
		if (!pte->offset || !pte->size)
			continue;
		if (pte->host[0] == QCSMEM_GLOBAL_HOST &&
		    pte->host[1] == QCSMEM_GLOBAL_HOST)
			part = &sc->sc_global_partition;
		else if (pte->host[0] == QCSMEM_LOCAL_HOST &&
		    pte->host[1] < QCSMEM_HOST_COUNT)
			part = &sc->sc_partitions[pte->host[1]];
		else if (pte->host[1] == QCSMEM_LOCAL_HOST &&
		    pte->host[0] < QCSMEM_HOST_COUNT)
			part = &sc->sc_partitions[pte->host[0]];
		else
			continue;
		if (part->phdr != NULL)
			continue;
		phdr = (void *)(smem_va + pte->offset);
		if (phdr->magic != QCSMEM_PART_HDR_MAGIC) {
			aprint_error(": unsupported partition 0x%x\n",
			    phdr->magic);
			return;
		}
		if (pte->host[0] != phdr->host[0] ||
		    pte->host[1] != phdr->host[1]) {
			aprint_error(": bad hosts 0x%x/0x%x+0x%x/0x%x\n",
			    pte->host[0], phdr->host[0],
			    pte->host[1], phdr->host[1]);
			return;
		}
		if (pte->size != phdr->size) {
			aprint_error(": bad size 0x%x/0x%x\n",
			    pte->size, phdr->size);
			return;
		}
		if (phdr->offset_free_uncached > phdr->size) {
			aprint_error(": bad size 0x%x > 0x%x\n",
			    phdr->offset_free_uncached, phdr->size);
			return;
		}
		part->phdr = phdr;
		part->size = pte->size;
		part->cacheline = pte->cacheline;
	}
	if (sc->sc_global_partition.phdr == NULL) {
		aprint_error(": could not find global partition\n");
		return;
	}

	sc->sc_item_count = QCSMEM_ITEM_COUNT;
	info = (struct qcsmem_info *)&ptable->entry[ptable->num_entries];
	if (info->magic == QCSMEM_INFO_MAGIC)
		sc->sc_item_count = info->num_items;

	aprint_naive("\n");
	aprint_normal("\n");

	qcsmem_sc = sc;
}

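/*
 * Allocate an uncached item in a private partition.  The uncached list
 * is walked from the start of the partition; if the item already
 * exists the allocation is a no-op, otherwise a new entry is appended
 * and offset_free_uncached is advanced after a write barrier so that
 * remote observers never see a partially initialized entry.
 */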
static int
qcsmem_alloc_private(struct qcsmem_softc *sc, struct qcsmem_partition *part,
    int item, int size)
{
	struct qcsmem_private_entry *entry, *last;
	struct qcsmem_partition_header *phdr = part->phdr;
	uintptr_t phdr_va = (uintptr_t)phdr;

	entry = (void *)&phdr[1];
	last = (void *)(phdr_va + phdr->offset_free_uncached);

	if ((void *)last > (void *)(phdr_va + part->size))
		return EINVAL;

	while (entry < last) {
		if (entry->canary != QCSMEM_PRIV_ENTRY_CANARY) {
			device_printf(sc->sc_dev, "invalid canary\n");
			return EINVAL;
		}

		if (entry->item == item)
			return 0;

		entry = (void *)((uintptr_t)&entry[1] + entry->padding_hdr +
		    entry->size);
	}

	if ((void *)entry > (void *)(phdr_va + part->size))
		return EINVAL;

	if ((uintptr_t)&entry[1] + roundup(size, 8) >
	    phdr_va + phdr->offset_free_cached)
		return EINVAL;

	entry->canary = QCSMEM_PRIV_ENTRY_CANARY;
	entry->item = item;
	entry->size = roundup(size, 8);
	entry->padding_data = entry->size - size;
	entry->padding_hdr = 0;
	membar_producer();

	phdr->offset_free_uncached += sizeof(*entry) + entry->size;

	return 0;
}

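/*
 * Allocate an item from the legacy global heap described by the SMEM
 * header's table of contents.  Only used when no suitable partition
 * exists; the entry is marked allocated after a write barrier.
 */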
static int
qcsmem_alloc_global(struct qcsmem_softc *sc, int item, int size)
{
	struct qcsmem_header *header;
	struct qcsmem_global_entry *entry;

	header = (void *)sc->sc_smem;
	entry = &header->toc[item];
	if (entry->allocated)
		return 0;

	size = roundup(size, 8);
	if (size > header->available)
		return EINVAL;

	entry->offset = header->free_offset;
	entry->size = size;
	membar_producer();
	entry->allocated = 1;

	header->free_offset += size;
	header->available -= size;

	return 0;
}

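/*
 * Public allocation entry point, serialized with the remote processors
 * through the hardware mutex.  Items below QCSMEM_ITEM_FIXED are
 * reserved and cannot be allocated.  A typical consumer allocates an
 * item once and then looks it up, e.g. (illustrative host/item/len
 * values):
 *
 *	if (qcsmem_alloc(host, item, len) == 0)
 *		p = qcsmem_get(host, item, &len);
 */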
int
qcsmem_alloc(int host, int item, int size)
{
	struct qcsmem_softc *sc = qcsmem_sc;
	struct qcsmem_partition *part;
	int ret;

	if (sc == NULL)
		return ENXIO;

	if (item < QCSMEM_ITEM_FIXED)
		return EPERM;

	if (item >= sc->sc_item_count)
		return ENXIO;

	ret = qcmtx_lock(sc, QCSMEM_X1E_LOCK_IDX, 1000);
	if (ret)
		return ret;

	if (host >= 0 && host < QCSMEM_HOST_COUNT &&
	    sc->sc_partitions[host].phdr != NULL) {
		part = &sc->sc_partitions[host];
		ret = qcsmem_alloc_private(sc, part, item, size);
	} else if (sc->sc_global_partition.phdr != NULL) {
		part = &sc->sc_global_partition;
		ret = qcsmem_alloc_private(sc, part, item, size);
	} else {
		ret = qcsmem_alloc_global(sc, item, size);
	}

	qcmtx_unlock(sc, QCSMEM_X1E_LOCK_IDX);

	return ret;
}

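/*
 * Look up an existing item in a private partition.  The uncached list
 * is searched first, from the start of the partition; if the item is
 * not found there, the cached list is searched backwards from the end
 * of the partition.  Returns a pointer to the item data and its usable
 * size (entry size minus trailing padding), or NULL.
 */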
static void *
qcsmem_get_private(struct qcsmem_softc *sc, struct qcsmem_partition *part,
    int item, int *size)
{
	struct qcsmem_private_entry *entry, *last;
	struct qcsmem_partition_header *phdr = part->phdr;
	uintptr_t phdr_va = (uintptr_t)phdr;

	entry = (void *)&phdr[1];
	last = (void *)(phdr_va + phdr->offset_free_uncached);

	while (entry < last) {
		if (entry->canary != QCSMEM_PRIV_ENTRY_CANARY) {
			device_printf(sc->sc_dev, "invalid canary\n");
			return NULL;
		}

		if (entry->item == item) {
			if (size != NULL) {
				if (entry->size > part->size ||
				    entry->padding_data > entry->size)
					return NULL;
				*size = entry->size - entry->padding_data;
			}

			return (void *)((uintptr_t)&entry[1] + entry->padding_hdr);
		}

		entry = (void *)((uintptr_t)&entry[1] + entry->padding_hdr +
		    entry->size);
	}

	if ((uintptr_t)entry > phdr_va + part->size)
		return NULL;

	entry = (void *)(phdr_va + phdr->size -
	    roundup(sizeof(*entry), part->cacheline));
	last = (void *)(phdr_va + phdr->offset_free_cached);

	if ((uintptr_t)entry < phdr_va ||
	    (uintptr_t)last > phdr_va + part->size)
		return NULL;

	while (entry > last) {
		if (entry->canary != QCSMEM_PRIV_ENTRY_CANARY) {
			device_printf(sc->sc_dev, "invalid canary\n");
			return NULL;
		}

		if (entry->item == item) {
			if (size != NULL) {
				if (entry->size > part->size ||
				    entry->padding_data > entry->size)
					return NULL;
				*size = entry->size - entry->padding_data;
			}

			return (void *)((uintptr_t)entry - entry->size);
		}

		entry = (void *)((uintptr_t)entry - entry->size -
		    roundup(sizeof(*entry), part->cacheline));
	}

	if ((uintptr_t)entry < phdr_va)
		return NULL;

	return NULL;
}

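/*
 * Look up an item in the legacy global table of contents.  Entries
 * carrying a foreign aux_base or extending beyond the mapped SMEM
 * region are rejected.
 */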
static void *
qcsmem_get_global(struct qcsmem_softc *sc, int item, int *size)
{
	struct qcsmem_header *header;
	struct qcsmem_global_entry *entry;
	uint32_t aux_base;

	header = (void *)sc->sc_smem;
	entry = &header->toc[item];
	if (!entry->allocated)
		return NULL;

	aux_base = entry->aux_base & QCSMEM_GLOBAL_ENTRY_AUX_BASE_MASK;
	if (aux_base != 0 && aux_base != sc->sc_aux_base)
		return NULL;

	if (entry->size + entry->offset > sc->sc_aux_size)
		return NULL;

	if (size != NULL)
		*size = entry->size;

	return (void *)((uintptr_t)sc->sc_smem + entry->offset);
}

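/*
 * Public lookup entry point, serialized with the remote processors
 * through the hardware mutex.  Returns a pointer into the live SMEM
 * mapping rather than a copy, so callers must treat the data as
 * shared.
 */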
void *
qcsmem_get(int host, int item, int *size)
{
	struct qcsmem_softc *sc = qcsmem_sc;
	struct qcsmem_partition *part;
	void *p = NULL;
	int ret;

	if (sc == NULL)
		return NULL;

	if (item >= sc->sc_item_count)
		return NULL;

	ret = qcmtx_lock(sc, QCSMEM_X1E_LOCK_IDX, 1000);
	if (ret)
		return NULL;

	if (host >= 0 && host < QCSMEM_HOST_COUNT &&
	    sc->sc_partitions[host].phdr != NULL) {
		part = &sc->sc_partitions[host];
		p = qcsmem_get_private(sc, part, item, size);
	} else if (sc->sc_global_partition.phdr != NULL) {
		part = &sc->sc_global_partition;
		p = qcsmem_get_private(sc, part, item, size);
	} else {
		p = qcsmem_get_global(sc, item, size);
	}

	qcmtx_unlock(sc, QCSMEM_X1E_LOCK_IDX);
	return p;
}

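/*
 * Fill an SMEM buffer with explicit volatile stores rather than
 * memset(): 64-bit stores when zeroing a length that is a multiple of
 * eight bytes, byte stores otherwise.
 */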
void
qcsmem_memset(void *ptr, uint8_t val, size_t len)
{
	if (len % 8 == 0 && val == 0) {
		volatile uint64_t *p = ptr;
		size_t n;

		for (n = 0; n < len / 8; n++) {
			p[n] = 0;
		}
	} else {
		volatile uint8_t *p = ptr;
		size_t n;

		for (n = 0; n < len; n++) {
			p[n] = val;
		}
	}
}

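/*
 * Hardware mutex primitive: write our processor id into the lock
 * register and read it back; if the readback does not match, another
 * processor holds the lock.  Unlock by writing zero.
 */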
static int
qcmtx_dolockunlock(struct qcsmem_softc *sc, u_int idx, int lock)
{
	if (idx >= QCMTX_NUM_LOCKS)
		return ENXIO;

	if (lock) {
		MTXWRITE4(sc, QCMTX_OFF(idx), QCMTX_APPS_PROC_ID);
		if (MTXREAD4(sc, QCMTX_OFF(idx)) != QCMTX_APPS_PROC_ID)
			return EAGAIN;
		KASSERT(MTXREAD4(sc, QCMTX_OFF(idx)) == QCMTX_APPS_PROC_ID);
	} else {
		KASSERT(MTXREAD4(sc, QCMTX_OFF(idx)) == QCMTX_APPS_PROC_ID);
		MTXWRITE4(sc, QCMTX_OFF(idx), 0);
	}

	return 0;
}

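/*
 * Try to take the hardware mutex, retrying once per millisecond until
 * it is acquired or timeout_ms milliseconds have elapsed.  Returns 0
 * on success, otherwise an error (EAGAIN if the lock remained held by
 * another processor).
 */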
static int
qcmtx_lock(struct qcsmem_softc *sc, u_int idx, u_int timeout_ms)
{
	int rv = EINVAL;
	u_int n;

	for (n = 0; n < timeout_ms; n++) {
		rv = qcmtx_dolockunlock(sc, idx, 1);
		if (rv != EAGAIN) {
			break;
		}
		delay(1000);
	}

	return rv;
}

static void
qcmtx_unlock(struct qcsmem_softc *sc, u_int idx)
{
	qcmtx_dolockunlock(sc, idx, 0);
}