xref: /netbsd-src/sys/arch/arm/cortex/gic_v2m.c (revision f4ee76b1846811802e86bff9dd05453e48f849ba)
1 /* $NetBSD: gic_v2m.c,v 1.11 2021/03/14 08:09:20 skrll Exp $ */
2 
3 /*-
4  * Copyright (c) 2018 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jared McNeill <jmcneill@invisible.ca>.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #define _INTR_PRIVATE
33 
34 #include <sys/cdefs.h>
35 __KERNEL_RCSID(0, "$NetBSD: gic_v2m.c,v 1.11 2021/03/14 08:09:20 skrll Exp $");
36 
37 #include <sys/param.h>
38 #include <sys/kmem.h>
39 #include <sys/bitops.h>
40 
41 #include <dev/pci/pcireg.h>
42 #include <dev/pci/pcivar.h>
43 
44 #include <arm/pic/picvar.h>
45 #include <arm/cortex/gic_v2m.h>
46 
47 static uint64_t
gic_v2m_msi_addr(struct gic_v2m_frame * frame,int spi)48 gic_v2m_msi_addr(struct gic_v2m_frame *frame, int spi)
49 {
50 	if ((frame->frame_flags & GIC_V2M_FLAG_GRAVITON) != 0)
51 		return frame->frame_reg + ((spi - 32) << 3);
52 
53 	return frame->frame_reg + GIC_MSI_SETSPI;
54 }
55 
56 static uint32_t
gic_v2m_msi_data(struct gic_v2m_frame * frame,int spi)57 gic_v2m_msi_data(struct gic_v2m_frame *frame, int spi)
58 {
59 	if ((frame->frame_flags & GIC_V2M_FLAG_GRAVITON) != 0)
60 		return 0;
61 
62 	return spi;
63 }
64 
65 static int
gic_v2m_msi_alloc_spi(struct gic_v2m_frame * frame,int count,const struct pci_attach_args * pa)66 gic_v2m_msi_alloc_spi(struct gic_v2m_frame *frame, int count,
67     const struct pci_attach_args *pa)
68 {
69 	struct pci_attach_args *new_pa;
70 	int spi, n;
71 
72 	for (spi = frame->frame_base;
73 	     spi < frame->frame_base + frame->frame_count; ) {
74 		if (frame->frame_pa[spi] == NULL) {
75 			for (n = 1; n < count; n++)
76 				if (frame->frame_pa[spi + n] != NULL)
77 					goto next_spi;
78 
79 			for (n = 0; n < count; n++) {
80 				new_pa = kmem_alloc(sizeof(*new_pa), KM_SLEEP);
81 				memcpy(new_pa, pa, sizeof(*new_pa));
82 				frame->frame_pa[spi + n] = new_pa;
83 			}
84 
85 			return spi;
86 		}
87 next_spi:
88 		spi += count;
89 	}
90 
91 	return -1;
92 }
93 
94 static void
gic_v2m_msi_free_spi(struct gic_v2m_frame * frame,int spi)95 gic_v2m_msi_free_spi(struct gic_v2m_frame *frame, int spi)
96 {
97 	struct pci_attach_args *pa;
98 
99 	pa = frame->frame_pa[spi];
100 	frame->frame_pa[spi] = NULL;
101 
102 	if (pa != NULL)
103 		kmem_free(pa, sizeof(*pa));
104 }
105 
106 static int
gic_v2m_msi_available_spi(struct gic_v2m_frame * frame)107 gic_v2m_msi_available_spi(struct gic_v2m_frame *frame)
108 {
109 	int spi, n;
110 
111 	for (spi = frame->frame_base, n = 0;
112 	     spi < frame->frame_base + frame->frame_count;
113 	     spi++) {
114 		if (frame->frame_pa[spi] == NULL)
115 			n++;
116 	}
117 
118 	return n;
119 }
120 
/*
 * gic_v2m_msi_enable --
 *	Program the device's PCI MSI capability so that "count" vectors
 *	starting at "spi" target this V2M frame's doorbell, then set the
 *	MSI enable bit.
 */
static void
gic_v2m_msi_enable(struct gic_v2m_frame *frame, int spi, int count)
{
	const struct pci_attach_args *pa = frame->frame_pa[spi];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL))
		panic("gic_v2m_msi_enable: device is not MSI-capable");

	/* Disable MSI delivery while the capability is reprogrammed. */
	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	ctl &= ~PCI_MSI_CTL_MSI_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);

	/*
	 * Multiple Message Enable is log2-encoded; "count" is expected
	 * to be a power of two (NOTE(review): not checked here — confirm
	 * callers guarantee this before ilog2()).
	 */
	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	ctl &= ~PCI_MSI_CTL_MME_MASK;
	ctl |= __SHIFTIN(ilog2(count), PCI_MSI_CTL_MME_MASK);
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);

	const uint64_t addr = gic_v2m_msi_addr(frame, spi);
	const uint32_t data = gic_v2m_msi_data(frame, spi);

	/* Use the 64-bit register layout when the device advertises it. */
	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	if (ctl & PCI_MSI_CTL_64BIT_ADDR) {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_LO,
		    addr & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_HI,
		    (addr >> 32) & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA64, data);
	} else {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR,
		    addr & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA, data);
	}
	/* All vectors programmed; turn MSI back on. */
	ctl |= PCI_MSI_CTL_MSI_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
}
160 
161 static void
gic_v2m_msi_disable(struct gic_v2m_frame * frame,int spi)162 gic_v2m_msi_disable(struct gic_v2m_frame *frame, int spi)
163 {
164 	const struct pci_attach_args *pa = frame->frame_pa[spi];
165 	pci_chipset_tag_t pc = pa->pa_pc;
166 	pcitag_t tag = pa->pa_tag;
167 	pcireg_t ctl;
168 	int off;
169 
170 	if (!pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL))
171 		panic("gic_v2m_msi_disable: device is not MSI-capable");
172 
173 	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
174 	ctl &= ~PCI_MSI_CTL_MSI_ENABLE;
175 	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
176 }
177 
/*
 * gic_v2m_msix_enable --
 *	Program MSI-X table entry "msix_vec" (table mapped at bst/bsh)
 *	to target this V2M frame's doorbell for "spi", unmask the entry,
 *	and set the function-wide MSI-X enable bit.
 */
static void
gic_v2m_msix_enable(struct gic_v2m_frame *frame, int spi, int msix_vec,
    bus_space_tag_t bst, bus_space_handle_t bsh)
{
	const struct pci_attach_args *pa = frame->frame_pa[spi];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	uint32_t val;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL))
		panic("gic_v2m_msix_enable: device is not MSI-X-capable");

	/* Disable MSI-X while the table entry is being rewritten. */
	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl &= ~PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);

	/* Write the entry's address/data, then clear its per-vector mask. */
	const uint64_t addr = gic_v2m_msi_addr(frame, spi);
	const uint32_t data = gic_v2m_msi_data(frame, spi);
	const uint64_t entry_base = PCI_MSIX_TABLE_ENTRY_SIZE * msix_vec;
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_LO, (uint32_t)addr);
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_HI, (uint32_t)(addr >> 32));
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_DATA, data);
	val = bus_space_read_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL);
	val &= ~PCI_MSIX_VECTCTL_MASK;
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL, val);

	/* Entry programmed and unmasked; re-enable MSI-X. */
	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl |= PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
}
210 
211 static void
gic_v2m_msix_disable(struct gic_v2m_frame * frame,int spi)212 gic_v2m_msix_disable(struct gic_v2m_frame *frame, int spi)
213 {
214 	const struct pci_attach_args *pa = frame->frame_pa[spi];
215 	pci_chipset_tag_t pc = pa->pa_pc;
216 	pcitag_t tag = pa->pa_tag;
217 	pcireg_t ctl;
218 	int off;
219 
220 	if (!pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL))
221 		panic("gic_v2m_msix_disable: device is not MSI-X-capable");
222 
223 	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
224 	ctl &= ~PCI_MSIX_CTL_ENABLE;
225 	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
226 }
227 
228 static pci_intr_handle_t *
gic_v2m_msi_alloc(struct arm_pci_msi * msi,int * count,const struct pci_attach_args * pa,bool exact)229 gic_v2m_msi_alloc(struct arm_pci_msi *msi, int *count,
230     const struct pci_attach_args *pa, bool exact)
231 {
232 	struct gic_v2m_frame * const frame = msi->msi_priv;
233 	pci_intr_handle_t *vectors;
234 	int n, off;
235 
236 	if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI, &off, NULL))
237 		return NULL;
238 
239 	const int avail = gic_v2m_msi_available_spi(frame);
240 	if (exact && *count > avail)
241 		return NULL;
242 
243 	while (*count > avail)
244 		(*count) >>= 1;
245 
246 	if (*count == 0)
247 		return NULL;
248 
249 	const int spi_base = gic_v2m_msi_alloc_spi(frame, *count, pa);
250 	if (spi_base == -1)
251 		return NULL;
252 
253 	vectors = kmem_alloc(sizeof(*vectors) * *count, KM_SLEEP);
254 	for (n = 0; n < *count; n++) {
255 		const int spi = spi_base + n;
256 		vectors[n] = ARM_PCI_INTR_MSI |
257 		    __SHIFTIN(spi, ARM_PCI_INTR_IRQ) |
258 		    __SHIFTIN(n, ARM_PCI_INTR_MSI_VEC) |
259 		    __SHIFTIN(msi->msi_id, ARM_PCI_INTR_FRAME);
260 	}
261 
262 	gic_v2m_msi_enable(frame, spi_base, *count);
263 
264 	return vectors;
265 }
266 
267 static pci_intr_handle_t *
gic_v2m_msix_alloc(struct arm_pci_msi * msi,u_int * table_indexes,int * count,const struct pci_attach_args * pa,bool exact)268 gic_v2m_msix_alloc(struct arm_pci_msi *msi, u_int *table_indexes, int *count,
269     const struct pci_attach_args *pa, bool exact)
270 {
271 	struct gic_v2m_frame * const frame = msi->msi_priv;
272 	pci_intr_handle_t *vectors;
273 	bus_space_tag_t bst;
274 	bus_space_handle_t bsh;
275 	bus_size_t bsz;
276 	uint32_t table_offset, table_size;
277 	int n, off, bar, error;
278 	pcireg_t tbl;
279 
280 	if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &off, NULL))
281 		return NULL;
282 
283 	const int avail = gic_v2m_msi_available_spi(frame);
284 	if (exact && *count > avail)
285 		return NULL;
286 
287 	while (*count > avail) {
288 		if (avail < *count)
289 			(*count) >>= 1;
290 	}
291 	if (*count == 0)
292 		return NULL;
293 
294 	tbl = pci_conf_read(pa->pa_pc, pa->pa_tag, off + PCI_MSIX_TBLOFFSET);
295 	bar = PCI_BAR0 + (4 * (tbl & PCI_MSIX_TBLBIR_MASK));
296 	table_offset = tbl & PCI_MSIX_TBLOFFSET_MASK;
297 	table_size = pci_msix_count(pa->pa_pc, pa->pa_tag) * PCI_MSIX_TABLE_ENTRY_SIZE;
298 	if (table_size == 0)
299 		return NULL;
300 
301 	error = pci_mapreg_submap(pa, bar, pci_mapreg_type(pa->pa_pc, pa->pa_tag, bar),
302 	    BUS_SPACE_MAP_LINEAR, roundup(table_size, PAGE_SIZE), table_offset,
303 	    &bst, &bsh, NULL, &bsz);
304 	if (error)
305 		return NULL;
306 
307 	const int spi_base = gic_v2m_msi_alloc_spi(frame, *count, pa);
308 	if (spi_base == -1) {
309 		bus_space_unmap(bst, bsh, bsz);
310 		return NULL;
311 	}
312 
313 	vectors = kmem_alloc(sizeof(*vectors) * *count, KM_SLEEP);
314 	for (n = 0; n < *count; n++) {
315 		const int spi = spi_base + n;
316 		const int msix_vec = table_indexes ? table_indexes[n] : n;
317 		vectors[msix_vec] = ARM_PCI_INTR_MSIX |
318 		    __SHIFTIN(spi, ARM_PCI_INTR_IRQ) |
319 		    __SHIFTIN(msix_vec, ARM_PCI_INTR_MSI_VEC) |
320 		    __SHIFTIN(msi->msi_id, ARM_PCI_INTR_FRAME);
321 
322 		gic_v2m_msix_enable(frame, spi, msix_vec, bst, bsh);
323 	}
324 
325 	bus_space_unmap(bst, bsh, bsz);
326 
327 	return vectors;
328 }
329 
330 static void *
gic_v2m_msi_intr_establish(struct arm_pci_msi * msi,pci_intr_handle_t ih,int ipl,int (* func)(void *),void * arg,const char * xname)331 gic_v2m_msi_intr_establish(struct arm_pci_msi *msi,
332     pci_intr_handle_t ih, int ipl, int (*func)(void *), void *arg, const char *xname)
333 {
334 	struct gic_v2m_frame * const frame = msi->msi_priv;
335 
336 	const int spi = __SHIFTOUT(ih, ARM_PCI_INTR_IRQ);
337 	const int mpsafe = (ih & ARM_PCI_INTR_MPSAFE) ? IST_MPSAFE : 0;
338 
339 	return pic_establish_intr(frame->frame_pic, spi, ipl,
340 	    IST_EDGE | mpsafe, func, arg, xname);
341 }
342 
343 static void
gic_v2m_msi_intr_release(struct arm_pci_msi * msi,pci_intr_handle_t * pih,int count)344 gic_v2m_msi_intr_release(struct arm_pci_msi *msi, pci_intr_handle_t *pih,
345     int count)
346 {
347 	struct gic_v2m_frame * const frame = msi->msi_priv;
348 	int n;
349 
350 	for (n = 0; n < count; n++) {
351 		const int spi = __SHIFTOUT(pih[n], ARM_PCI_INTR_IRQ);
352 		if (pih[n] & ARM_PCI_INTR_MSIX)
353 			gic_v2m_msix_disable(frame, spi);
354 		if (pih[n] & ARM_PCI_INTR_MSI)
355 			gic_v2m_msi_disable(frame, spi);
356 		gic_v2m_msi_free_spi(frame, spi);
357 		struct intrsource * const is =
358 		    frame->frame_pic->pic_sources[spi];
359 		if (is != NULL)
360 			pic_disestablish_source(is);
361 	}
362 }
363 
364 int
gic_v2m_init(struct gic_v2m_frame * frame,device_t dev,uint32_t frame_id)365 gic_v2m_init(struct gic_v2m_frame *frame, device_t dev, uint32_t frame_id)
366 {
367 	struct arm_pci_msi *msi = &frame->frame_msi;
368 
369 	msi->msi_dev = dev;
370 	msi->msi_priv = frame;
371 	msi->msi_alloc = gic_v2m_msi_alloc;
372 	msi->msix_alloc = gic_v2m_msix_alloc;
373 	msi->msi_intr_establish = gic_v2m_msi_intr_establish;
374 	msi->msi_intr_release = gic_v2m_msi_intr_release;
375 
376 	return arm_pci_msi_add(msi);
377 }
378