/* $NetBSD: gicv3_its.c,v 1.40 2024/12/15 11:24:14 jmcneill Exp $ */

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jared McNeill <jmcneill@invisible.ca>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gicv3_its.c,v 1.40 2024/12/15 11:24:14 jmcneill Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/bitops.h>

#include <uvm/uvm.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/cpufunc.h>

#include <arm/pic/picvar.h>
#include <arm/cortex/gicv3_its.h>

#ifdef ITS_DEBUG
#define DPRINTF(x)	printf x
#else
#define DPRINTF(x)	__nothing
#endif

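/*
 * Overview, per the GICv3 architecture specification (Arm IHI 0069):
 * the ITS translates MSI writes into LPIs.  A device writes its EventID
 * to GITS_TRANSLATER; the ITS uses the write's DeviceID to locate the
 * device's Interrupt Translation Table (ITT), looks up the EventID
 * there to obtain a physical INTID (pINTID) and a collection ID (ICID),
 * and forwards the LPI to the redistributor that the collection maps
 * to.  The translation tables are programmed by enqueueing 32-byte
 * commands (MAPD, MAPTI, MAPC, ...) on a ring shared with the hardware.
 */
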
/*
 * ITS translation table sizes
 */
#define	GITS_COMMANDS_SIZE	0x1000
#define	GITS_COMMANDS_ALIGN	0x10000

#define	GITS_ITT_ALIGN		0x100

#define	GITS_INDIRECT_ENTRY_SIZE	8

/*
 * IIDR values used for errata
 */
#define GITS_IIDR_PID_CAVIUM_THUNDERX	0xa1
#define GITS_IIDR_IMP_CAVIUM		0x34c
#define	GITS_IIDR_CAVIUM_ERRATA_MASK	(GITS_IIDR_Implementor|GITS_IIDR_ProductID|GITS_IIDR_Variant)
#define	GITS_IIDR_CAVIUM_ERRATA_VALUE							\
		(__SHIFTIN(GITS_IIDR_IMP_CAVIUM, GITS_IIDR_Implementor) |		\
		 __SHIFTIN(GITS_IIDR_PID_CAVIUM_THUNDERX, GITS_IIDR_ProductID) |	\
		 __SHIFTIN(0, GITS_IIDR_Variant))

static const char * gits_cache_type[] = {
	[GITS_Cache_DEVICE_nGnRnE]	= "Device-nGnRnE",
	[GITS_Cache_NORMAL_NC]		= "Non-cacheable",
	[GITS_Cache_NORMAL_RA_WT]	= "Cacheable RA WT",
	[GITS_Cache_NORMAL_RA_WB]	= "Cacheable RA WB",
	[GITS_Cache_NORMAL_WA_WT]	= "Cacheable WA WT",
	[GITS_Cache_NORMAL_WA_WB]	= "Cacheable WA WB",
	[GITS_Cache_NORMAL_RA_WA_WT]	= "Cacheable RA WA WT",
	[GITS_Cache_NORMAL_RA_WA_WB]	= "Cacheable RA WA WB",
};

static const char * gits_share_type[] = {
	[GITS_Shareability_NS]		= "Non-shareable",
	[GITS_Shareability_IS]		= "Inner shareable",
	[GITS_Shareability_OS]		= "Outer shareable",
	[3]				= "(Reserved)",
};

static inline uint32_t
gits_read_4(struct gicv3_its *its, bus_size_t reg)
{
	return bus_space_read_4(its->its_bst, its->its_bsh, reg);
}

static inline void
gits_write_4(struct gicv3_its *its, bus_size_t reg, uint32_t val)
{
	bus_space_write_4(its->its_bst, its->its_bsh, reg, val);
}

static inline uint64_t
gits_read_8(struct gicv3_its *its, bus_size_t reg)
{
	return bus_space_read_8(its->its_bst, its->its_bsh, reg);
}

static inline void
gits_write_8(struct gicv3_its *its, bus_size_t reg, uint64_t val)
{
	bus_space_write_8(its->its_bst, its->its_bsh, reg, val);
}

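/*
 * Enqueue a single command on the ITS command queue.  Each command is
 * 32 bytes (four doublewords, stored little-endian).  The command is
 * written at the current GITS_CWRITER offset and published to the ITS
 * by advancing GITS_CWRITER past it; the ITS consumes commands up to
 * that offset, advancing GITS_CREADR as it goes.
 */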
static int
gits_command(struct gicv3_its *its, const struct gicv3_its_command *cmd)
{
	uint64_t cwriter, creadr;
	u_int woff;

	creadr = gits_read_8(its, GITS_CREADR);
	if (ISSET(creadr, GITS_CREADR_Stalled)) {
		DPRINTF(("ITS: stalled! GITS_CREADR = 0x%lx\n", creadr));
		return EIO;
	}

	cwriter = gits_read_8(its, GITS_CWRITER);
	woff = cwriter & GITS_CWRITER_Offset;

	uint64_t *dw = (uint64_t *)(its->its_cmd.base + woff);
	for (int i = 0; i < __arraycount(cmd->dw); i++) {
		dw[i] = htole64(cmd->dw[i]);
		DPRINTF(("ITS:     dw[%u] = 0x%016lx\n", i, cmd->dw[i]));
	}

	if (its->its_cmd_flush) {
		cpu_dcache_wb_range((vaddr_t)dw, sizeof(cmd->dw));
	}
	dsb(sy);

	woff += sizeof(cmd->dw);
	if (woff == its->its_cmd.len)
		woff = 0;

	gits_write_8(its, GITS_CWRITER, woff);

	return 0;
}

static int
gits_command_mapc(struct gicv3_its *its, uint16_t icid, uint64_t rdbase, bool v)
{
	struct gicv3_its_command cmd;

	KASSERT((rdbase & 0xffff) == 0);

	/*
	 * Map a collection table entry (ICID) to the target redistributor (RDbase).
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_MAPC;
	cmd.dw[2] = icid;
	if (v) {
		cmd.dw[2] |= rdbase;
		cmd.dw[2] |= __BIT(63);
	}

	DPRINTF(("ITS #%u: MAPC icid 0x%x rdbase 0x%lx valid %u\n",
	    its->its_id, icid, rdbase, v));

	return gits_command(its, &cmd);
}

static int
gits_command_mapd(struct gicv3_its *its, uint32_t deviceid, uint64_t itt_addr, u_int size, bool v)
{
	struct gicv3_its_command cmd;

	KASSERT((itt_addr & 0xff) == 0);

	/*
	 * Map a device table entry (DeviceID) to its associated ITT (ITT_addr).
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_MAPD | ((uint64_t)deviceid << 32);
	if (v) {
		cmd.dw[1] = uimax(1, size) - 1;
		cmd.dw[2] = itt_addr | __BIT(63);
	}

	DPRINTF(("ITS #%u: MAPD deviceid 0x%x itt_addr 0x%lx size %u valid %u\n",
	    its->its_id, deviceid, itt_addr, size, v));

	return gits_command(its, &cmd);
}

static int
gits_command_mapti(struct gicv3_its *its, uint32_t deviceid, uint32_t eventid, uint32_t pintid, uint16_t icid)
{
	struct gicv3_its_command cmd;

	/*
	 * Map the event defined by EventID and DeviceID to its associated ITE, defined by ICID and pINTID
	 * in the ITT associated with DeviceID.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_MAPTI | ((uint64_t)deviceid << 32);
	cmd.dw[1] = eventid | ((uint64_t)pintid << 32);
	cmd.dw[2] = icid;

	DPRINTF(("ITS #%u: MAPTI deviceid 0x%x eventid 0x%x pintid 0x%x icid 0x%x\n",
	    its->its_id, deviceid, eventid, pintid, icid));

	return gits_command(its, &cmd);
}

static int
gits_command_movi(struct gicv3_its *its, uint32_t deviceid, uint32_t eventid, uint16_t icid)
{
	struct gicv3_its_command cmd;

	/*
	 * Update the ICID field in the ITT entry for the event defined by DeviceID and
	 * EventID.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_MOVI | ((uint64_t)deviceid << 32);
	cmd.dw[1] = eventid;
	cmd.dw[2] = icid;

	DPRINTF(("ITS #%u: MOVI deviceid 0x%x eventid 0x%x icid 0x%x\n",
	    its->its_id, deviceid, eventid, icid));

	return gits_command(its, &cmd);
}

static int
gits_command_inv(struct gicv3_its *its, uint32_t deviceid, uint32_t eventid)
{
	struct gicv3_its_command cmd;

	/*
	 * Ensure any caching in the redistributors associated with the specified
	 * EventID is consistent with the LPI configuration tables.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_INV | ((uint64_t)deviceid << 32);
	cmd.dw[1] = eventid;

	DPRINTF(("ITS #%u: INV deviceid 0x%x eventid 0x%x\n",
	    its->its_id, deviceid, eventid));

	return gits_command(its, &cmd);
}

static int
gits_command_invall(struct gicv3_its *its, uint16_t icid)
{
	struct gicv3_its_command cmd;

	/*
	 * Ensure any caching associated with this ICID is consistent with LPI
	 * configuration tables for all redistributors.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_INVALL;
	cmd.dw[2] = icid;

	DPRINTF(("ITS #%u: INVALL icid 0x%x\n", its->its_id, icid));

	return gits_command(its, &cmd);
}

static int
gits_command_sync(struct gicv3_its *its, uint64_t rdbase)
{
	struct gicv3_its_command cmd;

	KASSERT((rdbase & 0xffff) == 0);

	/*
	 * Ensure all outstanding ITS operations associated with physical interrupts
	 * for the specified redistributor (RDbase) are globally observed before
	 * further ITS commands are executed.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_SYNC;
	cmd.dw[2] = rdbase;

	DPRINTF(("ITS #%u: SYNC rdbase 0x%lx\n", its->its_id, rdbase));

	return gits_command(its, &cmd);
}

#if 0
static int
gits_command_int(struct gicv3_its *its, uint32_t deviceid, uint32_t eventid)
{
	struct gicv3_its_command cmd;

	/*
	 * Translate the deviceid and eventid into an icid and pintid through
	 * the device table and ITT. Mark the pintid as pending
	 * on the redistributor.
	 * If the interrupt is not configured the command queue stalls.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.dw[0] = GITS_CMD_INT | ((uint64_t)deviceid << 32);
	cmd.dw[1] = eventid;

	DPRINTF(("ITS #%u: INT deviceid 0x%x eventid 0x%x\n",
	    its->its_id, deviceid, eventid));

	return gits_command(its, &cmd);
}
#endif

static int
gits_wait(struct gicv3_its *its)
{
	u_int woff, roff;
	int retry = 100000;

	/*
	 * The ITS command queue is empty when CWRITER and CREADR specify the
	 * same base address offset value.
	 */
	for (; retry > 0; retry--) {
		woff = gits_read_8(its, GITS_CWRITER) & GITS_CWRITER_Offset;
		roff = gits_read_8(its, GITS_CREADR) & GITS_CREADR_Offset;
		if (woff == roff)
			break;
		delay(100);
	}
	if (retry == 0) {
		device_printf(its->its_gic->sc_dev,
		    "ITS command queue timeout! CREADR=0x%lx CWRITER=0x%lx\n",
		    gits_read_8(its, GITS_CREADR), gits_read_8(its, GITS_CWRITER));
		return ETIMEDOUT;
	}

	return 0;
}

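/*
 * LPI numbers are allocated from the GIC's shared sc_lpi_pool vmem
 * arena.  Index 0 in the arena corresponds to INTID pic_irqbase (the
 * first LPI), so callers see full INTIDs while the its_pa[]/its_devid[]
 * bookkeeping arrays are indexed from zero.
 */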
static int
gicv3_its_msi_alloc_lpi(struct gicv3_its *its,
    const struct pci_attach_args *pa)
{
	struct pci_attach_args *new_pa;
	vmem_addr_t n;

	KASSERT(its->its_gic->sc_lpi_pool != NULL);

	if (vmem_alloc(its->its_gic->sc_lpi_pool, 1, VM_INSTANTFIT|VM_SLEEP, &n) != 0)
		return -1;

	KASSERT(its->its_pa[n] == NULL);

	new_pa = kmem_alloc(sizeof(*new_pa), KM_SLEEP);
	memcpy(new_pa, pa, sizeof(*new_pa));
	its->its_pa[n] = new_pa;
	return n + its->its_pic->pic_irqbase;
}

static void
gicv3_its_msi_free_lpi(struct gicv3_its *its, int lpi)
{
	struct pci_attach_args *pa;

	KASSERT(its->its_gic->sc_lpi_pool != NULL);
	KASSERT(lpi >= its->its_pic->pic_irqbase);

	pa = its->its_pa[lpi - its->its_pic->pic_irqbase];
	its->its_pa[lpi - its->its_pic->pic_irqbase] = NULL;
	kmem_free(pa, sizeof(*pa));

	vmem_free(its->its_gic->sc_lpi_pool, lpi - its->its_pic->pic_irqbase, 1);
}

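/*
 * Derive the ITS DeviceID for a PCI function.  The base encoding is the
 * PCI requester ID (bus << 8 | device << 3 | function); the
 * pci_get_devid() hook gives the platform a chance to remap this into
 * the DeviceID space the ITS actually sees.
 */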
static uint32_t
gicv3_its_devid(pci_chipset_tag_t pc, pcitag_t tag)
{
	uint32_t devid;
	int b, d, f;

	pci_decompose_tag(pc, tag, &b, &d, &f);

	devid = (b << 8) | (d << 3) | f;

	return pci_get_devid(pc, devid);
}

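/*
 * Ensure DeviceID has a device table entry and an ITT large enough for
 * "count" events.  The ITT is sized for at least two entries and a
 * power-of-two number of vectors, since PCI MSI grants vectors in
 * power-of-two blocks.
 */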
static int
gicv3_its_device_map(struct gicv3_its *its, uint32_t devid, u_int count)
{
	struct gicv3_its_device *dev;
	struct gicv3_its_table *itstab = &its->its_tab_device;
	u_int vectors;
	int error;

	vectors = MAX(2, count);
	while (!powerof2(vectors))
		vectors++;

	const uint64_t typer = gits_read_8(its, GITS_TYPER);
	const u_int itt_entry_size = __SHIFTOUT(typer, GITS_TYPER_ITT_entry_size) + 1;
	const u_int itt_size = roundup(uimax(vectors, 2) * itt_entry_size, GITS_ITT_ALIGN);

	LIST_FOREACH(dev, &its->its_devices, dev_list)
		if (dev->dev_id == devid) {
			return itt_size <= dev->dev_size ? 0 : EEXIST;
		}

	if (itstab->tab_indirect) {
		/* Need to allocate the L2 table. */
		uint64_t *l1_tab = itstab->tab_l1;
		struct gicv3_its_page_table *pt;
		const u_int index = devid / itstab->tab_l2_num_ids;

		pt = kmem_alloc(sizeof(*pt), KM_SLEEP);
		pt->pt_dev_id = devid;
		gicv3_dma_alloc(its->its_gic, &pt->pt_dma, itstab->tab_page_size,
		    itstab->tab_page_size);
		LIST_INSERT_HEAD(&itstab->tab_pt, pt, pt_list);

		if (!itstab->tab_shareable) {
			cpu_dcache_wb_range((vaddr_t)pt->pt_dma.base,
			    itstab->tab_page_size);
		}
		l1_tab[index] = pt->pt_dma.segs[0].ds_addr | GITS_BASER_Valid;
		if (!itstab->tab_shareable) {
			cpu_dcache_wb_range((vaddr_t)&l1_tab[index],
			    sizeof(l1_tab[index]));
		}
		dsb(sy);

		DPRINTF(("ITS: Allocated L2 entry at index %u for devid 0x%x\n",
		    index, devid));
	}

	dev = kmem_alloc(sizeof(*dev), KM_SLEEP);
	dev->dev_id = devid;
	dev->dev_size = itt_size;
	gicv3_dma_alloc(its->its_gic, &dev->dev_itt, itt_size, GITS_ITT_ALIGN);
	LIST_INSERT_HEAD(&its->its_devices, dev, dev_list);

	if (its->its_cmd_flush) {
		cpu_dcache_wb_range((vaddr_t)dev->dev_itt.base, itt_size);
	}
	dsb(sy);

	/*
	 * Map the device to the ITT
	 */
	const u_int size = __SHIFTOUT(typer, GITS_TYPER_ID_bits) + 1;
	mutex_enter(its->its_lock);
	error = gits_command_mapd(its, devid, dev->dev_itt.segs[0].ds_addr, size, true);
	if (error == 0) {
		error = gits_wait(its);
	}
	mutex_exit(its->its_lock);

	return error;
}

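/*
 * Program a device's MSI capability to target this ITS: the message
 * address is the ITS GITS_TRANSLATER doorbell and the message data is
 * the EventID (the LPI number relative to pic_irqbase).
 */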
static void
gicv3_its_msi_enable(struct gicv3_its *its, int lpi, int count)
{
	const struct pci_attach_args *pa = its->its_pa[lpi - its->its_pic->pic_irqbase];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL))
		panic("gicv3_its_msi_enable: device is not MSI-capable");

	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	ctl &= ~PCI_MSI_CTL_MME_MASK;
	ctl |= __SHIFTIN(ilog2(count), PCI_MSI_CTL_MME_MASK);
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);

	const uint64_t addr = its->its_base + GITS_TRANSLATER;
	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	if (ctl & PCI_MSI_CTL_64BIT_ADDR) {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_LO,
		    addr & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_HI,
		    (addr >> 32) & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA64,
		    lpi - its->its_pic->pic_irqbase);
	} else {
		KASSERT((addr >> 32) == 0);
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR,
		    addr & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA,
		    lpi - its->its_pic->pic_irqbase);
	}
	ctl |= PCI_MSI_CTL_MSI_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
}

static void
gicv3_its_msi_disable(struct gicv3_its *its, int lpi)
{
	const struct pci_attach_args *pa = its->its_pa[lpi - its->its_pic->pic_irqbase];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL))
		panic("gicv3_its_msi_disable: device is not MSI-capable");

	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	ctl &= ~PCI_MSI_CTL_MSI_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
}

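/*
 * Program one MSI-X table entry to target this ITS and unmask it.  As
 * with MSI, the address is GITS_TRANSLATER and the data is the EventID;
 * unlike MSI, each MSI-X vector is programmed individually through the
 * device's MSI-X table.
 */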
static void
gicv3_its_msix_enable(struct gicv3_its *its, int lpi, int msix_vec,
    bus_space_tag_t bst, bus_space_handle_t bsh)
{
	const struct pci_attach_args *pa = its->its_pa[lpi - its->its_pic->pic_irqbase];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	uint32_t val;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL))
		panic("gicv3_its_msix_enable: device is not MSI-X-capable");

	const uint64_t addr = its->its_base + GITS_TRANSLATER;
	const uint64_t entry_base = PCI_MSIX_TABLE_ENTRY_SIZE * msix_vec;
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_LO, (uint32_t)addr);
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_HI, (uint32_t)(addr >> 32));
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_DATA, lpi - its->its_pic->pic_irqbase);
	val = bus_space_read_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL);
	val &= ~PCI_MSIX_VECTCTL_MASK;
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL, val);

	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl |= PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
}

static void
gicv3_its_msix_disable(struct gicv3_its *its, int lpi)
{
	const struct pci_attach_args *pa = its->its_pa[lpi - its->its_pic->pic_irqbase];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL))
		panic("gicv3_its_msix_disable: device is not MSI-X-capable");

	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl &= ~PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
}

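/*
 * arm_pci_msi allocation hook for MSI.  For each requested vector,
 * allocate an LPI, bind it to CPU 0 initially, and issue MAPTI to map
 * (DeviceID, EventID) to the LPI and collection, followed by SYNC
 * against the target redistributor.
 */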
static pci_intr_handle_t *
gicv3_its_msi_alloc(struct arm_pci_msi *msi, int *count,
    const struct pci_attach_args *pa, bool exact)
{
	struct gicv3_its * const its = msi->msi_priv;
	struct cpu_info * const ci = cpu_lookup(0);
	pci_intr_handle_t *vectors;
	int n, off, error;

	if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI, &off, NULL))
		return NULL;

	const uint64_t typer = gits_read_8(its, GITS_TYPER);
	const u_int id_bits = __SHIFTOUT(typer, GITS_TYPER_ID_bits) + 1;
	if (*count == 0 || *count > (1 << id_bits))
		return NULL;

	const uint32_t devid = gicv3_its_devid(pa->pa_pc, pa->pa_tag);

	if (gicv3_its_device_map(its, devid, *count) != 0)
		return NULL;

	vectors = kmem_alloc(sizeof(*vectors) * *count, KM_SLEEP);
	mutex_enter(its->its_lock);
	for (n = 0; n < *count; n++) {
		const int lpi = gicv3_its_msi_alloc_lpi(its, pa);
		KASSERT(lpi >= 0);
		vectors[n] = ARM_PCI_INTR_MSI |
		    __SHIFTIN(lpi, ARM_PCI_INTR_IRQ) |
		    __SHIFTIN(n, ARM_PCI_INTR_MSI_VEC) |
		    __SHIFTIN(msi->msi_id, ARM_PCI_INTR_FRAME);

		if (n == 0)
			gicv3_its_msi_enable(its, lpi, *count);

		/*
		 * Record devid and target PE
		 */
		its->its_devid[lpi - its->its_pic->pic_irqbase] = devid;
		its->its_targets[lpi - its->its_pic->pic_irqbase] = ci;

		/*
		 * Map event
		 */
		gits_command_mapti(its, devid, lpi - its->its_pic->pic_irqbase, lpi, cpu_index(ci));
		gits_command_sync(its, its->its_rdbase[cpu_index(ci)]);
	}
	error = gits_wait(its);
	mutex_exit(its->its_lock);

	if (error != 0) {
		kmem_free(vectors, sizeof(*vectors) * *count);
		vectors = NULL;
	}

	return vectors;
}

static pci_intr_handle_t *
gicv3_its_msix_alloc(struct arm_pci_msi *msi, u_int *table_indexes, int *count,
    const struct pci_attach_args *pa, bool exact)
{
	struct gicv3_its * const its = msi->msi_priv;
	struct cpu_info *ci = cpu_lookup(0);
	pci_intr_handle_t *vectors;
	bus_space_tag_t bst;
	bus_space_handle_t bsh;
	bus_size_t bsz;
	uint32_t table_offset, table_size;
	int n, off, bar, error;
	pcireg_t tbl;

	if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &off, NULL))
		return NULL;

	const uint64_t typer = gits_read_8(its, GITS_TYPER);
	const u_int id_bits = __SHIFTOUT(typer, GITS_TYPER_ID_bits) + 1;
	if (*count == 0 || *count > (1 << id_bits))
		return NULL;

	tbl = pci_conf_read(pa->pa_pc, pa->pa_tag, off + PCI_MSIX_TBLOFFSET);
	bar = PCI_BAR0 + (4 * (tbl & PCI_MSIX_TBLBIR_MASK));
	table_offset = tbl & PCI_MSIX_TBLOFFSET_MASK;
	table_size = pci_msix_count(pa->pa_pc, pa->pa_tag) * PCI_MSIX_TABLE_ENTRY_SIZE;
	if (table_size == 0)
		return NULL;

	error = pci_mapreg_submap(pa, bar, pci_mapreg_type(pa->pa_pc, pa->pa_tag, bar),
	    BUS_SPACE_MAP_LINEAR, roundup(table_size, PAGE_SIZE), table_offset,
	    &bst, &bsh, NULL, &bsz);
	if (error)
		return NULL;

	const uint32_t devid = gicv3_its_devid(pa->pa_pc, pa->pa_tag);

	if (gicv3_its_device_map(its, devid, *count) != 0) {
		bus_space_unmap(bst, bsh, bsz);
		return NULL;
	}

	vectors = kmem_alloc(sizeof(*vectors) * *count, KM_SLEEP);
	mutex_enter(its->its_lock);
	for (n = 0; n < *count; n++) {
		const int lpi = gicv3_its_msi_alloc_lpi(its, pa);
		KASSERT(lpi >= 0);
		const int msix_vec = table_indexes ? table_indexes[n] : n;
		vectors[msix_vec] = ARM_PCI_INTR_MSIX |
		    __SHIFTIN(lpi, ARM_PCI_INTR_IRQ) |
		    __SHIFTIN(msix_vec, ARM_PCI_INTR_MSI_VEC) |
		    __SHIFTIN(msi->msi_id, ARM_PCI_INTR_FRAME);

		gicv3_its_msix_enable(its, lpi, msix_vec, bst, bsh);

		/*
		 * Record devid and target PE
		 */
		its->its_devid[lpi - its->its_pic->pic_irqbase] = devid;
		its->its_targets[lpi - its->its_pic->pic_irqbase] = ci;

		/*
		 * Map event
		 */
		gits_command_mapti(its, devid, lpi - its->its_pic->pic_irqbase, lpi, cpu_index(ci));
		gits_command_sync(its, its->its_rdbase[cpu_index(ci)]);
	}
	gits_wait(its);
	mutex_exit(its->its_lock);

	bus_space_unmap(bst, bsh, bsz);

	return vectors;
}

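/*
 * Establish the handler for an allocated vector, then issue INV so the
 * redistributor re-reads the LPI configuration (priority and enable
 * state) for this event.
 */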
static void *
gicv3_its_msi_intr_establish(struct arm_pci_msi *msi,
    pci_intr_handle_t ih, int ipl, int (*func)(void *), void *arg, const char *xname)
{
	struct gicv3_its * const its = msi->msi_priv;
	void *intrh;

	const int lpi = __SHIFTOUT(ih, ARM_PCI_INTR_IRQ);
	const int mpsafe = (ih & ARM_PCI_INTR_MPSAFE) ? IST_MPSAFE : 0;

	intrh = pic_establish_intr(its->its_pic, lpi - its->its_pic->pic_irqbase, ipl,
	    IST_EDGE | mpsafe, func, arg, xname);
	if (intrh == NULL)
		return NULL;

	/* Invalidate LPI configuration tables */
	KASSERT(its->its_pa[lpi - its->its_pic->pic_irqbase] != NULL);
	const uint32_t devid = its->its_devid[lpi - its->its_pic->pic_irqbase];
	gits_command_inv(its, devid, lpi - its->its_pic->pic_irqbase);

	return intrh;
}

static void
gicv3_its_msi_intr_release(struct arm_pci_msi *msi, pci_intr_handle_t *pih,
    int count)
{
	struct gicv3_its * const its = msi->msi_priv;
	int n;

	for (n = 0; n < count; n++) {
		const int lpi = __SHIFTOUT(pih[n], ARM_PCI_INTR_IRQ);
		KASSERT(lpi >= its->its_pic->pic_irqbase);
		if (pih[n] & ARM_PCI_INTR_MSIX)
			gicv3_its_msix_disable(its, lpi);
		if (pih[n] & ARM_PCI_INTR_MSI)
			gicv3_its_msi_disable(its, lpi);
		gicv3_its_msi_free_lpi(its, lpi);
		its->its_targets[lpi - its->its_pic->pic_irqbase] = NULL;
		its->its_devid[lpi - its->its_pic->pic_irqbase] = 0;
		struct intrsource * const is =
		    its->its_pic->pic_sources[lpi - its->its_pic->pic_irqbase];
		if (is != NULL)
			pic_disestablish_source(is);
	}
}

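/*
 * Allocate the command queue and point GITS_CBASER at it.  The queue
 * must be 64KB aligned and the CBASER Size field is encoded as the
 * number of 4KB pages minus one.  If the ITS does not support an
 * inner-shareable cacheable mapping, fall back to non-cacheable and
 * remember that command writes must be cleaned to the point of
 * coherency by hand (its_cmd_flush).
 */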
static void
gicv3_its_command_init(struct gicv3_softc *sc, struct gicv3_its *its)
{
	uint64_t cbaser, tmp;

	gicv3_dma_alloc(sc, &its->its_cmd, GITS_COMMANDS_SIZE, GITS_COMMANDS_ALIGN);
	if (its->its_cmd_flush) {
		cpu_dcache_wb_range((vaddr_t)its->its_cmd.base, GITS_COMMANDS_SIZE);
	}
	dsb(sy);

	KASSERT((gits_read_4(its, GITS_CTLR) & GITS_CTLR_Enabled) == 0);
	KASSERT((gits_read_4(its, GITS_CTLR) & GITS_CTLR_Quiescent) != 0);

	cbaser = its->its_cmd.segs[0].ds_addr;
	cbaser |= __SHIFTIN((its->its_cmd.len / 4096) - 1, GITS_CBASER_Size);
	cbaser |= GITS_CBASER_Valid;

	cbaser |= __SHIFTIN(GITS_Cache_NORMAL_WA_WB, GITS_CBASER_InnerCache);
	cbaser |= __SHIFTIN(GITS_Shareability_IS, GITS_CBASER_Shareability);
	gits_write_8(its, GITS_CBASER, cbaser);

	tmp = gits_read_8(its, GITS_CBASER);
	if (__SHIFTOUT(tmp, GITS_CBASER_Shareability) != GITS_Shareability_IS) {
		if (__SHIFTOUT(tmp, GITS_CBASER_Shareability) == GITS_Shareability_NS) {
			cbaser &= ~GITS_CBASER_InnerCache;
			cbaser |= __SHIFTIN(GITS_Cache_NORMAL_NC, GITS_CBASER_InnerCache);
			cbaser &= ~GITS_CBASER_Shareability;
			cbaser |= __SHIFTIN(GITS_Shareability_NS, GITS_CBASER_Shareability);
			gits_write_8(its, GITS_CBASER, cbaser);
		}

		its->its_cmd_flush = true;
	}
	aprint_normal_dev(sc->sc_dev, "ITS command table @ %#lx/%#lx, %s, %s\n",
	    its->its_cmd.segs[0].ds_addr, its->its_cmd.len,
	    gits_cache_type[__SHIFTOUT(cbaser, GITS_CBASER_InnerCache)],
	    gits_share_type[__SHIFTOUT(cbaser, GITS_CBASER_Shareability)]);

	gits_write_8(its, GITS_CWRITER, 0);
}

static void
gicv3_its_table_params(struct gicv3_softc *sc, struct gicv3_its *its,
    u_int *devbits, u_int *innercache, u_int *share)
{

	const uint64_t typer = gits_read_8(its, GITS_TYPER);
	const uint32_t iidr = gits_read_4(its, GITS_IIDR);

	/* Default values */
	*devbits = __SHIFTOUT(typer, GITS_TYPER_Devbits) + 1;
	*innercache = GITS_Cache_NORMAL_WA_WB;
	*share = GITS_Shareability_IS;

	/* Cavium ThunderX errata */
	if ((iidr & GITS_IIDR_CAVIUM_ERRATA_MASK) == GITS_IIDR_CAVIUM_ERRATA_VALUE) {
		*devbits = 20;		/* 2^20 DeviceIDs (8MB flat table) */
		*innercache = GITS_Cache_DEVICE_nGnRnE;
		aprint_normal_dev(sc->sc_dev, "Cavium ThunderX errata detected\n");
	}
}

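/*
 * Probe for two-level table support by attempting to set the Indirect
 * bit in GITS_BASER; it reads back as zero if only flat tables are
 * supported.  In an indirect table, each level-1 entry is an 8-byte
 * pointer (valid bit 63) to a level-2 page holding the actual entries.
 */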
static bool
gicv3_its_table_probe_indirect(struct gicv3_its *its, int tab)
{
	uint64_t baser;

	baser = gits_read_8(its, GITS_BASERn(tab));
	baser |= GITS_BASER_Indirect;
	gits_write_8(its, GITS_BASERn(tab), baser);

	baser = gits_read_8(its, GITS_BASERn(tab));

	return (baser & GITS_BASER_Indirect) != 0;
}

static void
gicv3_its_table_init(struct gicv3_softc *sc, struct gicv3_its *its)
{
	u_int page_size, table_align;
	u_int devbits, innercache, share;
	const char *table_type;
	uint64_t baser;
	int tab;

	gicv3_its_table_params(sc, its, &devbits, &innercache, &share);

	DPRINTF(("ITS: devbits = %u\n", devbits));

	for (tab = 0; tab < 8; tab++) {
		struct gicv3_its_table *itstab;
		bool indirect = false;
		uint64_t l1_entry_size, l2_entry_size;
		uint64_t l1_num_ids, l2_num_ids;
		uint64_t table_size;

		baser = gits_read_8(its, GITS_BASERn(tab));

		l1_entry_size = __SHIFTOUT(baser, GITS_BASER_Entry_Size) + 1;
		l2_entry_size = 0;
		l2_num_ids = 0;

		switch (__SHIFTOUT(baser, GITS_BASER_Page_Size)) {
		case GITS_Page_Size_64KB:
			page_size = 65536;
			break;
		case GITS_Page_Size_16KB:
			page_size = 16384;
			break;
		case GITS_Page_Size_4KB:
		default:
			page_size = 4096;
		}
		table_align = page_size;

		switch (__SHIFTOUT(baser, GITS_BASER_Type)) {
		case GITS_Type_Devices:
			/*
			 * Table size scales with the width of the DeviceID.
			 */
			l1_num_ids = 1ULL << devbits;
			DPRINTF(("ITS: l1_num_ids = %lu\n", l1_num_ids));
			indirect =
			    gicv3_its_table_probe_indirect(its, tab);
			if (indirect) {
				DPRINTF(("ITS: indirect\n"));
				l2_entry_size = l1_entry_size;
				l2_num_ids = page_size / l2_entry_size;
				l1_num_ids = l1_num_ids / l2_num_ids;
				l1_entry_size = GITS_INDIRECT_ENTRY_SIZE;
			}
			table_size = roundup2(l1_entry_size * l1_num_ids, page_size);
			if (howmany(table_size, page_size) > GITS_BASER_Size + 1) {
				DPRINTF(("ITS: clamp table size 0x%lx -> ", table_size));
				table_size = (GITS_BASER_Size + 1) * page_size;
				DPRINTF(("0x%lx\n", table_size));
			}
			table_type = "Devices";

			DPRINTF(("ITS: table_size is 0x%lx\n", table_size));

			itstab = &its->its_tab_device;
			itstab->tab_page_size = page_size;
			itstab->tab_l1_entry_size = l1_entry_size;
			itstab->tab_l1_num_ids = l1_num_ids;
			itstab->tab_l2_entry_size = l2_entry_size;
			itstab->tab_l2_num_ids = l2_num_ids;
			itstab->tab_indirect = indirect;
			LIST_INIT(&itstab->tab_pt);
			break;
		case GITS_Type_InterruptCollections:
			/*
			 * Allocate space for one interrupt collection per CPU.
			 */
			table_size = roundup(l1_entry_size * ncpu, page_size);
			table_type = "Collections";
			break;
		default:
			table_size = 0;
			break;
		}

		if (table_size == 0)
			continue;

		gicv3_dma_alloc(sc, &its->its_tab[tab], table_size, table_align);
		if (its->its_cmd_flush) {
			cpu_dcache_wb_range((vaddr_t)its->its_tab[tab].base, table_size);
		}
		dsb(sy);

		baser &= ~GITS_BASER_Size;
		baser |= __SHIFTIN(howmany(table_size, page_size) - 1, GITS_BASER_Size);
		baser &= ~GITS_BASER_Physical_Address;
		baser |= its->its_tab[tab].segs[0].ds_addr;
		baser &= ~GITS_BASER_InnerCache;
		baser |= __SHIFTIN(innercache, GITS_BASER_InnerCache);
		baser &= ~GITS_BASER_Shareability;
		baser |= __SHIFTIN(share, GITS_BASER_Shareability);
		baser |= GITS_BASER_Valid;
		if (indirect) {
			baser |= GITS_BASER_Indirect;
		} else {
			baser &= ~GITS_BASER_Indirect;
		}

		gits_write_8(its, GITS_BASERn(tab), baser);

		baser = gits_read_8(its, GITS_BASERn(tab));
		if (__SHIFTOUT(baser, GITS_BASER_Shareability) == GITS_Shareability_NS) {
			baser &= ~GITS_BASER_InnerCache;
			baser |= __SHIFTIN(GITS_Cache_NORMAL_NC, GITS_BASER_InnerCache);

			gits_write_8(its, GITS_BASERn(tab), baser);
		}

		baser = gits_read_8(its, GITS_BASERn(tab));
		aprint_normal_dev(sc->sc_dev, "ITS [#%d] %s table @ %#lx/%#lx, %s, %s%s\n",
		    tab, table_type, its->its_tab[tab].segs[0].ds_addr, table_size,
		    gits_cache_type[__SHIFTOUT(baser, GITS_BASER_InnerCache)],
		    gits_share_type[__SHIFTOUT(baser, GITS_BASER_Shareability)],
		    indirect ? ", indirect" : "");

		if (__SHIFTOUT(baser, GITS_BASER_Type) == GITS_Type_Devices) {
			its->its_tab_device.tab_l1 = its->its_tab[tab].base;
			its->its_tab_device.tab_shareable =
			    __SHIFTOUT(baser, GITS_BASER_Shareability) != GITS_Shareability_NS;
		}
	}
}

static void
gicv3_its_enable(struct gicv3_softc *sc, struct gicv3_its *its)
{
	uint32_t ctlr;

	ctlr = gits_read_4(its, GITS_CTLR);
	ctlr |= GITS_CTLR_Enabled;
	gits_write_4(its, GITS_CTLR, ctlr);
}

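/*
 * Per-CPU hook, run as each CPU comes online: compute the CPU's RDbase
 * (a physical address when GITS_TYPER.PTA is set, otherwise the
 * redistributor's processor number shifted into place), map a
 * collection with ICID == cpu_index(ci) to it, and re-route any LPIs
 * that were bound to this CPU before it came online.
 */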
static void
gicv3_its_cpu_init(void *priv, struct cpu_info *ci)
{
	struct gicv3_its * const its = priv;
	struct gicv3_softc * const sc = its->its_gic;
	uint64_t rdbase;
	size_t irq;

	const uint64_t typer = bus_space_read_8(sc->sc_bst, its->its_bsh, GITS_TYPER);
	if (typer & GITS_TYPER_PTA) {
		void *va = bus_space_vaddr(sc->sc_bst, sc->sc_bsh_r[ci->ci_gic_redist]);
		rdbase = vtophys((vaddr_t)va);
	} else {
		rdbase = (uint64_t)sc->sc_processor_id[cpu_index(ci)] << 16;
	}
	its->its_rdbase[cpu_index(ci)] = rdbase;

	/*
	 * Map collection ID of this CPU's index to this CPU's redistributor.
	 */
	mutex_enter(its->its_lock);
	gits_command_mapc(its, cpu_index(ci), rdbase, true);
	gits_command_invall(its, cpu_index(ci));
	gits_wait(its);

	/*
	 * Update routing for LPIs targeting this CPU
	 */
	for (irq = 0; irq < its->its_pic->pic_maxsources; irq++) {
		if (its->its_targets[irq] != ci)
			continue;
		KASSERT(its->its_pa[irq] != NULL);

		const uint32_t devid = its->its_devid[irq];
		gits_command_movi(its, devid, irq, cpu_index(ci));
		gits_command_sync(its, its->its_rdbase[cpu_index(ci)]);
	}
	gits_wait(its);
	mutex_exit(its->its_lock);

	its->its_cpuonline[cpu_index(ci)] = true;
}

static void
gicv3_its_get_affinity(void *priv, size_t irq, kcpuset_t *affinity)
{
	struct gicv3_its * const its = priv;
	struct cpu_info *ci;

	ci = its->its_targets[irq];
	if (ci)
		kcpuset_set(affinity, cpu_index(ci));
}

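/*
 * Rebind an LPI to a single CPU.  If the target CPU is already online,
 * the MOVI command updates the ICID in the ITT entry immediately;
 * otherwise the new target is only recorded and gicv3_its_cpu_init()
 * issues the MOVI when the CPU comes up.
 */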
static int
gicv3_its_set_affinity(void *priv, size_t irq, const kcpuset_t *affinity)
{
	struct gicv3_its * const its = priv;
	const struct pci_attach_args *pa;
	struct cpu_info *ci;

	const int set = kcpuset_countset(affinity);
	if (set != 1)
		return EINVAL;

	pa = its->its_pa[irq];
	if (pa == NULL)
		return EPASSTHROUGH;

	ci = cpu_lookup(kcpuset_ffs(affinity) - 1);
	its->its_targets[irq] = ci;

	if (its->its_cpuonline[cpu_index(ci)] == true) {
		const uint32_t devid = gicv3_its_devid(pa->pa_pc, pa->pa_tag);
		mutex_enter(its->its_lock);
		gits_command_movi(its, devid, irq, cpu_index(ci));
		gits_command_sync(its, its->its_rdbase[cpu_index(ci)]);
		mutex_exit(its->its_lock);
	}

	return 0;
}

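/*
 * Attach an ITS instance: verify it can translate physical LPIs,
 * allocate the per-LPI bookkeeping arrays, set up the command queue and
 * translation tables, enable the ITS, initialize the boot CPU's
 * collection, and register the arm_pci_msi backend.
 */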
int
gicv3_its_init(struct gicv3_softc *sc, bus_space_handle_t bsh,
    uint64_t its_base, uint32_t its_id)
{
	struct gicv3_its *its;
	struct arm_pci_msi *msi;

	const uint64_t typer = bus_space_read_8(sc->sc_bst, bsh, GITS_TYPER);
	if ((typer & GITS_TYPER_Physical) == 0)
		return ENXIO;

	its = kmem_zalloc(sizeof(*its), KM_SLEEP);
	its->its_id = its_id;
	its->its_bst = sc->sc_bst;
	its->its_bsh = bsh;
	its->its_dmat = sc->sc_dmat;
	its->its_base = its_base;
	its->its_pic = &sc->sc_lpi;
	snprintf(its->its_pic->pic_name, sizeof(its->its_pic->pic_name), "gicv3-its");
	KASSERT(its->its_pic->pic_maxsources > 0);
	its->its_pa = kmem_zalloc(sizeof(struct pci_attach_args *) * its->its_pic->pic_maxsources, KM_SLEEP);
	its->its_targets = kmem_zalloc(sizeof(struct cpu_info *) * its->its_pic->pic_maxsources, KM_SLEEP);
	its->its_devid = kmem_zalloc(sizeof(uint32_t) * its->its_pic->pic_maxsources, KM_SLEEP);
	its->its_gic = sc;
	its->its_rdbase = kmem_zalloc(sizeof(*its->its_rdbase) * ncpu, KM_SLEEP);
	its->its_cpuonline = kmem_zalloc(sizeof(*its->its_cpuonline) * ncpu, KM_SLEEP);
	its->its_cb.cpu_init = gicv3_its_cpu_init;
	its->its_cb.get_affinity = gicv3_its_get_affinity;
	its->its_cb.set_affinity = gicv3_its_set_affinity;
	its->its_cb.priv = its;
	LIST_INIT(&its->its_devices);
	LIST_INSERT_HEAD(&sc->sc_lpi_callbacks, &its->its_cb, list);
	its->its_lock = mutex_obj_alloc(MUTEX_SPIN, IPL_NONE);

	gicv3_its_command_init(sc, its);
	gicv3_its_table_init(sc, its);

	gicv3_its_enable(sc, its);

	gicv3_its_cpu_init(its, curcpu());

	msi = &its->its_msi;
	msi->msi_id = its_id;
	msi->msi_dev = sc->sc_dev;
	msi->msi_priv = its;
	msi->msi_alloc = gicv3_its_msi_alloc;
	msi->msix_alloc = gicv3_its_msix_alloc;
	msi->msi_intr_establish = gicv3_its_msi_intr_establish;
	msi->msi_intr_release = gicv3_its_msi_intr_release;

	return arm_pci_msi_add(msi);
}