xref: /netbsd-src/sys/dev/pci/virtio_pci.c (revision 0345cf900328bbbe7068b5c6e89dd681ff936761)
1 /* $NetBSD: virtio_pci.c,v 1.55 2024/09/25 17:12:47 christos Exp $ */
2 
3 /*
4  * Copyright (c) 2020 The NetBSD Foundation, Inc.
5  * Copyright (c) 2012 Stefan Fritsch.
6  * Copyright (c) 2010 Minoura Makoto.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.55 2024/09/25 17:12:47 christos Exp $");
32 
33 #include <sys/param.h>
34 #include <sys/types.h>
35 
36 #include <sys/device.h>
37 #include <sys/endian.h>
38 #include <sys/interrupt.h>
39 #include <sys/kmem.h>
40 #include <sys/module.h>
41 #include <sys/syslog.h>
42 #include <sys/systm.h>
43 
44 #include <dev/pci/pcidevs.h>
45 #include <dev/pci/pcireg.h>
46 #include <dev/pci/pcivar.h>
47 
48 #include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
49 #include <dev/pci/virtio_pcireg.h>
50 
51 #define VIRTIO_PRIVATE
52 #include <dev/pci/virtiovar.h> /* XXX: move to non-pci */
53 
54 #if defined(__alpha__) || defined(__sparc64__)
55 /*
56  * XXX VIRTIO_F_ACCESS_PLATFORM is required for standard PCI DMA
57  * XXX to work on these platforms, at least by Qemu.
58  * XXX
59  * XXX Generalize this later.
60  */
61 #define	__NEED_VIRTIO_F_ACCESS_PLATFORM
62 #endif /* __alpha__ || __sparc64__ */
63 
/*
 * Report an error either via log(9) when _use_log is set (callers pass
 * their "reinit" flag here, i.e. re-initialization after attach) or via
 * aprint_error_dev(9) during autoconfiguration.
 */
#define VIRTIO_PCI_LOG(_sc, _use_log, _fmt, _args...)	\
do {							\
	if ((_use_log)) {				\
		log(LOG_DEBUG, "%s: " _fmt,		\
		    device_xname((_sc)->sc_dev),	\
		    ##_args);				\
	} else {					\
		aprint_error_dev((_sc)->sc_dev,		\
		    _fmt, ##_args);			\
	}						\
} while(0)
75 
76 static int	virtio_pci_match(device_t, cfdata_t, void *);
77 static void	virtio_pci_attach(device_t, device_t, void *);
78 static int	virtio_pci_rescan(device_t, const char *, const int *);
79 static int	virtio_pci_detach(device_t, int);
80 
/* Number of PCI BAR registers. */
#define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
				sizeof(pcireg_t))

/* PCI transport state, embedding the bus-independent virtio_softc. */
struct virtio_pci_softc {
	struct virtio_softc	sc_sc;
	bool			sc_intr_pervq;	/* one MSI-X vector per vq */

	/* IO space (0.9 legacy registers / 1.0 common config) */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;

	/* BARs mapped for the 1.0 capabilities */
	bus_space_tag_t		sc_bars_iot[NMAPREG];
	bus_space_handle_t	sc_bars_ioh[NMAPREG];
	bus_size_t		sc_bars_iosize[NMAPREG];

	/* notify space */
	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;
	uint32_t		sc_notify_off_multiplier;	/* 1.0 only */

	/* isr space */
	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	/* generic */
	struct pci_attach_args	sc_pa;	/* copy of our attach args */
	pci_intr_handle_t	*sc_ihp;	/* allocated intr handles */
	void			**sc_ihs;	/* established interrupts */
	int			sc_ihs_num;	/* 0 while none established */
	int			sc_devcfg_offset;	/* for 0.9 */
};
115 
116 static int	virtio_pci_attach_09(device_t, void *);
117 static void	virtio_pci_kick_09(struct virtio_softc *, uint16_t);
118 static uint16_t	virtio_pci_read_queue_size_09(struct virtio_softc *, uint16_t);
119 static void	virtio_pci_setup_queue_09(struct virtio_softc *, uint16_t,
120 		    uint64_t);
121 static void	virtio_pci_set_status_09(struct virtio_softc *, int);
122 static void	virtio_pci_negotiate_features_09(struct virtio_softc *,
123 		    uint64_t);
124 
125 static int	virtio_pci_attach_10(device_t, void *);
126 static void	virtio_pci_kick_10(struct virtio_softc *, uint16_t);
127 static uint16_t	virtio_pci_read_queue_size_10(struct virtio_softc *, uint16_t);
128 static void	virtio_pci_setup_queue_10(struct virtio_softc *, uint16_t,
129 		    uint64_t);
130 static void	virtio_pci_set_status_10(struct virtio_softc *, int);
131 static void	virtio_pci_negotiate_features_10(struct virtio_softc *,
132 		    uint64_t);
133 static int	virtio_pci_find_cap(struct virtio_pci_softc *, int, void *,
134 		    int);
135 
136 static int	virtio_pci_alloc_interrupts(struct virtio_softc *);
137 static void	virtio_pci_free_interrupts(struct virtio_softc *);
138 static int	virtio_pci_adjust_config_region(struct virtio_pci_softc *);
139 static int	virtio_pci_intr(void *);
140 static int	virtio_pci_msix_queue_intr(void *);
141 static int	virtio_pci_msix_config_intr(void *);
142 static int	virtio_pci_setup_interrupts_09(struct virtio_softc *, int);
143 static int	virtio_pci_setup_interrupts_10(struct virtio_softc *, int);
144 static int	virtio_pci_establish_msix_interrupts(struct virtio_softc *,
145 		    const struct pci_attach_args *);
146 static int	virtio_pci_establish_intx_interrupt(struct virtio_softc *,
147 		    const struct pci_attach_args *);
148 static bool	virtio_pci_msix_enabled(struct virtio_pci_softc *);
149 
150 #define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
151 #define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1
152 
153 /*
154  * For big-endian aarch64/armv7 on QEMU (and most real HW), only CPU cores
155  * are running in big-endian mode, with all peripheral being configured to
156  * little-endian mode. Their default bus_space(9) functions forcibly swap
157  * byte-order. This guarantees that PIO'ed data from pci(4), e.g., are
158  * correctly handled by bus_space(9), while DMA'ed ones should be swapped
159  * by hand, in violation of virtio(4) specifications.
160  */
161 
162 #if (defined(__aarch64__) || defined(__arm__)) && BYTE_ORDER == BIG_ENDIAN
163 #	define READ_ENDIAN_09	BIG_ENDIAN
164 #	define READ_ENDIAN_10	BIG_ENDIAN
165 #	define STRUCT_ENDIAN_09	BIG_ENDIAN
166 #	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
167 #elif BYTE_ORDER == BIG_ENDIAN
168 #	define READ_ENDIAN_09	LITTLE_ENDIAN
169 #	define READ_ENDIAN_10	BIG_ENDIAN
170 #	define STRUCT_ENDIAN_09	BIG_ENDIAN
171 #	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
172 #else /* little endian */
173 #	define READ_ENDIAN_09	LITTLE_ENDIAN
174 #	define READ_ENDIAN_10	LITTLE_ENDIAN
175 #	define STRUCT_ENDIAN_09	LITTLE_ENDIAN
176 #	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
177 #endif
178 
/* Autoconfiguration glue: match/attach/detach plus rescan support. */
CFATTACH_DECL3_NEW(virtio_pci, sizeof(struct virtio_pci_softc),
    virtio_pci_match, virtio_pci_attach, virtio_pci_detach, NULL,
    virtio_pci_rescan, NULL, 0);
182 
/* Transport operations for the legacy 0.9 register layout. */
static const struct virtio_ops virtio_pci_ops_09 = {
	.kick = virtio_pci_kick_09,
	.read_queue_size = virtio_pci_read_queue_size_09,
	.setup_queue = virtio_pci_setup_queue_09,
	.set_status = virtio_pci_set_status_09,
	.neg_features = virtio_pci_negotiate_features_09,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_09,
};
193 
/* Transport operations for the modern 1.0 register layout. */
static const struct virtio_ops virtio_pci_ops_10 = {
	.kick = virtio_pci_kick_10,
	.read_queue_size = virtio_pci_read_queue_size_10,
	.setup_queue = virtio_pci_setup_queue_10,
	.set_status = virtio_pci_set_status_10,
	.neg_features = virtio_pci_negotiate_features_10,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_10,
};
204 
205 static int
206 virtio_pci_match(device_t parent, cfdata_t match, void *aux)
207 {
208 	const struct pci_attach_args * const pa = aux;
209 
210 	switch (PCI_VENDOR(pa->pa_id)) {
211 	case PCI_VENDOR_QUMRANET:
212 		/* Transitional devices MUST have a PCI Revision ID of 0. */
213 		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
214 			    PCI_PRODUCT(pa->pa_id)) &&
215 			(PCI_PRODUCT(pa->pa_id) <=
216 			    PCI_PRODUCT_QUMRANET_VIRTIO_103F)) &&
217 		    PCI_REVISION(pa->pa_class) == 0)
218 			return 1;
219 		/*
220 		 * Non-transitional devices SHOULD have a PCI Revision
221 		 * ID of 1 or higher.  Drivers MUST match any PCI
222 		 * Revision ID value.
223 		 */
224 		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1040 <=
225 			    PCI_PRODUCT(pa->pa_id)) &&
226 			(PCI_PRODUCT(pa->pa_id) <=
227 			    PCI_PRODUCT_QUMRANET_VIRTIO_107F)) &&
228 		    /* XXX: TODO */
229 		    PCI_REVISION(pa->pa_class) == 1)
230 			return 1;
231 		break;
232 	}
233 
234 	return 0;
235 }
236 
/*
 * Attach the PCI transport: determine the device identity from the PCI
 * revision, map the revision-appropriate register layout (0.9 legacy or
 * 1.0 modern), reset the device, and attach the child virtio device via
 * rescan.
 */
static void
virtio_pci_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	const struct pci_attach_args * const pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	int ret;
	pcireg_t id;
	pcireg_t csr;

	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* subsystem ID shows what I am */
		id = PCI_SUBSYS_ID(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		/* pci product number shows what I am */
		id = PCI_PRODUCT(pa->pa_id) - PCI_PRODUCT_QUMRANET_VIRTIO_1040;
		break;
	default:
		aprint_normal(": unknown revision 0x%02x; giving up\n",
		    revision);
		return;
	}

	aprint_normal("\n");
	aprint_naive("\n");
	virtio_print_device_type(self, id, revision);

	/* enable bus mastering and i/o space access */
	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE;
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);

	sc->sc_dev = self;
	psc->sc_pa = *pa;
	psc->sc_iot = pa->pa_iot;

	/* prefer the 64-bit DMA tag when the bus supports it */
	sc->sc_dmat = pa->pa_dmat;
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;

	/* attach is dependent on revision */
	ret = 0;
	if (revision == 1) {
		/* try to attach 1.0 */
		ret = virtio_pci_attach_10(self, aux);
	}
	if (ret == 0 && revision == 0) {
		/*
		 * revision 0 means 0.9 only or both 0.9 and 1.0.  The
		 * latter are so-called "Transitional Devices".  For
		 * those devices, we want to use the 1.0 interface if
		 * possible.
		 *
		 * XXX Currently only on platforms that require 1.0
		 * XXX features, such as VIRTIO_F_ACCESS_PLATFORM.
		 */
#ifdef __NEED_VIRTIO_F_ACCESS_PLATFORM
		/* First, try to attach 1.0 */
		ret = virtio_pci_attach_10(self, aux);
		if (ret != 0) {
			aprint_error_dev(self,
			    "VirtIO 1.0 error = %d, falling back to 0.9\n",
			    ret);
			/* Fall back to 0.9. */
			ret = virtio_pci_attach_09(self, aux);
		}
#else
		ret = virtio_pci_attach_09(self, aux);
#endif /* __NEED_VIRTIO_F_ACCESS_PLATFORM */
	}
	if (ret) {
		aprint_error_dev(self, "cannot attach (%d)\n", ret);
		return;
	}
	KASSERT(sc->sc_ops);

	/* preset config region (no-op for 1.0) */
	psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	if (virtio_pci_adjust_config_region(psc))
		return;

	/* generic */
	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	sc->sc_childdevid = id;
	sc->sc_child = NULL;
	virtio_pci_rescan(self, NULL, NULL);
	return;
}
333 
334 /* ARGSUSED */
335 static int
336 virtio_pci_rescan(device_t self, const char *ifattr, const int *locs)
337 {
338 	struct virtio_pci_softc * const psc = device_private(self);
339 	struct virtio_softc * const sc = &psc->sc_sc;
340 	struct virtio_attach_args va;
341 
342 	if (sc->sc_child)	/* Child already attached? */
343 		return 0;
344 
345 	memset(&va, 0, sizeof(va));
346 	va.sc_childdevid = sc->sc_childdevid;
347 
348 	config_found(self, &va, NULL, CFARGS_NONE);
349 
350 	if (virtio_attach_failed(sc))
351 		return 0;
352 
353 	return 0;
354 }
355 
/*
 * Detach: tear down any child device first; once the children are gone,
 * release the bus_space mappings created by the revision-specific
 * attach routine.
 */
static int
virtio_pci_detach(device_t self, int flags)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	unsigned i;
	int r;

	r = config_detach_children(self, flags);
	if (r != 0)
		return r;

	/* Check that child never attached, or detached properly */
	KASSERT(sc->sc_child == NULL);
	KASSERT(sc->sc_vqs == NULL);
	KASSERT(psc->sc_ihs_num == 0);

	if (sc->sc_version_1) {
		/* 1.0: unmap every BAR mapped by virtio_pci_attach_10() */
		for (i = 0; i < __arraycount(psc->sc_bars_iot); i++) {
			if (psc->sc_bars_iosize[i] == 0)
				continue;
			bus_space_unmap(psc->sc_bars_iot[i],
			    psc->sc_bars_ioh[i], psc->sc_bars_iosize[i]);
			psc->sc_bars_iosize[i] = 0;
		}
	} else {
		/* 0.9: a single i/o mapping covers all registers */
		if (psc->sc_iosize) {
			bus_space_unmap(psc->sc_iot, psc->sc_ioh,
			    psc->sc_iosize);
			psc->sc_iosize = 0;
		}
	}

	return 0;
}
391 
/*
 * Map the legacy (v0.9) register layout: one contiguous i/o region,
 * with the notify and ISR registers carved out as subregions, then
 * install the 0.9 transport ops.  Returns 0 or EIO.
 */
static int
virtio_pci_attach_09(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	const struct pci_attach_args * const pa = aux;
	struct virtio_softc * const sc = &psc->sc_sc;

	/* complete IO region */
	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
		&psc->sc_iot, &psc->sc_ioh, NULL, &psc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return EIO;
	}

	/*
	 * queue space
	 * NOTE(review): on subregion failure the mapping above stays in
	 * place; virtio_pci_detach() unmaps it via the recorded sc_iosize.
	 */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
		VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		return EIO;
	}
	psc->sc_notify_iosize = 2;
	psc->sc_notify_iot = psc->sc_iot;

	/* ISR space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
		VIRTIO_CONFIG_ISR_STATUS, 1, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		return EIO;
	}
	psc->sc_isr_iosize = 1;
	psc->sc_isr_iot = psc->sc_iot;

	/* set our version 0.9 ops */
	sc->sc_ops = &virtio_pci_ops_09;
	sc->sc_bus_endian = READ_ENDIAN_09;
	sc->sc_struct_endian = STRUCT_ENDIAN_09;
	return 0;
}
430 
/*
 * Map the modern (v1.0) register layout: find the common, notify, isr
 * and (optional) device-config capabilities, map every BAR any of them
 * references, then carve each capability's subregion out of its BAR
 * mapping and install the 1.0 transport ops.  On failure all BARs
 * mapped so far are unmapped.  Returns 0 or an errno.
 */
static int
virtio_pci_attach_10(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	const struct pci_attach_args * const pa = aux;
	struct virtio_softc * const sc = &psc->sc_sc;
	const pci_chipset_tag_t pc = pa->pa_pc;
	const pcitag_t tag = pa->pa_tag;

	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };
	int bars_idx[NMAPREG] = { 0 };
	struct virtio_pci_cap * const caps[] =
	    { &common, &isr, &device, &notify.cap };
	int i, j, ret = 0;

	/* common, notify and isr capabilities are mandatory ... */
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_COMMON_CFG,
		&common, sizeof(common)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_NOTIFY_CFG,
		&notify, sizeof(notify)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_ISR_CFG,
		&isr, sizeof(isr)))
		return ENODEV;
	/* ... the device-config capability is optional */
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_DEVICE_CFG,
		&device, sizeof(device)))
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;

	/* Figure out which bars we need to map */
	for (i = 0; i < __arraycount(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;

		if (caps[i]->length == 0)
			continue;
		/* remember the largest extent needed in each BAR */
		if (bars[bar] < len)
			bars[bar] = len;
	}

	/* map the needed BARs; bars_idx[] maps BAR number -> slot j */
	for (i = j = 0; i < __arraycount(bars); i++) {
		int reg;
		pcireg_t type;

		if (bars[i] == 0)
			continue;
		reg = PCI_BAR(i);
		type = pci_mapreg_type(pc, tag, reg);
		if (pci_mapreg_map(pa, reg, type, 0,
			&psc->sc_bars_iot[j], &psc->sc_bars_ioh[j],
			NULL, &psc->sc_bars_iosize[j])) {
			aprint_error_dev(self, "can't map bar %u \n", i);
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "bar[%d]: iot %p, size 0x%" PRIxBUSSIZE "\n",
		    j, psc->sc_bars_iot[j], psc->sc_bars_iosize[j]);
		bars_idx[i] = j;
		j++;
	}

	/* notify region */
	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		notify.cap.offset, notify.cap.length, &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_notify_iosize = notify.cap.length;
	psc->sc_notify_iot = psc->sc_bars_iot[i];
	psc->sc_notify_off_multiplier = le32toh(notify.notify_off_multiplier);

	/* device config region, if present */
	if (have_device_cfg) {
		i = bars_idx[device.bar];
		if (bus_space_subregion(psc->sc_bars_iot[i],
			psc->sc_bars_ioh[i], device.offset, device.length,
			&sc->sc_devcfg_ioh)) {
			aprint_error_dev(self, "can't map devcfg i/o space\n");
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "device.offset = 0x%x, device.length = 0x%x\n",
		    device.offset, device.length);
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = psc->sc_bars_iot[i];
	}

	/* isr region */
	i = bars_idx[isr.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		isr.offset, isr.length, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_isr_iosize = isr.length;
	psc->sc_isr_iot = psc->sc_bars_iot[i];

	/* common config region */
	i = bars_idx[common.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		common.offset, common.length, &psc->sc_ioh)) {
		aprint_error_dev(self, "can't map common i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_iosize = common.length;
	psc->sc_iot = psc->sc_bars_iot[i];

	psc->sc_sc.sc_version_1 = 1;

	/* set our version 1.0 ops */
	sc->sc_ops = &virtio_pci_ops_10;
	sc->sc_bus_endian = READ_ENDIAN_10;
	sc->sc_struct_endian = STRUCT_ENDIAN_10;
	return 0;

err:
	/* undo our pci_mapreg_map()s */
	for (i = 0; i < __arraycount(bars); i++) {
		if (psc->sc_bars_iosize[i] == 0)
			continue;
		bus_space_unmap(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		    psc->sc_bars_iosize[i]);
		psc->sc_bars_iosize[i] = 0;
	}
	return ret;
}
563 
/* v1.0 attach helper */
/*
 * Locate a virtio vendor-specific PCI capability of the given cfg_type
 * and copy it into buf, converting the multi-byte fields read from
 * config space out of little-endian.  Returns 0 on success, ENOENT when
 * no matching capability exists, ERANGE when buf is too small.
 */
static int
virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf,
    int buflen)
{
	device_t self = psc->sc_sc.sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	unsigned int offset, i, len;
	union {
		pcireg_t reg[8];
		struct virtio_pci_cap vcap;
	} *v = buf;

	if (buflen < sizeof(struct virtio_pci_cap))
		return ERANGE;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset,
		&v->reg[0]))
		return ENOENT;

	/* walk the vendor capability list looking for cfg_type */
	do {
		/* read the generic virtio_pci_cap header (4 registers) */
		for (i = 0; i < 4; i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
		if (v->vcap.cfg_type == cfg_type)
			break;
		offset = v->vcap.cap_next;
	} while (offset != 0);

	/* loop fell through without a match */
	if (offset == 0)
		return ENOENT;

	/* larger capabilities (e.g. notify cap) carry extra registers */
	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
		if (len > buflen) {
			aprint_error_dev(self, "%s cap too large\n", __func__);
			return ERANGE;
		}
		for (i = 4; i < len / sizeof(pcireg_t);  i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
	}

	/* endian fixup */
	v->vcap.offset = le32toh(v->vcap.offset);
	v->vcap.length = le32toh(v->vcap.length);
	return 0;
}
613 
614 /* -------------------------------------
615  * Version 0.9 support
616  * -------------------------------------*/
617 
618 static void
619 virtio_pci_kick_09(struct virtio_softc *sc, uint16_t idx)
620 {
621 	struct virtio_pci_softc * const psc = container_of(sc,
622 	    struct virtio_pci_softc, sc_sc);
623 
624 	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, 0, idx);
625 }
626 
627 /* only applicable for v 0.9 but also called for 1.0 */
628 static int
629 virtio_pci_adjust_config_region(struct virtio_pci_softc *psc)
630 {
631 	struct virtio_softc * const sc = &psc->sc_sc;
632 	device_t self = sc->sc_dev;
633 
634 	if (psc->sc_sc.sc_version_1)
635 		return 0;
636 
637 	sc->sc_devcfg_iosize = psc->sc_iosize - psc->sc_devcfg_offset;
638 	sc->sc_devcfg_iot = psc->sc_iot;
639 	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
640 		psc->sc_devcfg_offset, sc->sc_devcfg_iosize,
641 		&sc->sc_devcfg_ioh)) {
642 		aprint_error_dev(self, "can't map config i/o space\n");
643 		return EIO;
644 	}
645 
646 	return 0;
647 }
648 
649 static uint16_t
650 virtio_pci_read_queue_size_09(struct virtio_softc *sc, uint16_t idx)
651 {
652 	struct virtio_pci_softc * const psc = container_of(sc,
653 	    struct virtio_pci_softc, sc_sc);
654 
655 	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
656 	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
657 	return bus_space_read_2(psc->sc_iot, psc->sc_ioh,
658 	    VIRTIO_CONFIG_QUEUE_SIZE);
659 }
660 
661 static void
662 virtio_pci_setup_queue_09(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
663 {
664 	struct virtio_pci_softc * const psc = container_of(sc,
665 	    struct virtio_pci_softc, sc_sc);
666 
667 	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
668 	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
669 	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
670 	    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);
671 
672 	if (psc->sc_ihs_num > 1) {
673 		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
674 		if (psc->sc_intr_pervq)
675 			vec += idx;
676 		bus_space_write_2(psc->sc_iot, psc->sc_ioh,
677 		    VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec);
678 	}
679 }
680 
681 static void
682 virtio_pci_set_status_09(struct virtio_softc *sc, int status)
683 {
684 	struct virtio_pci_softc * const psc = container_of(sc,
685 	    struct virtio_pci_softc, sc_sc);
686 	int old = 0;
687 
688 	if (status != 0) {
689 		old = bus_space_read_1(psc->sc_iot, psc->sc_ioh,
690 		    VIRTIO_CONFIG_DEVICE_STATUS);
691 	}
692 	bus_space_write_1(psc->sc_iot, psc->sc_ioh,
693 	    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
694 }
695 
696 static void
697 virtio_pci_negotiate_features_09(struct virtio_softc *sc,
698     uint64_t guest_features)
699 {
700 	struct virtio_pci_softc * const psc = container_of(sc,
701 	    struct virtio_pci_softc, sc_sc);
702 	uint32_t r;
703 
704 	r = bus_space_read_4(psc->sc_iot, psc->sc_ioh,
705 	    VIRTIO_CONFIG_DEVICE_FEATURES);
706 
707 	r &= guest_features;
708 
709 	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
710 	    VIRTIO_CONFIG_GUEST_FEATURES, r);
711 
712 	sc->sc_active_features = r;
713 }
714 
715 /* -------------------------------------
716  * Version 1.0 support
717  * -------------------------------------*/
718 
719 static void
720 virtio_pci_kick_10(struct virtio_softc *sc, uint16_t idx)
721 {
722 	struct virtio_pci_softc * const psc = container_of(sc,
723 	    struct virtio_pci_softc, sc_sc);
724 	unsigned offset = sc->sc_vqs[idx].vq_notify_off *
725 	    psc->sc_notify_off_multiplier;
726 
727 	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, offset, idx);
728 }
729 
730 static uint16_t
731 virtio_pci_read_queue_size_10(struct virtio_softc *sc, uint16_t idx)
732 {
733 	struct virtio_pci_softc * const psc = container_of(sc,
734 	    struct virtio_pci_softc, sc_sc);
735 	bus_space_tag_t iot = psc->sc_iot;
736 	bus_space_handle_t ioh = psc->sc_ioh;
737 
738 	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, idx);
739 	return bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SIZE);
740 }
741 
742 /*
743  * By definition little endian only in v1.0.  NB: "MAY" in the text
744  * below refers to "independently" (i.e. the order of accesses) not
745  * "32-bit" (which is restricted by the earlier "MUST").
746  *
747  * 4.1.3.1 Driver Requirements: PCI Device Layout
748  *
749  * For device configuration access, the driver MUST use ... 32-bit
750  * wide and aligned accesses for ... 64-bit wide fields.  For 64-bit
751  * fields, the driver MAY access each of the high and low 32-bit parts
752  * of the field independently.
753  */
/*
 * Write a 64-bit common-config field as two independent aligned 32-bit
 * accesses, low word first, as permitted by the spec text quoted above.
 */
static __inline void
virtio_pci_bus_space_write_8(bus_space_tag_t iot, bus_space_handle_t ioh,
    bus_size_t offset, uint64_t value)
{
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_LO32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_HI32(value));
}
761 
/*
 * Program queue idx in the 1.0 common config: addr == 0 disables the
 * queue and clears its ring addresses; otherwise program the desc,
 * avail and used ring addresses, enable the queue, and latch its
 * notify offset for virtio_pci_kick_10().
 */
static void
virtio_pci_setup_queue_10(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	struct virtqueue *vq = &sc->sc_vqs[idx];
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	KASSERT(vq->vq_index == idx);

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, vq->vq_index);
	if (addr == 0) {
		/* disable the queue and clear its ring addresses */
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_ENABLE, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, 0);
	} else {
		/* rings live at fixed offsets from the descriptor table */
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, addr);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, addr + vq->vq_availoffset);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, addr + vq->vq_usedoffset);
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_ENABLE, 1);
		vq->vq_notify_off = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_NOTIFY_OFF);
	}

	/* With MSI-X in use, route this queue's interrupt to its vector. */
	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (psc->sc_intr_pervq)
			vec += idx;
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, vec);
	}
}
802 
803 static void
804 virtio_pci_set_status_10(struct virtio_softc *sc, int status)
805 {
806 	struct virtio_pci_softc * const psc = container_of(sc,
807 	    struct virtio_pci_softc, sc_sc);
808 	bus_space_tag_t iot = psc->sc_iot;
809 	bus_space_handle_t ioh = psc->sc_ioh;
810 	int old = 0;
811 
812 	if (status)
813 		old = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
814 	bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
815 	    status | old);
816 }
817 
818 void
819 virtio_pci_negotiate_features_10(struct virtio_softc *sc,
820     uint64_t guest_features)
821 {
822 	struct virtio_pci_softc * const psc = container_of(sc,
823 	    struct virtio_pci_softc, sc_sc);
824 	device_t self = sc->sc_dev;
825 	bus_space_tag_t iot = psc->sc_iot;
826 	bus_space_handle_t ioh = psc->sc_ioh;
827 	uint64_t host, negotiated, device_status;
828 
829 	guest_features |= VIRTIO_F_VERSION_1;
830 #ifdef __NEED_VIRTIO_F_ACCESS_PLATFORM
831 	/* XXX This could use some work. */
832 	guest_features |= VIRTIO_F_ACCESS_PLATFORM;
833 #endif /* __NEED_VIRTIO_F_ACCESS_PLATFORM */
834 	/* notify on empty is 0.9 only */
835 	guest_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
836 	sc->sc_active_features = 0;
837 
838 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 0);
839 	host = bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE);
840 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 1);
841 	host |= (uint64_t)bus_space_read_4(iot, ioh,
842 	    VIRTIO_CONFIG1_DEVICE_FEATURE) << 32;
843 
844 	negotiated = host & guest_features;
845 
846 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 0);
847 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
848 	    negotiated & 0xffffffff);
849 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 1);
850 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
851 	    negotiated >> 32);
852 	virtio_pci_set_status_10(sc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);
853 
854 	device_status = bus_space_read_1(iot, ioh,
855 	    VIRTIO_CONFIG1_DEVICE_STATUS);
856 	if ((device_status & VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
857 		aprint_error_dev(self, "feature negotiation failed\n");
858 		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
859 		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
860 		return;
861 	}
862 
863 	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
864 		aprint_error_dev(self, "host rejected version 1\n");
865 		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
866 		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
867 		return;
868 	}
869 
870 	sc->sc_active_features = negotiated;
871 	return;
872 }
873 
874 /* -------------------------------------
875  * Generic PCI interrupt code
876  * -------------------------------------*/
877 
/*
 * Route the config and per-queue interrupts to their MSI-X vectors in
 * the 1.0 common config.  Each write is read back to verify the device
 * accepted the vector.  No-op when MSI-X is not in use.  Returns 0 on
 * success, -1 on failure (reported via log(9) when reinit is set).
 */
static int
virtio_pci_setup_interrupts_10(struct virtio_softc *sc, int reinit)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int vector, ret, qid;

	if (!virtio_pci_msix_enabled(psc))
		return 0;

	/* route configuration-change interrupts */
	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR, vector);
	ret = bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR);
	if (ret != vector) {
		VIRTIO_PCI_LOG(sc, reinit, "can't set config msix vector\n");
		return -1;
	}

	/* route each queue's interrupt */
	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (psc->sc_intr_pervq)
			vector += qid;
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, qid);
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR,
		    vector);
		ret = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR);
		if (ret != vector) {
			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}
917 
/*
 * Route the config and per-queue interrupts to their MSI-X vectors in
 * the 0.9 register layout.  Each write is read back to verify the
 * device accepted the vector.  No-op when MSI-X is not in use.
 * Returns 0 on success, -1 on failure (reported via log(9) when reinit
 * is set).
 */
static int
virtio_pci_setup_interrupts_09(struct virtio_softc *sc, int reinit)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	int offset, vector, ret, qid;

	if (!virtio_pci_msix_enabled(psc))
		return 0;

	/* route configuration-change interrupts */
	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
	ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
	if (ret != vector) {
		aprint_debug_dev(sc->sc_dev, "%s: expected=%d, actual=%d\n",
		    __func__, vector, ret);
		VIRTIO_PCI_LOG(sc, reinit,
		    "can't set config msix vector\n");
		return -1;
	}

	/* route each queue's interrupt */
	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		offset = VIRTIO_CONFIG_QUEUE_SELECT;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, qid);

		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (psc->sc_intr_pervq)
			vector += qid;

		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
		ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
		if (ret != vector) {
			aprint_debug_dev(sc->sc_dev, "%s[qid=%d]:"
			    " expected=%d, actual=%d\n",
			    __func__, qid, vector, ret);
			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}
965 
/*
 * Establish the MSI-X interrupt handlers whose vectors were allocated by
 * virtio_pci_alloc_interrupts(): the config-change handler on vector
 * VIRTIO_MSIX_CONFIG_VECTOR_INDEX, then either one handler per virtqueue
 * (when psc->sc_intr_pervq) or a single shared queue handler.
 * Returns 0 on success; on failure, disestablishes any handlers already
 * set up and returns -1.
 */
static int
virtio_pci_establish_msix_interrupts(struct virtio_softc *sc,
    const struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	struct virtqueue *vq;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[INTRDEVNAMEBUF];
	char const *intrstr;
	int idx, qid, n;

	/* Configuration-change vector. */
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	snprintf(intr_xname, sizeof(intr_xname), "%s config",
	    device_xname(sc->sc_dev));

	psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
	    sc->sc_ipl, virtio_pci_msix_config_intr, sc, intr_xname);
	if (psc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self,
		    "couldn't establish MSI-X for config\n");
		goto error;
	}

	/* Queue vector(s) start right after the config vector. */
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		/* One dedicated vector per virtqueue. */
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			vq = &sc->sc_vqs[qid];

			snprintf(intr_xname, sizeof(intr_xname), "%s vq#%d",
			    device_xname(sc->sc_dev), qid);

			if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
				pci_intr_setattr(pc, &psc->sc_ihp[n],
				    PCI_INTR_MPSAFE, true);
			}

			/* Each queue gets its own handler and argument. */
			psc->sc_ihs[n] = pci_intr_establish_xname(pc,
			    psc->sc_ihp[n], sc->sc_ipl,
			    vq->vq_intrhand, vq->vq_intrhand_arg, intr_xname);
			if (psc->sc_ihs[n] == NULL) {
				aprint_error_dev(self,
				    "couldn't establish MSI-X for a vq\n");
				goto error;
			}
		}
	} else {
		/* A single vector shared by all virtqueues. */
		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
			pci_intr_setattr(pc, &psc->sc_ihp[idx],
			    PCI_INTR_MPSAFE, true);
		}

		snprintf(intr_xname, sizeof(intr_xname), "%s queues",
		    device_xname(sc->sc_dev));
		psc->sc_ihs[idx] = pci_intr_establish_xname(pc,
		    psc->sc_ihp[idx], sc->sc_ipl,
		    virtio_pci_msix_queue_intr, sc, intr_xname);
		if (psc->sc_ihs[idx] == NULL) {
			aprint_error_dev(self,
			    "couldn't establish MSI-X for queues\n");
			goto error;
		}
	}

	/* Report where each vector interrupts. */
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf,
	    sizeof(intrbuf));
	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		kcpuset_t *affinity;
		int affinity_to, r;

		kcpuset_create(&affinity, false);

		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			/*
			 * Two consecutive queue vectors share a CPU —
			 * presumably so that an rx/tx queue pair lands on
			 * the same CPU; NOTE(review): confirm against
			 * per-vq users such as if_vioif.
			 */
			affinity_to = (qid / 2) % ncpu;

			intrstr = pci_intr_string(pc, psc->sc_ihp[n],
			    intrbuf, sizeof(intrbuf));

			kcpuset_zero(affinity);
			kcpuset_set(affinity, affinity_to);
			/* Affinity is best-effort; failure only changes the message. */
			r = interrupt_distribute(psc->sc_ihs[n], affinity,
			    NULL);
			if (r == 0) {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s"
				    " affinity to %u\n",
				    qid, intrstr, affinity_to);
			} else {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s\n",
				    qid, intrstr);
			}
		}

		kcpuset_destroy(affinity);
	} else {
		intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf,
		    sizeof(intrbuf));
		aprint_normal_dev(self, "queues interrupting at %s\n",
		    intrstr);
	}

	return 0;

error:
	/* Unwind: sc_ihs[] was zero-filled, so NULL means "never established". */
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (psc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			if (psc->sc_ihs[n] == NULL)
				continue;
			pci_intr_disestablish(psc->sc_pa.pa_pc,
			    psc->sc_ihs[n]);
		}

	} else {
		if (psc->sc_ihs[idx] != NULL) {
			pci_intr_disestablish(psc->sc_pa.pa_pc,
			    psc->sc_ihs[idx]);
		}
	}

	return -1;
}
1103 
1104 static int
1105 virtio_pci_establish_intx_interrupt(struct virtio_softc *sc,
1106     const struct pci_attach_args *pa)
1107 {
1108 	struct virtio_pci_softc * const psc = container_of(sc,
1109 	    struct virtio_pci_softc, sc_sc);
1110 	device_t self = sc->sc_dev;
1111 	pci_chipset_tag_t pc = pa->pa_pc;
1112 	char intrbuf[PCI_INTRSTR_LEN];
1113 	char const *intrstr;
1114 
1115 	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
1116 		pci_intr_setattr(pc, &psc->sc_ihp[0], PCI_INTR_MPSAFE, true);
1117 
1118 	psc->sc_ihs[0] = pci_intr_establish_xname(pc, psc->sc_ihp[0],
1119 	    sc->sc_ipl, virtio_pci_intr, sc, device_xname(sc->sc_dev));
1120 	if (psc->sc_ihs[0] == NULL) {
1121 		aprint_error_dev(self, "couldn't establish INTx\n");
1122 		return -1;
1123 	}
1124 
1125 	intrstr = pci_intr_string(pc, psc->sc_ihp[0], intrbuf,
1126 	    sizeof(intrbuf));
1127 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
1128 
1129 	return 0;
1130 }
1131 
/*
 * Allocate and establish this device's interrupts.  Prefers MSI-X (one
 * vector for config plus either one shared vector or one per virtqueue),
 * falling back to INTx if MSI-X is unavailable or its handlers cannot
 * be established.  Sets psc->sc_ihs/sc_ihs_num/sc_intr_pervq and the
 * device-config window offset accordingly.  Returns 0 on success, -1 on
 * failure.
 */
static int
virtio_pci_alloc_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	int error;
	int nmsix;
	int off;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
	pcireg_t ctl;

	nmsix = pci_msix_count(psc->sc_pa.pa_pc, psc->sc_pa.pa_tag);
	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);

	/* We need at least two: one for config and the other for queues */
	if ((sc->sc_flags & VIRTIO_F_INTR_MSIX) == 0 || nmsix < 2) {
		/* Try INTx only */
		max_type = PCI_INTR_TYPE_INTX;
		counts[PCI_INTR_TYPE_INTX] = 1;
	} else {
		/* Try MSI-X first and INTx second */
		if (ISSET(sc->sc_flags, VIRTIO_F_INTR_PERVQ) &&
		    sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX <= nmsix) {
			/* Enough vectors for one per queue plus config. */
			nmsix = sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		} else {
			/* Minimum MSI-X config: config + shared queue vector. */
			nmsix = 2;
		}

		max_type = PCI_INTR_TYPE_MSIX;
		counts[PCI_INTR_TYPE_MSIX] = nmsix;
		counts[PCI_INTR_TYPE_MSI] = 0;
		counts[PCI_INTR_TYPE_INTX] = 1;
	}

retry:
	error = pci_intr_alloc(&psc->sc_pa, &psc->sc_ihp, counts, max_type);
	if (error != 0) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return -1;
	}

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
		/* More than config + shared queue vector means per-vq mode. */
		psc->sc_intr_pervq = nmsix > 2 ? true : false;
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * nmsix,
		    KM_SLEEP);

		error = virtio_pci_establish_msix_interrupts(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * nmsix);
			pci_intr_release(pc, psc->sc_ihp, nmsix);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		psc->sc_ihs_num = nmsix;
		/* Device config window sits after the MSI-X fields. */
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
		virtio_pci_adjust_config_region(psc);
	} else if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
		psc->sc_intr_pervq = false;
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * 1,
		    KM_SLEEP);

		error = virtio_pci_establish_intx_interrupt(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * 1);
			pci_intr_release(pc, psc->sc_ihp, 1);
			return -1;
		}

		psc->sc_ihs_num = 1;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
		virtio_pci_adjust_config_region(psc);

		/*
		 * NOTE(review): pci_get_capability() returns non-zero when
		 * the capability is present, so this branch clears the
		 * MSI-X enable bit on a device that HAS MSI-X while we run
		 * INTx.  The variable name "error" is misleading here —
		 * non-zero is the success/found case, not a failure.
		 */
		error = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
		if (error != 0) {
			ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
			ctl &= ~PCI_MSIX_CTL_ENABLE;
			pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
		}
	}

	/* Tell the core whether per-vq interrupts are actually in effect. */
	if (!psc->sc_intr_pervq)
		CLR(sc->sc_flags, VIRTIO_F_INTR_PERVQ);
	return 0;
}
1224 
1225 static void
1226 virtio_pci_free_interrupts(struct virtio_softc *sc)
1227 {
1228 	struct virtio_pci_softc * const psc = container_of(sc,
1229 	    struct virtio_pci_softc, sc_sc);
1230 
1231 	for (int i = 0; i < psc->sc_ihs_num; i++) {
1232 		if (psc->sc_ihs[i] == NULL)
1233 			continue;
1234 		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[i]);
1235 		psc->sc_ihs[i] = NULL;
1236 	}
1237 
1238 	if (psc->sc_ihs_num > 0) {
1239 		pci_intr_release(psc->sc_pa.pa_pc, psc->sc_ihp,
1240 		    psc->sc_ihs_num);
1241 	}
1242 
1243 	if (psc->sc_ihs != NULL) {
1244 		kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * psc->sc_ihs_num);
1245 		psc->sc_ihs = NULL;
1246 	}
1247 	psc->sc_ihs_num = 0;
1248 }
1249 
1250 static bool
1251 virtio_pci_msix_enabled(struct virtio_pci_softc *psc)
1252 {
1253 	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
1254 
1255 	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX)
1256 		return true;
1257 
1258 	return false;
1259 }
1260 
1261 /*
1262  * Interrupt handler.
1263  */
1264 static int
1265 virtio_pci_intr(void *arg)
1266 {
1267 	struct virtio_softc *sc = arg;
1268 	struct virtio_pci_softc * const psc = container_of(sc,
1269 	    struct virtio_pci_softc, sc_sc);
1270 	int isr, r = 0;
1271 
1272 	/* check and ack the interrupt */
1273 	isr = bus_space_read_1(psc->sc_isr_iot, psc->sc_isr_ioh, 0);
1274 	if (isr == 0)
1275 		return 0;
1276 	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
1277 	    (sc->sc_config_change != NULL))
1278 		r = (sc->sc_config_change)(sc);
1279 	if (sc->sc_intrhand != NULL) {
1280 		if (sc->sc_soft_ih != NULL)
1281 			softint_schedule(sc->sc_soft_ih);
1282 		else
1283 			r |= (sc->sc_intrhand)(sc);
1284 	}
1285 
1286 	return r;
1287 }
1288 
1289 static int
1290 virtio_pci_msix_queue_intr(void *arg)
1291 {
1292 	struct virtio_softc *sc = arg;
1293 	int r = 0;
1294 
1295 	if (sc->sc_intrhand != NULL) {
1296 		if (sc->sc_soft_ih != NULL)
1297 			softint_schedule(sc->sc_soft_ih);
1298 		else
1299 			r |= (sc->sc_intrhand)(sc);
1300 	}
1301 
1302 	return r;
1303 }
1304 
1305 static int
1306 virtio_pci_msix_config_intr(void *arg)
1307 {
1308 	struct virtio_softc *sc = arg;
1309 	int r = 0;
1310 
1311 	if (sc->sc_config_change != NULL)
1312 		r = (sc->sc_config_change)(sc);
1313 	return r;
1314 }
1315 
/* Kernel-module glue: register virtio_pci as a driver-class module. */
MODULE(MODULE_CLASS_DRIVER, virtio_pci, "pci,virtio");

#ifdef _MODULE
#include "ioconf.c"	/* autoconfiguration tables for the module build */
#endif
1321 
1322 static int
1323 virtio_pci_modcmd(modcmd_t cmd, void *opaque)
1324 {
1325 	int error = 0;
1326 
1327 #ifdef _MODULE
1328 	switch (cmd) {
1329 	case MODULE_CMD_INIT:
1330 		error = config_init_component(cfdriver_ioconf_virtio_pci,
1331 		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
1332 		break;
1333 	case MODULE_CMD_FINI:
1334 		error = config_fini_component(cfdriver_ioconf_virtio_pci,
1335 		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
1336 		break;
1337 	default:
1338 		error = ENOTTY;
1339 		break;
1340 	}
1341 #endif
1342 
1343 	return error;
1344 }
1345