xref: /netbsd-src/sys/dev/pci/virtio_pci.c (revision 4724848cf0da353df257f730694b7882798e5daf)
1 /* $NetBSD: virtio_pci.c,v 1.41 2023/04/16 17:57:08 riastradh Exp $ */
2 
3 /*
4  * Copyright (c) 2020 The NetBSD Foundation, Inc.
5  * Copyright (c) 2012 Stefan Fritsch.
6  * Copyright (c) 2010 Minoura Makoto.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.41 2023/04/16 17:57:08 riastradh Exp $");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kmem.h>
36 #include <sys/module.h>
37 #include <sys/endian.h>
38 #include <sys/interrupt.h>
39 #include <sys/syslog.h>
40 
41 #include <sys/device.h>
42 
43 #include <dev/pci/pcidevs.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 
47 #include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
48 #include <dev/pci/virtio_pcireg.h>
49 
50 #define VIRTIO_PRIVATE
51 #include <dev/pci/virtiovar.h> /* XXX: move to non-pci */
52 
53 
/*
 * VIRTIO_PCI_LOG(): driver message helper.  When _use_log is set (e.g.
 * during re-initialization after autoconfiguration) the message goes to
 * log(9) at LOG_DEBUG; otherwise it goes to aprint_error_dev(9).
 */
54 #define VIRTIO_PCI_LOG(_sc, _use_log, _fmt, _args...)	\
55 do {							\
56 	if ((_use_log)) {				\
57 		log(LOG_DEBUG, "%s: " _fmt,		\
58 		    device_xname((_sc)->sc_dev),	\
59 		    ##_args);				\
60 	} else {					\
61 		aprint_error_dev((_sc)->sc_dev,		\
62 		    _fmt, ##_args);			\
63 	}						\
64 } while(0)
65 
66 static int	virtio_pci_match(device_t, cfdata_t, void *);
67 static void	virtio_pci_attach(device_t, device_t, void *);
68 static int	virtio_pci_rescan(device_t, const char *, const int *);
69 static int	virtio_pci_detach(device_t, int);
70 
71 
/* Number of PCI BAR slots (one pcireg_t per mapping register). */
72 #define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
73 				sizeof(pcireg_t))
/*
 * PCI-specific softc wrapping the bus-independent virtio_softc.  Holds
 * the bus_space mappings for the common/notify/isr/device-config regions
 * (separate capabilities in v1.0, subregions of one I/O BAR in v0.9)
 * and the interrupt bookkeeping.
 */
74 struct virtio_pci_softc {
75 	struct virtio_softc	sc_sc;
76 	bool			sc_intr_pervq;
77 
78 	/* IO space */
79 	bus_space_tag_t		sc_iot;
80 	bus_space_handle_t	sc_ioh;
81 	bus_size_t		sc_iosize;
82 	bus_size_t		sc_mapped_iosize;
83 
84 	/* BARs */
85 	bus_space_tag_t		sc_bars_iot[NMAPREG];
86 	bus_space_handle_t	sc_bars_ioh[NMAPREG];
87 	bus_size_t		sc_bars_iosize[NMAPREG];
88 
89 	/* notify space */
90 	bus_space_tag_t		sc_notify_iot;
91 	bus_space_handle_t	sc_notify_ioh;
92 	bus_size_t		sc_notify_iosize;
93 	uint32_t		sc_notify_off_multiplier;
94 
95 	/* isr space */
96 	bus_space_tag_t		sc_isr_iot;
97 	bus_space_handle_t	sc_isr_ioh;
98 	bus_size_t		sc_isr_iosize;
99 
100 	/* generic */
101 	struct pci_attach_args	sc_pa;
102 	pci_intr_handle_t	*sc_ihp;
103 	void			**sc_ihs;
104 	int			sc_ihs_num;
105 	int			sc_devcfg_offset;	/* for 0.9 */
106 };
107 
108 static int	virtio_pci_attach_09(device_t, void *);
109 static void	virtio_pci_kick_09(struct virtio_softc *, uint16_t);
110 static uint16_t	virtio_pci_read_queue_size_09(struct virtio_softc *, uint16_t);
111 static void	virtio_pci_setup_queue_09(struct virtio_softc *, uint16_t, uint64_t);
112 static void	virtio_pci_set_status_09(struct virtio_softc *, int);
113 static void	virtio_pci_negotiate_features_09(struct virtio_softc *, uint64_t);
114 
115 static int	virtio_pci_attach_10(device_t, void *);
116 static void	virtio_pci_kick_10(struct virtio_softc *, uint16_t);
117 static uint16_t	virtio_pci_read_queue_size_10(struct virtio_softc *, uint16_t);
118 static void	virtio_pci_setup_queue_10(struct virtio_softc *, uint16_t, uint64_t);
119 static void	virtio_pci_set_status_10(struct virtio_softc *, int);
120 static void	virtio_pci_negotiate_features_10(struct virtio_softc *, uint64_t);
121 static int	virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen);
122 
123 static int	virtio_pci_alloc_interrupts(struct virtio_softc *);
124 static void	virtio_pci_free_interrupts(struct virtio_softc *);
125 static int	virtio_pci_adjust_config_region(struct virtio_pci_softc *psc);
126 static int	virtio_pci_intr(void *arg);
127 static int	virtio_pci_msix_queue_intr(void *);
128 static int	virtio_pci_msix_config_intr(void *);
129 static int	virtio_pci_setup_interrupts_09(struct virtio_softc *, int);
130 static int	virtio_pci_setup_interrupts_10(struct virtio_softc *, int);
131 static int	virtio_pci_establish_msix_interrupts(struct virtio_softc *,
132 		    struct pci_attach_args *);
133 static int	virtio_pci_establish_intx_interrupt(struct virtio_softc *,
134 		    struct pci_attach_args *);
135 static bool	virtio_pci_msix_enabled(struct virtio_pci_softc *);
136 
137 #define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
138 #define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1
139 
140 /*
141  * When using PCI attached virtio on aarch64-eb under Qemu, the IO space
142  * suddenly read BIG_ENDIAN where it should stay LITTLE_ENDIAN. The data read
143  * 1 byte at a time seem OK but reading bigger lengths result in swapped
144  * endian. This is most notable on reading 8 bytes since we can't use
145  * bus_space_{read,write}_8().
146  */
147 
148 #if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
149 #	define READ_ENDIAN_09	BIG_ENDIAN	/* should be LITTLE_ENDIAN */
150 #	define READ_ENDIAN_10	BIG_ENDIAN
151 #	define STRUCT_ENDIAN_09	BIG_ENDIAN
152 #	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
153 #elif BYTE_ORDER == BIG_ENDIAN
154 #	define READ_ENDIAN_09	LITTLE_ENDIAN
155 #	define READ_ENDIAN_10	BIG_ENDIAN
156 #	define STRUCT_ENDIAN_09	BIG_ENDIAN
157 #	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
158 #else /* little endian */
159 #	define READ_ENDIAN_09	LITTLE_ENDIAN
160 #	define READ_ENDIAN_10	LITTLE_ENDIAN
161 #	define STRUCT_ENDIAN_09	LITTLE_ENDIAN
162 #	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
163 #endif
164 
165 
166 CFATTACH_DECL3_NEW(virtio_pci, sizeof(struct virtio_pci_softc),
167     virtio_pci_match, virtio_pci_attach, virtio_pci_detach, NULL,
168     virtio_pci_rescan, NULL, DVF_DETACH_SHUTDOWN);
169 
170 static const struct virtio_ops virtio_pci_ops_09 = {
171 	.kick = virtio_pci_kick_09,
172 	.read_queue_size = virtio_pci_read_queue_size_09,
173 	.setup_queue = virtio_pci_setup_queue_09,
174 	.set_status = virtio_pci_set_status_09,
175 	.neg_features = virtio_pci_negotiate_features_09,
176 	.alloc_interrupts = virtio_pci_alloc_interrupts,
177 	.free_interrupts = virtio_pci_free_interrupts,
178 	.setup_interrupts = virtio_pci_setup_interrupts_09,
179 };
180 
181 static const struct virtio_ops virtio_pci_ops_10 = {
182 	.kick = virtio_pci_kick_10,
183 	.read_queue_size = virtio_pci_read_queue_size_10,
184 	.setup_queue = virtio_pci_setup_queue_10,
185 	.set_status = virtio_pci_set_status_10,
186 	.neg_features = virtio_pci_negotiate_features_10,
187 	.alloc_interrupts = virtio_pci_alloc_interrupts,
188 	.free_interrupts = virtio_pci_free_interrupts,
189 	.setup_interrupts = virtio_pci_setup_interrupts_10,
190 };
191 
/*
 * Autoconf match: accept QUMRANET (Red Hat) virtio PCI devices.
 * Returns 1 on match, 0 otherwise.  Product-ID ranges and revision
 * rules follow the virtio spec's transitional/non-transitional split.
 */
192 static int
193 virtio_pci_match(device_t parent, cfdata_t match, void *aux)
194 {
195 	struct pci_attach_args *pa;
196 
197 	pa = (struct pci_attach_args *)aux;
198 	switch (PCI_VENDOR(pa->pa_id)) {
199 	case PCI_VENDOR_QUMRANET:
200 		/* Transitional devices MUST have a PCI Revision ID of 0. */
201 		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
202 		      PCI_PRODUCT(pa->pa_id)) &&
203 		     (PCI_PRODUCT(pa->pa_id) <=
204 		      PCI_PRODUCT_QUMRANET_VIRTIO_103F)) &&
205 	              PCI_REVISION(pa->pa_class) == 0)
206 			return 1;
207 		/*
208 		 * Non-transitional devices SHOULD have a PCI Revision
209 		 * ID of 1 or higher.  Drivers MUST match any PCI
210 		 * Revision ID value.
211 		 */
212 		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1040 <=
213 		      PCI_PRODUCT(pa->pa_id)) &&
214 		     (PCI_PRODUCT(pa->pa_id) <=
215 		      PCI_PRODUCT_QUMRANET_VIRTIO_107F)) &&
216 		      /* XXX: TODO */
217 		      PCI_REVISION(pa->pa_class) == 1)
218 			return 1;
219 		break;
220 	}
221 
222 	return 0;
223 }
224 
/*
 * Autoconf attach: determine the child device id from the PCI revision
 * (rev 0: subsystem id; rev 1: product number offset), enable bus
 * mastering and I/O, map registers via the revision-specific attach
 * helper, reset the device, and rescan for the child driver.
 */
225 static void
226 virtio_pci_attach(device_t parent, device_t self, void *aux)
227 {
228 	struct virtio_pci_softc * const psc = device_private(self);
229 	struct virtio_softc * const sc = &psc->sc_sc;
230 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
231 	pci_chipset_tag_t pc = pa->pa_pc;
232 	pcitag_t tag = pa->pa_tag;
233 	int revision;
234 	int ret;
235 	pcireg_t id;
236 	pcireg_t csr;
237 
238 	revision = PCI_REVISION(pa->pa_class);
239 	switch (revision) {
240 	case 0:
241 		/* subsystem ID shows what I am */
242 		id = PCI_SUBSYS_ID(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
243 		break;
244 	case 1:
245 		/* pci product number shows what I am */
246 		id = PCI_PRODUCT(pa->pa_id) - PCI_PRODUCT_QUMRANET_VIRTIO_1040;
247 		break;
248 	default:
249 		aprint_normal(": unknown revision 0x%02x; giving up\n",
250 			      revision);
251 		return;
252 	}
253 
254 	aprint_normal("\n");
255 	aprint_naive("\n");
256 	virtio_print_device_type(self, id, revision);
257 
/* Enable bus mastering and I/O-space decoding before touching the device. */
258 	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
259 	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE;
260 	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
261 
262 	sc->sc_dev = self;
263 	psc->sc_pa = *pa;
264 	psc->sc_iot = pa->pa_iot;
265 
/* Prefer 64-bit DMA when the platform supports it. */
266 	sc->sc_dmat = pa->pa_dmat;
267 	if (pci_dma64_available(pa))
268 		sc->sc_dmat = pa->pa_dmat64;
269 
270 	/* attach is dependent on revision */
271 	ret = 0;
272 	if (revision == 1) {
273 		/* try to attach 1.0 */
274 		ret = virtio_pci_attach_10(self, aux);
275 	}
276 	if (ret == 0 && revision == 0) {
277 		/* revision 0 means 0.9 only or both 0.9 and 1.0 */
278 		ret = virtio_pci_attach_09(self, aux);
279 	}
280 	if (ret) {
281 		aprint_error_dev(self, "cannot attach (%d)\n", ret);
282 		return;
283 	}
284 	KASSERT(sc->sc_ops);
285 
286 	/* preset config region */
287 	psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
288 	if (virtio_pci_adjust_config_region(psc))
289 		return;
290 
291 	/* generic */
292 	virtio_device_reset(sc);
293 	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
294 	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
295 
296 	sc->sc_childdevid = id;
297 	sc->sc_child = NULL;
298 	virtio_pci_rescan(self, NULL, NULL);
299 	return;
300 }
301 
302 /* ARGSUSED */
/*
 * Rescan hook: attach the child virtio device driver (net, blk, ...)
 * if none is attached yet.  Always returns 0; attach failure is
 * detected separately via virtio_attach_failed().
 */
303 static int
304 virtio_pci_rescan(device_t self, const char *ifattr, const int *locs)
305 {
306 	struct virtio_pci_softc * const psc = device_private(self);
307 	struct virtio_softc * const sc = &psc->sc_sc;
308 	struct virtio_attach_args va;
309 
310 	if (sc->sc_child)	/* Child already attached? */
311 		return 0;
312 
313 	memset(&va, 0, sizeof(va));
314 	va.sc_childdevid = sc->sc_childdevid;
315 
316 	config_found(self, &va, NULL, CFARGS_NONE);
317 
318 	if (virtio_attach_failed(sc))
319 		return 0;
320 
321 	return 0;
322 }
323 
324 
/*
 * Autoconf detach: detach the child first, assert it released its
 * queues and interrupts, then unmap the I/O region.  Returns 0 on
 * success or the child's detach error.
 */
325 static int
326 virtio_pci_detach(device_t self, int flags)
327 {
328 	struct virtio_pci_softc * const psc = device_private(self);
329 	struct virtio_softc * const sc = &psc->sc_sc;
330 	int r;
331 
332 	r = config_detach_children(self, flags);
333 	if (r != 0)
334 		return r;
335 
336 	/* Check that child never attached, or detached properly */
337 	KASSERTMSG(!ISSET(sc->sc_child_flags,
338 		(VIRTIO_CHILD_ATTACH_FINISHED|VIRTIO_CHILD_ATTACH_FAILED)) ||
339 	    ISSET(sc->sc_child_flags, VIRTIO_CHILD_DETACHED),
340 	    "%s: child flags %x", device_xname(self), sc->sc_child_flags);
341 	KASSERT(sc->sc_vqs == NULL);
342 	KASSERT(psc->sc_ihs_num == 0);
343 
/* sc_mapped_iosize may exceed sc_iosize for v1.0 (whole BAR was mapped). */
344 	if (psc->sc_iosize)
345 		bus_space_unmap(psc->sc_iot, psc->sc_ioh,
346 			psc->sc_mapped_iosize);
347 	psc->sc_iosize = 0;
348 
349 	return 0;
350 }
351 
352 
/*
 * Map the legacy (v0.9) register layout: one I/O BAR holding the common
 * config, with the notify and ISR registers carved out as subregions.
 * Installs the 0.9 ops vector and endianness.  Returns 0 or EIO.
 */
353 static int
354 virtio_pci_attach_09(device_t self, void *aux)
355 	//struct virtio_pci_softc *psc, struct pci_attach_args *pa)
356 {
357 	struct virtio_pci_softc * const psc = device_private(self);
358 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
359 	struct virtio_softc * const sc = &psc->sc_sc;
360 //	pci_chipset_tag_t pc = pa->pa_pc;
361 //	pcitag_t tag = pa->pa_tag;
362 
363 	/* complete IO region */
364 	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
365 			   &psc->sc_iot, &psc->sc_ioh, NULL, &psc->sc_iosize)) {
366 		aprint_error_dev(self, "can't map i/o space\n");
367 		return EIO;
368 	}
369 	psc->sc_mapped_iosize = psc->sc_iosize;
370 
371 	/* queue space */
372 	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
373 			VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &psc->sc_notify_ioh)) {
374 		aprint_error_dev(self, "can't map notify i/o space\n");
375 		return EIO;
376 	}
377 	psc->sc_notify_iosize = 2;
378 	psc->sc_notify_iot = psc->sc_iot;
379 
380 	/* ISR space */
381 	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
382 			VIRTIO_CONFIG_ISR_STATUS, 1, &psc->sc_isr_ioh)) {
383 		aprint_error_dev(self, "can't map isr i/o space\n");
384 		return EIO;
385 	}
386 	psc->sc_isr_iosize = 1;
387 	psc->sc_isr_iot = psc->sc_iot;
388 
389 	/* set our version 0.9 ops */
390 	sc->sc_ops = &virtio_pci_ops_09;
391 	sc->sc_bus_endian    = READ_ENDIAN_09;
392 	sc->sc_struct_endian = STRUCT_ENDIAN_09;
393 	return 0;
394 }
395 
396 
/*
 * Map the modern (v1.0) register layout: locate the COMMON/NOTIFY/ISR
 * (mandatory) and DEVICE (optional) vendor capabilities, map every BAR
 * any of them references, then carve each region out as a subregion.
 * Installs the 1.0 ops vector and endianness.  Returns 0, ENODEV when a
 * mandatory capability is missing, or EIO on mapping failure (all BARs
 * mapped so far are unmapped on the error path).
 */
397 static int
398 virtio_pci_attach_10(device_t self, void *aux)
399 {
400 	struct virtio_pci_softc * const psc = device_private(self);
401 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
402 	struct virtio_softc * const sc = &psc->sc_sc;
403 	pci_chipset_tag_t pc = pa->pa_pc;
404 	pcitag_t tag = pa->pa_tag;
405 
406 	struct virtio_pci_cap common, isr, device;
407 	struct virtio_pci_notify_cap notify;
408 	int have_device_cfg = 0;
409 	bus_size_t bars[NMAPREG] = { 0 };
410 	int bars_idx[NMAPREG] = { 0 };
411 	struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
412 	int i, j, ret = 0;
413 
414 	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_COMMON_CFG,
415 			&common, sizeof(common)))
416 		return ENODEV;
417 	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_NOTIFY_CFG,
418 			&notify, sizeof(notify)))
419 		return ENODEV;
420 	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_ISR_CFG,
421 			&isr, sizeof(isr)))
422 		return ENODEV;
/* DEVICE config is optional; a zeroed cap means "not present". */
423 	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_DEVICE_CFG,
424 			&device, sizeof(device)))
425 		memset(&device, 0, sizeof(device));
426 	else
427 		have_device_cfg = 1;
428 
429 	/* Figure out which bars we need to map */
430 	for (i = 0; i < __arraycount(caps); i++) {
431 		int bar = caps[i]->bar;
432 		bus_size_t len = caps[i]->offset + caps[i]->length;
433 		if (caps[i]->length == 0)
434 			continue;
435 		if (bars[bar] < len)
436 			bars[bar] = len;
437 	}
438 
/* Map each needed BAR once; bars_idx[] maps BAR number -> mapping slot. */
439 	for (i = j = 0; i < __arraycount(bars); i++) {
440 		int reg;
441 		pcireg_t type;
442 		if (bars[i] == 0)
443 			continue;
444 		reg = PCI_BAR(i);
445 		type = pci_mapreg_type(pc, tag, reg);
446 		if (pci_mapreg_map(pa, reg, type, 0,
447 				&psc->sc_bars_iot[j], &psc->sc_bars_ioh[j],
448 				NULL, &psc->sc_bars_iosize[j])) {
449 			aprint_error_dev(self, "can't map bar %u \n", i);
450 			ret = EIO;
451 			goto err;
452 		}
453 		aprint_debug_dev(self,
454 		    "bar[%d]: iot %p, size 0x%" PRIxBUSSIZE "\n",
455 		    j, psc->sc_bars_iot[j], psc->sc_bars_iosize[j]);
456 		bars_idx[i] = j;
457 		j++;
458 	}
459 
460 	i = bars_idx[notify.cap.bar];
461 	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
462 			notify.cap.offset, notify.cap.length,
463 			&psc->sc_notify_ioh)) {
464 		aprint_error_dev(self, "can't map notify i/o space\n");
465 		ret = EIO;
466 		goto err;
467 	}
468 	psc->sc_notify_iosize = notify.cap.length;
469 	psc->sc_notify_iot = psc->sc_bars_iot[i];
470 	psc->sc_notify_off_multiplier = le32toh(notify.notify_off_multiplier);
471 
472 	if (have_device_cfg) {
473 		i = bars_idx[device.bar];
474 		if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
475 				device.offset, device.length,
476 				&sc->sc_devcfg_ioh)) {
477 			aprint_error_dev(self, "can't map devcfg i/o space\n");
478 			ret = EIO;
479 			goto err;
480 		}
481 		aprint_debug_dev(self,
482 			"device.offset = 0x%x, device.length = 0x%x\n",
483 			device.offset, device.length);
484 		sc->sc_devcfg_iosize = device.length;
485 		sc->sc_devcfg_iot = psc->sc_bars_iot[i];
486 	}
487 
488 	i = bars_idx[isr.bar];
489 	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
490 			isr.offset, isr.length, &psc->sc_isr_ioh)) {
491 		aprint_error_dev(self, "can't map isr i/o space\n");
492 		ret = EIO;
493 		goto err;
494 	}
495 	psc->sc_isr_iosize = isr.length;
496 	psc->sc_isr_iot = psc->sc_bars_iot[i];
497 
498 	i = bars_idx[common.bar];
499 	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
500 			common.offset, common.length, &psc->sc_ioh)) {
501 		aprint_error_dev(self, "can't map common i/o space\n");
502 		ret = EIO;
503 		goto err;
504 	}
505 	psc->sc_iosize = common.length;
506 	psc->sc_iot = psc->sc_bars_iot[i];
/* Remember full BAR size: detach unmaps the whole BAR, not the subregion. */
507 	psc->sc_mapped_iosize = psc->sc_bars_iosize[i];
508 
509 	psc->sc_sc.sc_version_1 = 1;
510 
511 	/* set our version 1.0 ops */
512 	sc->sc_ops = &virtio_pci_ops_10;
513 	sc->sc_bus_endian    = READ_ENDIAN_10;
514 	sc->sc_struct_endian = STRUCT_ENDIAN_10;
515 	return 0;
516 
517 err:
518 	/* undo our pci_mapreg_map()s */
519 	for (i = 0; i < __arraycount(bars); i++) {
520 		if (psc->sc_bars_iosize[i] == 0)
521 			continue;
522 		bus_space_unmap(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
523 				psc->sc_bars_iosize[i]);
524 	}
525 	return ret;
526 }
527 
528 /* v1.0 attach helper */
/*
 * Walk the PCI vendor-specific capability list looking for a virtio
 * capability of the given cfg_type.  On success the (at least
 * sizeof(struct virtio_pci_cap)-sized) caller buffer is filled with the
 * capability contents, converted from the little-endian config space;
 * the offset/length fields get an extra byte-order fixup.  Returns 0,
 * ENOENT if not found, or ERANGE if the buffer is too small.
 */
529 static int
530 virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen)
531 {
532 	device_t self = psc->sc_sc.sc_dev;
533 	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
534 	pcitag_t tag = psc->sc_pa.pa_tag;
535 	unsigned int offset, i, len;
536 	union {
537 		pcireg_t reg[8];
538 		struct virtio_pci_cap vcap;
539 	} *v = buf;
540 
541 	if (buflen < sizeof(struct virtio_pci_cap))
542 		return ERANGE;
543 
544 	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v->reg[0]))
545 		return ENOENT;
546 
/* Read the fixed header of each vendor cap until the type matches. */
547 	do {
548 		for (i = 0; i < 4; i++)
549 			v->reg[i] =
550 				le32toh(pci_conf_read(pc, tag, offset + i * 4));
551 		if (v->vcap.cfg_type == cfg_type)
552 			break;
553 		offset = v->vcap.cap_next;
554 	} while (offset != 0);
555 
556 	if (offset == 0)
557 		return ENOENT;
558 
/* Larger caps (e.g. notify) carry extra words past the common header. */
559 	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
560 		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
561 		if (len > buflen) {
562 			aprint_error_dev(self, "%s cap too large\n", __func__);
563 			return ERANGE;
564 		}
565 		for (i = 4; i < len / sizeof(pcireg_t);  i++)
566 			v->reg[i] =
567 				le32toh(pci_conf_read(pc, tag, offset + i * 4));
568 	}
569 
570 	/* endian fixup */
571 	v->vcap.offset = le32toh(v->vcap.offset);
572 	v->vcap.length = le32toh(v->vcap.length);
573 	return 0;
574 }
575 
576 
577 /* -------------------------------------
578  * Version 0.9 support
579  * -------------------------------------*/
580 
/* v0.9 kick: write the queue index to the notify register. */
581 static void
582 virtio_pci_kick_09(struct virtio_softc *sc, uint16_t idx)
583 {
584 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
585 
586 	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, 0, idx);
587 }
588 
589 /* only applicable for v 0.9 but also called for 1.0 */
/*
 * (Re)map the v0.9 device-config subregion, whose start depends on
 * whether MSI-X is in use (sc_devcfg_offset).  No-op for v1.0, which
 * has a dedicated device-config capability.  Returns 0 or EIO.
 */
590 static int
591 virtio_pci_adjust_config_region(struct virtio_pci_softc *psc)
592 {
593 	struct virtio_softc * const sc = &psc->sc_sc;
594 	device_t self = sc->sc_dev;
595 
596 	if (psc->sc_sc.sc_version_1)
597 		return 0;
598 
599 	sc->sc_devcfg_iosize = psc->sc_iosize - psc->sc_devcfg_offset;
600 	sc->sc_devcfg_iot = psc->sc_iot;
601 	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
602 			psc->sc_devcfg_offset, sc->sc_devcfg_iosize,
603 			&sc->sc_devcfg_ioh)) {
604 		aprint_error_dev(self, "can't map config i/o space\n");
605 		return EIO;
606 	}
607 
608 	return 0;
609 }
610 
/* v0.9: select queue idx, then read its size (0 means "not present"). */
611 static uint16_t
612 virtio_pci_read_queue_size_09(struct virtio_softc *sc, uint16_t idx)
613 {
614 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
615 
616 	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
617 	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
618 	return bus_space_read_2(psc->sc_iot, psc->sc_ioh,
619 	    VIRTIO_CONFIG_QUEUE_SIZE);
620 }
621 
/*
 * v0.9 queue setup: select queue idx and program its ring address in
 * VIRTIO_PAGE_SIZE units (addr == 0 disables the queue).  With MSI-X,
 * also bind the queue to its interrupt vector.
 */
622 static void
623 virtio_pci_setup_queue_09(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
624 {
625 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
626 
627 	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
628 	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
629 	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
630 	    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);
631 
632 	if (psc->sc_ihs_num > 1) {
633 		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
634 		if (psc->sc_intr_pervq)
635 			vec += idx;
636 		bus_space_write_2(psc->sc_iot, psc->sc_ioh,
637 		    VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec);
638 	}
639 }
640 
/*
 * v0.9 status update: OR the new bits into the current status.
 * Writing 0 resets the device, so the read-modify-write is skipped
 * in that case.
 */
641 static void
642 virtio_pci_set_status_09(struct virtio_softc *sc, int status)
643 {
644 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
645 	int old = 0;
646 
647 	if (status != 0) {
648 	    old = bus_space_read_1(psc->sc_iot, psc->sc_ioh,
649 		VIRTIO_CONFIG_DEVICE_STATUS);
650 	}
651 	bus_space_write_1(psc->sc_iot, psc->sc_ioh,
652 	    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
653 }
654 
/*
 * v0.9 feature negotiation: intersect the 32-bit host feature word
 * with the guest-supported set, write it back, and record the result
 * in sc_active_features.  Legacy devices cannot reject the selection.
 */
655 static void
656 virtio_pci_negotiate_features_09(struct virtio_softc *sc, uint64_t guest_features)
657 {
658 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
659 	uint32_t r;
660 
661 	r = bus_space_read_4(psc->sc_iot, psc->sc_ioh,
662 	    VIRTIO_CONFIG_DEVICE_FEATURES);
663 
664 	r &= guest_features;
665 
666 	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
667 	    VIRTIO_CONFIG_GUEST_FEATURES, r);
668 
669 	sc->sc_active_features = r;
670 }
671 
672 /* -------------------------------------
673  * Version 1.0 support
674  * -------------------------------------*/
675 
/*
 * v1.0 kick: write the queue index at the queue's notify location
 * (per-queue notify_off scaled by notify_off_multiplier).
 */
676 static void
677 virtio_pci_kick_10(struct virtio_softc *sc, uint16_t idx)
678 {
679 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
680 	unsigned offset = sc->sc_vqs[idx].vq_notify_off *
681 		psc->sc_notify_off_multiplier;
682 
683 	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, offset, idx);
684 }
685 
686 
/* v1.0: select queue idx, then read its size from the common config. */
687 static uint16_t
688 virtio_pci_read_queue_size_10(struct virtio_softc *sc, uint16_t idx)
689 {
690 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
691 	bus_space_tag_t	   iot = psc->sc_iot;
692 	bus_space_handle_t ioh = psc->sc_ioh;
693 
694 	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, idx);
695 	return bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SIZE);
696 }
697 
698 /*
699  * By definition little endian only in v1.0.  NB: "MAY" in the text
700  * below refers to "independently" (i.e. the order of accesses) not
701  * "32-bit" (which is restricted by the earlier "MUST").
702  *
703  * 4.1.3.1 Driver Requirements: PCI Device Layout
704  *
705  * For device configuration access, the driver MUST use ... 32-bit
706  * wide and aligned accesses for ... 64-bit wide fields.  For 64-bit
707  * fields, the driver MAY access each of the high and low 32-bit parts
708  * of the field independently.
709  */
/*
 * Write a 64-bit register as two 32-bit halves (see the spec quote
 * above).  _QUAD_HIGHWORD selects which half is stored first so the
 * in-memory order matches the host's quad layout.
 */
710 static __inline void
711 virtio_pci_bus_space_write_8(bus_space_tag_t iot, bus_space_handle_t ioh,
712      bus_size_t offset, uint64_t value)
713 {
714 #if _QUAD_HIGHWORD
715 	bus_space_write_4(iot, ioh, offset, BUS_ADDR_LO32(value));
716 	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_HI32(value));
717 #else
718 	bus_space_write_4(iot, ioh, offset, BUS_ADDR_HI32(value));
719 	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_LO32(value));
720 #endif
721 }
722 
/*
 * v1.0 queue setup: select the queue, then either disable it and zero
 * its ring addresses (addr == 0) or program desc/avail/used addresses,
 * enable it, and latch its notify offset.  With MSI-X, also bind the
 * queue to its interrupt vector.
 */
723 static void
724 virtio_pci_setup_queue_10(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
725 {
726 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
727 	struct virtqueue *vq = &sc->sc_vqs[idx];
728 	bus_space_tag_t	   iot = psc->sc_iot;
729 	bus_space_handle_t ioh = psc->sc_ioh;
730 	KASSERT(vq->vq_index == idx);
731 
732 	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, vq->vq_index);
733 	if (addr == 0) {
734 		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_ENABLE, 0);
735 		virtio_pci_bus_space_write_8(iot, ioh,
736 		    VIRTIO_CONFIG1_QUEUE_DESC,   0);
737 		virtio_pci_bus_space_write_8(iot, ioh,
738 		    VIRTIO_CONFIG1_QUEUE_AVAIL,  0);
739 		virtio_pci_bus_space_write_8(iot, ioh,
740 		    VIRTIO_CONFIG1_QUEUE_USED,   0);
741 	} else {
742 		virtio_pci_bus_space_write_8(iot, ioh,
743 			VIRTIO_CONFIG1_QUEUE_DESC, addr);
744 		virtio_pci_bus_space_write_8(iot, ioh,
745 			VIRTIO_CONFIG1_QUEUE_AVAIL, addr + vq->vq_availoffset);
746 		virtio_pci_bus_space_write_8(iot, ioh,
747 			VIRTIO_CONFIG1_QUEUE_USED, addr + vq->vq_usedoffset);
748 		bus_space_write_2(iot, ioh,
749 			VIRTIO_CONFIG1_QUEUE_ENABLE, 1);
750 		vq->vq_notify_off = bus_space_read_2(iot, ioh,
751 			VIRTIO_CONFIG1_QUEUE_NOTIFY_OFF);
752 	}
753 
754 	if (psc->sc_ihs_num > 1) {
755 		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
756 		if (psc->sc_intr_pervq)
757 			vec += idx;
758 		bus_space_write_2(iot, ioh,
759 			VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, vec);
760 	}
761 }
762 
/*
 * v1.0 status update: OR the new bits into the current status.
 * Writing 0 resets the device, so the read-modify-write is skipped
 * in that case.
 */
763 static void
764 virtio_pci_set_status_10(struct virtio_softc *sc, int status)
765 {
766 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
767 	bus_space_tag_t	   iot = psc->sc_iot;
768 	bus_space_handle_t ioh = psc->sc_ioh;
769 	int old = 0;
770 
771 	if (status)
772 		old = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
773 	bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS, status | old);
774 }
775 
/*
 * v1.0 feature negotiation: read the 64-bit host feature set via two
 * 32-bit select/read pairs, intersect it with the guest-supported set
 * (forcing VERSION_1 on and the 0.9-only NOTIFY_ON_EMPTY off), write
 * the result back, and confirm acceptance via the FEATURES_OK status
 * bit.  On rejection the device is marked FAILED and
 * sc_active_features is left at 0.
 *
 * Note: declared static in the forward declaration above; the
 * definition now says so too, keeping the linkage specification
 * consistent and the symbol out of the kernel namespace.
 */
776 static void
777 virtio_pci_negotiate_features_10(struct virtio_softc *sc, uint64_t guest_features)
778 {
779 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
780 	device_t self          =  sc->sc_dev;
781 	bus_space_tag_t	   iot = psc->sc_iot;
782 	bus_space_handle_t ioh = psc->sc_ioh;
783 	uint64_t host, negotiated, device_status;
784 
785 	guest_features |= VIRTIO_F_VERSION_1;
786 	/* notify on empty is 0.9 only */
787 	guest_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
788 	sc->sc_active_features = 0;
789 
790 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 0);
791 	host = bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE);
792 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 1);
793 	host |= (uint64_t)
794 		bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE) << 32;
795 
796 	negotiated = host & guest_features;
797 
798 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 0);
799 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
800 			negotiated & 0xffffffff);
801 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 1);
802 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
803 			negotiated >> 32);
804 	virtio_pci_set_status_10(sc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);
805 
806 	device_status = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
807 	if ((device_status & VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
808 		aprint_error_dev(self, "feature negotiation failed\n");
809 		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
810 				VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
811 		return;
812 	}
813 
814 	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
815 		aprint_error_dev(self, "host rejected version 1\n");
816 		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
817 				VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
818 		return;
819 	}
820 
821 	sc->sc_active_features = negotiated;
822 	return;
823 }
824 
825 
826 /* -------------------------------------
827  * Generic PCI interrupt code
828  * -------------------------------------*/
829 
/*
 * v1.0 MSI-X vector programming: assign the config vector and one
 * vector per queue (or a single shared queue vector), reading each
 * register back to confirm the device accepted the assignment (the
 * device reports VIRTIO_MSI_NO_VECTOR on failure).  No-op without
 * MSI-X.  Returns 0 on success, -1 on failure; `reinit` selects
 * log(9) vs aprint for error reporting.
 */
830 static int
831 virtio_pci_setup_interrupts_10(struct virtio_softc *sc, int reinit)
832 {
833 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
834 	bus_space_tag_t	   iot = psc->sc_iot;
835 	bus_space_handle_t ioh = psc->sc_ioh;
836 	int vector, ret, qid;
837 
838 	if (!virtio_pci_msix_enabled(psc))
839 		return 0;
840 
841 	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
842 	bus_space_write_2(iot, ioh,
843 		VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR, vector);
844 	ret = bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR);
845 	if (ret != vector) {
846 		VIRTIO_PCI_LOG(sc, reinit,
847 		    "can't set config msix vector\n");
848 		return -1;
849 	}
850 
851 	for (qid = 0; qid < sc->sc_nvqs; qid++) {
852 		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
853 
854 		if (psc->sc_intr_pervq)
855 			vector += qid;
856 		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, qid);
857 		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR,
858 			vector);
859 		ret = bus_space_read_2(iot, ioh,
860 			VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR);
861 		if (ret != vector) {
862 			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
863 			    "msix vector\n", qid);
864 			return -1;
865 		}
866 	}
867 
868 	return 0;
869 }
870 
/*
 * v0.9 MSI-X vector programming: same protocol as the 1.0 variant but
 * through the legacy MSI config/queue vector registers; each write is
 * read back to verify the device accepted the vector.  No-op without
 * MSI-X.  Returns 0 on success, -1 on failure.
 */
871 static int
872 virtio_pci_setup_interrupts_09(struct virtio_softc *sc, int reinit)
873 {
874 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
875 	int offset, vector, ret, qid;
876 
877 	if (!virtio_pci_msix_enabled(psc))
878 		return 0;
879 
880 	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
881 	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
882 
883 	bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
884 	ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
885 	if (ret != vector) {
886 		aprint_debug_dev(sc->sc_dev, "%s: expected=%d, actual=%d\n",
887 		    __func__, vector, ret);
888 		VIRTIO_PCI_LOG(sc, reinit,
889 		    "can't set config msix vector\n");
890 		return -1;
891 	}
892 
893 	for (qid = 0; qid < sc->sc_nvqs; qid++) {
894 		offset = VIRTIO_CONFIG_QUEUE_SELECT;
895 		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, qid);
896 
897 		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
898 		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
899 
900 		if (psc->sc_intr_pervq)
901 			vector += qid;
902 
903 		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
904 		ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
905 		if (ret != vector) {
906 			aprint_debug_dev(sc->sc_dev, "%s[qid=%d]:"
907 			    " expected=%d, actual=%d\n",
908 			    __func__, qid, vector, ret);
909 			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
910 			    "msix vector\n", qid);
911 			return -1;
912 		}
913 	}
914 
915 	return 0;
916 }
917 
/*
 * Establish the MSI-X handlers allocated by virtio_pci_alloc_interrupts():
 * one vector for config change notifications plus either a single shared
 * vector for all virtqueues or, when psc->sc_intr_pervq is set, one vector
 * per virtqueue.  Returns 0 on success; on failure disestablishes anything
 * already set up and returns -1.
 */
static int
virtio_pci_establish_msix_interrupts(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	struct virtqueue *vq;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[INTRDEVNAMEBUF];
	char const *intrstr;
	int idx, qid, n;

	/* Config change vector. */
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	snprintf(intr_xname, sizeof(intr_xname), "%s config",
	    device_xname(sc->sc_dev));

	psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
	    sc->sc_ipl, virtio_pci_msix_config_intr, sc, intr_xname);
	if (psc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self, "couldn't establish MSI-X for config\n");
		goto error;
	}

	/* Queue vector(s) follow the config vector. */
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		/* One vector per virtqueue, dispatching to that vq's handler. */
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			vq = &sc->sc_vqs[qid];

			snprintf(intr_xname, sizeof(intr_xname), "%s vq#%d",
			    device_xname(sc->sc_dev), qid);

			if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
				pci_intr_setattr(pc, &psc->sc_ihp[n],
				    PCI_INTR_MPSAFE, true);
			}

			psc->sc_ihs[n] = pci_intr_establish_xname(pc, psc->sc_ihp[n],
			    sc->sc_ipl, vq->vq_intrhand, vq->vq_intrhand_arg, intr_xname);
			if (psc->sc_ihs[n] == NULL) {
				aprint_error_dev(self, "couldn't establish MSI-X for a vq\n");
				goto error;
			}
		}
	} else {
		/* One vector shared by every virtqueue. */
		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
			pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

		snprintf(intr_xname, sizeof(intr_xname), "%s queues",
		    device_xname(sc->sc_dev));
		psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
		    sc->sc_ipl, virtio_pci_msix_queue_intr, sc, intr_xname);
		if (psc->sc_ihs[idx] == NULL) {
			aprint_error_dev(self, "couldn't establish MSI-X for queues\n");
			goto error;
		}
	}

	/* Report where each vector interrupts. */
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		kcpuset_t *affinity;
		int affinity_to, r;

		kcpuset_create(&affinity, false);

		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			/*
			 * Spread vectors over the CPUs.
			 * NOTE(review): qid / 2 places adjacent virtqueues on
			 * the same CPU — presumably to keep rx/tx pairs
			 * together; confirm for devices with other layouts.
			 */
			affinity_to = (qid / 2) % ncpu;

			intrstr = pci_intr_string(pc, psc->sc_ihp[n],
			    intrbuf, sizeof(intrbuf));

			kcpuset_zero(affinity);
			kcpuset_set(affinity, affinity_to);
			/* Affinity is best effort; report either way. */
			r = interrupt_distribute(psc->sc_ihs[n], affinity, NULL);
			if (r == 0) {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s affinity to %u\n",
				    qid, intrstr, affinity_to);
			} else {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s\n",
				    qid, intrstr);
			}
		}

		kcpuset_destroy(affinity);
	} else {
		intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
		aprint_normal_dev(self, "queues interrupting at %s\n", intrstr);
	}

	return 0;

error:
	/*
	 * Tear down whatever was established.  sc_ihs[] was zero-filled
	 * at allocation, so never-established entries are NULL and skipped.
	 */
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (psc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			if (psc->sc_ihs[n] == NULL)
				continue;
			pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[n]);
		}

	} else {
		if (psc->sc_ihs[idx] != NULL)
			pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	}

	return -1;
}
1039 
1040 static int
1041 virtio_pci_establish_intx_interrupt(struct virtio_softc *sc,
1042     struct pci_attach_args *pa)
1043 {
1044 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
1045 	device_t self = sc->sc_dev;
1046 	pci_chipset_tag_t pc = pa->pa_pc;
1047 	char intrbuf[PCI_INTRSTR_LEN];
1048 	char const *intrstr;
1049 
1050 	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
1051 		pci_intr_setattr(pc, &psc->sc_ihp[0], PCI_INTR_MPSAFE, true);
1052 
1053 	psc->sc_ihs[0] = pci_intr_establish_xname(pc, psc->sc_ihp[0],
1054 	    sc->sc_ipl, virtio_pci_intr, sc, device_xname(sc->sc_dev));
1055 	if (psc->sc_ihs[0] == NULL) {
1056 		aprint_error_dev(self, "couldn't establish INTx\n");
1057 		return -1;
1058 	}
1059 
1060 	intrstr = pci_intr_string(pc, psc->sc_ihp[0], intrbuf, sizeof(intrbuf));
1061 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
1062 
1063 	return 0;
1064 }
1065 
/*
 * Allocate this device's interrupt resources and establish handlers.
 * MSI-X (one config vector plus one or more queue vectors) is tried
 * first when VIRTIO_F_INTR_MSIX is set and at least two vectors are
 * available; otherwise, or if MSI-X establishment fails, a single
 * INTx line is used.  Returns 0 on success, -1 on failure.
 */
static int
virtio_pci_alloc_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	int error;
	int nmsix;
	int off;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
	pcireg_t ctl;

	nmsix = pci_msix_count(psc->sc_pa.pa_pc, psc->sc_pa.pa_tag);
	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);

	/* We need at least two: one for config and the other for queues */
	if ((sc->sc_flags & VIRTIO_F_INTR_MSIX) == 0 || nmsix < 2) {
		/* Try INTx only */
		max_type = PCI_INTR_TYPE_INTX;
		counts[PCI_INTR_TYPE_INTX] = 1;
	} else {
		/* Try MSI-X first and INTx second */
		/*
		 * Use a vector per virtqueue only when the driver asked for
		 * it and the device exposes enough vectors; otherwise fall
		 * back to config + one shared queue vector.
		 */
		if (ISSET(sc->sc_flags, VIRTIO_F_INTR_PERVQ) &&
		    sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX <= nmsix) {
			nmsix = sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		} else {
			nmsix = 2;
		}

		max_type = PCI_INTR_TYPE_MSIX;
		counts[PCI_INTR_TYPE_MSIX] = nmsix;
		counts[PCI_INTR_TYPE_MSI] = 0;
		counts[PCI_INTR_TYPE_INTX] = 1;
	}

retry:
	error = pci_intr_alloc(&psc->sc_pa, &psc->sc_ihp, counts, max_type);
	if (error != 0) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return -1;
	}

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
		/* More than config + shared queue vector => per-vq mode. */
		psc->sc_intr_pervq = nmsix > 2 ? true : false;
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * nmsix,
		    KM_SLEEP);

		error = virtio_pci_establish_msix_interrupts(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * nmsix);
			pci_intr_release(pc, psc->sc_ihp, nmsix);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		psc->sc_ihs_num = nmsix;
		/* Device config window shifts when MSI-X is in use. */
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
		virtio_pci_adjust_config_region(psc);
	} else if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
		psc->sc_intr_pervq = false;
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * 1,
		    KM_SLEEP);

		error = virtio_pci_establish_intx_interrupt(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * 1);
			pci_intr_release(pc, psc->sc_ihp, 1);
			return -1;
		}

		psc->sc_ihs_num = 1;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
		virtio_pci_adjust_config_region(psc);

		/*
		 * pci_get_capability() returns nonzero when the capability
		 * is found (despite the variable being named "error"): if
		 * the device has an MSI-X capability, make sure it is
		 * disabled since we are using INTx.
		 */
		error = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
		if (error != 0) {
			ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
			ctl &= ~PCI_MSIX_CTL_ENABLE;
			pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
		}
	}

	if (!psc->sc_intr_pervq)
		CLR(sc->sc_flags, VIRTIO_F_INTR_PERVQ);
	return 0;
}
1157 
1158 static void
1159 virtio_pci_free_interrupts(struct virtio_softc *sc)
1160 {
1161 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
1162 
1163 	for (int i = 0; i < psc->sc_ihs_num; i++) {
1164 		if (psc->sc_ihs[i] == NULL)
1165 			continue;
1166 		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[i]);
1167 		psc->sc_ihs[i] = NULL;
1168 	}
1169 
1170 	if (psc->sc_ihs_num > 0)
1171 		pci_intr_release(psc->sc_pa.pa_pc, psc->sc_ihp, psc->sc_ihs_num);
1172 
1173 	if (psc->sc_ihs != NULL) {
1174 		kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * psc->sc_ihs_num);
1175 		psc->sc_ihs = NULL;
1176 	}
1177 	psc->sc_ihs_num = 0;
1178 }
1179 
1180 static bool
1181 virtio_pci_msix_enabled(struct virtio_pci_softc *psc)
1182 {
1183 	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
1184 
1185 	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX)
1186 		return true;
1187 
1188 	return false;
1189 }
1190 
1191 /*
1192  * Interrupt handler.
1193  */
1194 static int
1195 virtio_pci_intr(void *arg)
1196 {
1197 	struct virtio_softc *sc = arg;
1198 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
1199 	int isr, r = 0;
1200 
1201 	/* check and ack the interrupt */
1202 	isr = bus_space_read_1(psc->sc_isr_iot, psc->sc_isr_ioh, 0);
1203 	if (isr == 0)
1204 		return 0;
1205 	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
1206 	    (sc->sc_config_change != NULL))
1207 		r = (sc->sc_config_change)(sc);
1208 	if (sc->sc_intrhand != NULL) {
1209 		if (sc->sc_soft_ih != NULL)
1210 			softint_schedule(sc->sc_soft_ih);
1211 		else
1212 			r |= (sc->sc_intrhand)(sc);
1213 	}
1214 
1215 	return r;
1216 }
1217 
1218 static int
1219 virtio_pci_msix_queue_intr(void *arg)
1220 {
1221 	struct virtio_softc *sc = arg;
1222 	int r = 0;
1223 
1224 	if (sc->sc_intrhand != NULL) {
1225 		if (sc->sc_soft_ih != NULL)
1226 			softint_schedule(sc->sc_soft_ih);
1227 		else
1228 			r |= (sc->sc_intrhand)(sc);
1229 	}
1230 
1231 	return r;
1232 }
1233 
1234 static int
1235 virtio_pci_msix_config_intr(void *arg)
1236 {
1237 	struct virtio_softc *sc = arg;
1238 	int r = 0;
1239 
1240 	if (sc->sc_config_change != NULL)
1241 		r = (sc->sc_config_change)(sc);
1242 	return r;
1243 }
1244 
/* Loadable-module glue; "pci,virtio" are this module's dependencies. */
MODULE(MODULE_CLASS_DRIVER, virtio_pci, "pci,virtio");

#ifdef _MODULE
#include "ioconf.c"	/* generated autoconf tables for the module build */
#endif
1250 
1251 static int
1252 virtio_pci_modcmd(modcmd_t cmd, void *opaque)
1253 {
1254 	int error = 0;
1255 
1256 #ifdef _MODULE
1257 	switch (cmd) {
1258 	case MODULE_CMD_INIT:
1259 		error = config_init_component(cfdriver_ioconf_virtio_pci,
1260 		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
1261 		break;
1262 	case MODULE_CMD_FINI:
1263 		error = config_fini_component(cfdriver_ioconf_virtio_pci,
1264 		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
1265 		break;
1266 	default:
1267 		error = ENOTTY;
1268 		break;
1269 	}
1270 #endif
1271 
1272 	return error;
1273 }
1274