xref: /openbsd-src/sys/dev/pci/virtio_pci.c (revision ff0ccef3c10d0f4cfabce3e33cd26fe007d64b40)
1 /*	$OpenBSD: virtio_pci.c,v 1.47 2024/12/03 19:14:40 sf Exp $	*/
2 /*	$NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $	*/
3 
4 /*
5  * Copyright (c) 2012 Stefan Fritsch.
6  * Copyright (c) 2010 Minoura Makoto.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/device.h>
33 #include <sys/mutex.h>
34 
35 #include <dev/pci/pcidevs.h>
36 #include <dev/pci/pcireg.h>
37 #include <dev/pci/pcivar.h>
38 #include <dev/pci/virtio_pcireg.h>
39 
40 #include <dev/pv/virtioreg.h>
41 #include <dev/pv/virtiovar.h>
42 
43 #define DNPRINTF(n,x...)				\
44     do { if (VIRTIO_DEBUG >= n) printf(x); } while(0)
45 
46 
47 /*
48  * XXX: Before being used on big endian arches, the access to config registers
49  * XXX: needs to be reviewed/fixed. The non-device specific registers are
50  * XXX: PCI-endian while the device specific registers are native endian.
51  */
52 
53 #define MAX_MSIX_VECS	8
54 
55 struct virtio_pci_softc;
56 struct virtio_pci_attach_args;
57 
58 int		virtio_pci_match(struct device *, void *, void *);
59 void		virtio_pci_attach(struct device *, struct device *, void *);
60 int		virtio_pci_attach_09(struct virtio_pci_softc *sc, struct pci_attach_args *pa);
61 int		virtio_pci_attach_10(struct virtio_pci_softc *sc, struct pci_attach_args *pa);
62 int		virtio_pci_detach(struct device *, int);
63 
64 void		virtio_pci_kick(struct virtio_softc *, uint16_t);
65 int		virtio_pci_adjust_config_region(struct virtio_pci_softc *);
66 uint8_t		virtio_pci_read_device_config_1(struct virtio_softc *, int);
67 uint16_t	virtio_pci_read_device_config_2(struct virtio_softc *, int);
68 uint32_t	virtio_pci_read_device_config_4(struct virtio_softc *, int);
69 uint64_t	virtio_pci_read_device_config_8(struct virtio_softc *, int);
70 void		virtio_pci_write_device_config_1(struct virtio_softc *, int, uint8_t);
71 void		virtio_pci_write_device_config_2(struct virtio_softc *, int, uint16_t);
72 void		virtio_pci_write_device_config_4(struct virtio_softc *, int, uint32_t);
73 void		virtio_pci_write_device_config_8(struct virtio_softc *, int, uint64_t);
74 uint16_t	virtio_pci_read_queue_size(struct virtio_softc *, uint16_t);
75 void		virtio_pci_setup_queue(struct virtio_softc *, struct virtqueue *, uint64_t);
76 void		virtio_pci_setup_intrs(struct virtio_softc *);
77 int		virtio_pci_get_status(struct virtio_softc *);
78 void		virtio_pci_set_status(struct virtio_softc *, int);
79 int		virtio_pci_negotiate_features(struct virtio_softc *, const struct virtio_feature_name *);
80 int		virtio_pci_negotiate_features_10(struct virtio_softc *, const struct virtio_feature_name *);
81 void		virtio_pci_set_msix_queue_vector(struct virtio_pci_softc *, uint32_t, uint16_t);
82 void		virtio_pci_set_msix_config_vector(struct virtio_pci_softc *, uint16_t);
83 int		virtio_pci_msix_establish(struct virtio_pci_softc *, struct virtio_pci_attach_args *, int, int (*)(void *), void *);
84 int		virtio_pci_setup_msix(struct virtio_pci_softc *, struct virtio_pci_attach_args *, int);
85 void		virtio_pci_intr_barrier(struct virtio_softc *);
86 void		virtio_pci_free_irqs(struct virtio_pci_softc *);
87 int		virtio_pci_poll_intr(void *);
88 int		virtio_pci_legacy_intr(void *);
89 int		virtio_pci_legacy_intr_mpsafe(void *);
90 int		virtio_pci_config_intr(void *);
91 int		virtio_pci_queue_intr(void *);
92 int		virtio_pci_shared_queue_intr(void *);
93 int		virtio_pci_find_cap(struct virtio_pci_softc *sc, int cfg_type, void *buf, int buflen);
94 #if VIRTIO_DEBUG
95 void virtio_pci_dump_caps(struct virtio_pci_softc *sc);
96 #endif
97 
98 enum irq_type {
99 	IRQ_NO_MSIX,
100 	IRQ_MSIX_SHARED, /* vec 0: config irq, vec 1 shared by all vqs */
101 	IRQ_MSIX_PER_VQ, /* vec 0: config irq, vec n: irq of vq[n-1] */
102 };
103 
104 struct virtio_pci_intr {
105 	char	 name[16];
106 	void	*ih;
107 };
108 
109 struct virtio_pci_softc {
110 	struct virtio_softc	sc_sc;
111 	pci_chipset_tag_t	sc_pc;
112 	pcitag_t		sc_ptag;
113 
114 	bus_space_tag_t		sc_iot;
115 	bus_space_handle_t	sc_ioh;
116 	bus_size_t		sc_iosize;
117 
118 	bus_space_tag_t		sc_bars_iot[4];
119 	bus_space_handle_t	sc_bars_ioh[4];
120 	bus_size_t		sc_bars_iosize[4];
121 
122 	bus_space_tag_t		sc_notify_iot;
123 	bus_space_handle_t	sc_notify_ioh;
124 	bus_size_t		sc_notify_iosize;
125 	unsigned int		sc_notify_off_multiplier;
126 
127 	bus_space_tag_t		sc_devcfg_iot;
128 	bus_space_handle_t	sc_devcfg_ioh;
129 	bus_size_t		sc_devcfg_iosize;
130 	/*
131 	 * With 0.9, the offset of the devcfg region in the io bar changes
132 	 * depending on MSI-X being enabled or not.
133 	 * With 1.0, this field is still used to remember if MSI-X is enabled
134 	 * or not.
135 	 */
136 	unsigned int		sc_devcfg_offset;
137 
138 	bus_space_tag_t		sc_isr_iot;
139 	bus_space_handle_t	sc_isr_ioh;
140 	bus_size_t		sc_isr_iosize;
141 
142 	struct virtio_pci_intr	*sc_intr;
143 	int			sc_nintr;
144 
145 	enum irq_type		sc_irq_type;
146 };
147 
148 struct virtio_pci_attach_args {
149 	struct virtio_attach_args	 vpa_va;
150 	struct pci_attach_args		*vpa_pa;
151 };
152 
153 
154 const struct cfattach virtio_pci_ca = {
155 	sizeof(struct virtio_pci_softc),
156 	virtio_pci_match,
157 	virtio_pci_attach,
158 	virtio_pci_detach,
159 	NULL
160 };
161 
162 const struct virtio_ops virtio_pci_ops = {
163 	virtio_pci_kick,
164 	virtio_pci_read_device_config_1,
165 	virtio_pci_read_device_config_2,
166 	virtio_pci_read_device_config_4,
167 	virtio_pci_read_device_config_8,
168 	virtio_pci_write_device_config_1,
169 	virtio_pci_write_device_config_2,
170 	virtio_pci_write_device_config_4,
171 	virtio_pci_write_device_config_8,
172 	virtio_pci_read_queue_size,
173 	virtio_pci_setup_queue,
174 	virtio_pci_setup_intrs,
175 	virtio_pci_get_status,
176 	virtio_pci_set_status,
177 	virtio_pci_negotiate_features,
178 	virtio_pci_poll_intr,
179 	virtio_pci_intr_barrier,
180 };
181 
182 static inline uint64_t
183 _cread(struct virtio_pci_softc *sc, unsigned off, unsigned size)
184 {
185 	uint64_t val;
186 	switch (size) {
187 	case 1:
188 		val = bus_space_read_1(sc->sc_iot, sc->sc_ioh, off);
189 		break;
190 	case 2:
191 		val = bus_space_read_2(sc->sc_iot, sc->sc_ioh, off);
192 		break;
193 	case 4:
194 		val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
195 		break;
196 	case 8:
197 		val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
198 		    off + sizeof(uint32_t));
199 		val <<= 32;
200 		val += bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
201 		break;
202 	}
203 	return val;
204 }
205 
/*
 * CREAD()/CWRITE() access a member of struct virtio_pci_common_cfg in the
 * device's common config region, deriving both the register offset and the
 * bus_space access width from the member itself.  Only valid for the
 * virtio 1.x register layout.
 */
#define CREAD(sc, memb)  _cread(sc, offsetof(struct virtio_pci_common_cfg, memb), \
    sizeof(((struct virtio_pci_common_cfg *)0)->memb))

/*
 * Write "val" to common cfg member "memb".  64-bit members are written as
 * two 32-bit accesses, low word first.
 */
#define CWRITE(sc, memb, val)							\
	do {									\
		struct virtio_pci_common_cfg c;					\
		size_t off = offsetof(struct virtio_pci_common_cfg, memb);	\
		size_t size = sizeof(c.memb);					\
										\
		DNPRINTF(2, "%s: %d: off %#zx size %#zx write %#llx\n",		\
		    __func__, __LINE__, off, size, (unsigned long long)val);	\
		switch (size) {							\
		case 1:								\
			bus_space_write_1(sc->sc_iot, sc->sc_ioh, off, val);	\
			break;							\
		case 2:								\
			bus_space_write_2(sc->sc_iot, sc->sc_ioh, off, val);	\
			break;							\
		case 4:								\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);	\
			break;							\
		case 8:								\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, off,		\
			    (val) & 0xffffffff);				\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh,		\
			    (off) + sizeof(uint32_t), (uint64_t)(val) >> 32);	\
			break;							\
		}								\
	} while (0)
235 
236 uint16_t
237 virtio_pci_read_queue_size(struct virtio_softc *vsc, uint16_t idx)
238 {
239 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
240 	uint16_t ret;
241 	if (sc->sc_sc.sc_version_1) {
242 		CWRITE(sc, queue_select, idx);
243 		ret = CREAD(sc, queue_size);
244 	} else {
245 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
246 		    VIRTIO_CONFIG_QUEUE_SELECT, idx);
247 		ret = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
248 		    VIRTIO_CONFIG_QUEUE_SIZE);
249 	}
250 	return ret;
251 }
252 
/*
 * Program the host-visible addresses of virtqueue "vq", or tear the queue
 * down again when "addr" is 0.
 * 1.x: desc/avail/used ring addresses are set individually (all three lie in
 * the single allocation starting at "addr"), the queue is explicitly
 * enabled/disabled, and the notify offset is saved for virtio_pci_kick().
 * 0.9: a single page-granular queue address register; writing 0 disables.
 */
void
virtio_pci_setup_queue(struct virtio_softc *vsc, struct virtqueue *vq,
    uint64_t addr)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	if (sc->sc_sc.sc_version_1) {
		/* All queue_* registers below refer to the selected queue. */
		CWRITE(sc, queue_select, vq->vq_index);
		if (addr == 0) {
			CWRITE(sc, queue_enable, 0);
			CWRITE(sc, queue_desc, 0);
			CWRITE(sc, queue_avail, 0);
			CWRITE(sc, queue_used, 0);
		} else {
			CWRITE(sc, queue_desc, addr);
			CWRITE(sc, queue_avail, addr + vq->vq_availoffset);
			CWRITE(sc, queue_used, addr + vq->vq_usedoffset);
			/* Enable only after all ring addresses are set. */
			CWRITE(sc, queue_enable, 1);
			vq->vq_notify_off = CREAD(sc, queue_notify_off);
		}
	} else {
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SELECT, vq->vq_index);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);
	}
}
279 
280 void
281 virtio_pci_setup_intrs(struct virtio_softc *vsc)
282 {
283 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
284 	int i;
285 
286 	if (sc->sc_irq_type == IRQ_NO_MSIX)
287 		return;
288 
289 	for (i = 0; i < vsc->sc_nvqs; i++) {
290 		unsigned vec = vsc->sc_vqs[i].vq_intr_vec;
291 		virtio_pci_set_msix_queue_vector(sc, i, vec);
292 	}
293 	if (vsc->sc_config_change)
294 		virtio_pci_set_msix_config_vector(sc, 0);
295 }
296 
297 int
298 virtio_pci_get_status(struct virtio_softc *vsc)
299 {
300 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
301 
302 	if (sc->sc_sc.sc_version_1)
303 		return CREAD(sc, device_status);
304 	else
305 		return bus_space_read_1(sc->sc_iot, sc->sc_ioh,
306 		    VIRTIO_CONFIG_DEVICE_STATUS);
307 }
308 
309 void
310 virtio_pci_set_status(struct virtio_softc *vsc, int status)
311 {
312 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
313 	int old = 0;
314 
315 	if (sc->sc_sc.sc_version_1) {
316 		if (status == 0) {
317 			CWRITE(sc, device_status, 0);
318 			while (CREAD(sc, device_status) != 0) {
319 				CPU_BUSY_CYCLE();
320 			}
321 		} else {
322 			old = CREAD(sc, device_status);
323 			CWRITE(sc, device_status, status|old);
324 		}
325 	} else {
326 		if (status == 0) {
327 			bus_space_write_1(sc->sc_iot, sc->sc_ioh,
328 			    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
329 			while (bus_space_read_1(sc->sc_iot, sc->sc_ioh,
330 			    VIRTIO_CONFIG_DEVICE_STATUS) != 0) {
331 				CPU_BUSY_CYCLE();
332 			}
333 		} else {
334 			old = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
335 			    VIRTIO_CONFIG_DEVICE_STATUS);
336 			bus_space_write_1(sc->sc_iot, sc->sc_ioh,
337 			    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
338 		}
339 	}
340 }
341 
342 int
343 virtio_pci_match(struct device *parent, void *match, void *aux)
344 {
345 	struct pci_attach_args *pa;
346 
347 	pa = (struct pci_attach_args *)aux;
348 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OPENBSD &&
349 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_OPENBSD_CONTROL)
350 		return 1;
351 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_QUMRANET)
352 		return 0;
353 	/* virtio 0.9 */
354 	if (PCI_PRODUCT(pa->pa_id) >= 0x1000 &&
355 	    PCI_PRODUCT(pa->pa_id) <= 0x103f &&
356 	    PCI_REVISION(pa->pa_class) == 0)
357 		return 1;
358 	/* virtio 1.0 */
359 	if (PCI_PRODUCT(pa->pa_id) >= 0x1040 &&
360 	    PCI_PRODUCT(pa->pa_id) <= 0x107f &&
361 	    PCI_REVISION(pa->pa_class) == 1)
362 		return 1;
363 	return 0;
364 }
365 
366 #if VIRTIO_DEBUG
/*
 * Debug helper: walk the vendor-specific PCI capability chain and print
 * each capability header (config offset, length, type, BAR and the BAR
 * region it describes).
 */
void
virtio_pci_dump_caps(struct virtio_pci_softc *sc)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_ptag;
	int offset;
	union {
		pcireg_t reg[4];
		struct virtio_pci_cap vcap;
	} v;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v.reg[0]))
		return;

	printf("\n");
	do {
		/* Read the 16-byte generic capability header. */
		for (int i = 0; i < 4; i++)
			v.reg[i] = pci_conf_read(pc, tag, offset + i * 4);
		printf("%s: cfgoff %#x len %#x type %#x bar %#x: off %#x len %#x\n",
			__func__, offset, v.vcap.cap_len, v.vcap.cfg_type, v.vcap.bar,
			v.vcap.offset, v.vcap.length);
		/* cap_next == 0 terminates the chain. */
		offset = v.vcap.cap_next;
	} while (offset != 0);
}
391 #endif
392 
/*
 * Find the virtio PCI capability of type "cfg_type" and copy it to "buf".
 * The generic struct virtio_pci_cap header is always filled in; if the
 * capability is larger (e.g. the notify capability), the remainder is
 * copied too, provided "buflen" is large enough.
 * Returns 0 on success, ENOENT if no matching capability exists, ERANGE
 * if the buffer is too small.
 */
int
virtio_pci_find_cap(struct virtio_pci_softc *sc, int cfg_type, void *buf, int buflen)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_ptag;
	unsigned int offset, i, len;
	union {
		pcireg_t reg[8];
		struct virtio_pci_cap vcap;
	} *v = buf;

	if (buflen < sizeof(struct virtio_pci_cap))
		return ERANGE;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v->reg[0]))
		return ENOENT;

	/* Walk the vendor-specific capability chain until the type matches. */
	do {
		for (i = 0; i < 4; i++)
			v->reg[i] = pci_conf_read(pc, tag, offset + i * 4);
		if (v->vcap.cfg_type == cfg_type)
			break;
		offset = v->vcap.cap_next;
	} while (offset != 0);

	/* offset == 0 means we fell off the end without a match. */
	if (offset == 0)
		return ENOENT;

	/* Copy any payload beyond the generic header, a register at a time. */
	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
		if (len > buflen) {
			printf("%s: cap too large\n", __func__);
			return ERANGE;
		}
		for (i = 4; i < len / sizeof(pcireg_t);  i++)
			v->reg[i] = pci_conf_read(pc, tag, offset + i * 4);
	}

	return 0;
}
433 
434 
/* Number of 32-bit base address registers (BARs) in PCI config space. */
#define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
				sizeof(pcireg_t))
437 
/*
 * Attach as a virtio 1.x ("modern") device: locate the common, notify, isr
 * and (optional) device-specific config capabilities, map each BAR that one
 * of them references, then carve the individual register regions out of
 * those mappings.  Returns 0 on success, ENODEV if a mandatory capability
 * is missing, EIO on a mapping failure.
 */
int
virtio_pci_attach_10(struct virtio_pci_softc *sc, struct pci_attach_args *pa)
{
	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };
	int bars_idx[NMAPREG] = { 0 };
	struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
	int i, j = 0, ret = 0;

	/* Common, notify and isr capabilities are mandatory for 1.x. */
	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_COMMON_CFG, &common, sizeof(common)) != 0)
		return ENODEV;

	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_NOTIFY_CFG, &notify, sizeof(notify)) != 0)
		return ENODEV;
	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_ISR_CFG, &isr, sizeof(isr)) != 0)
		return ENODEV;
	/* The device-specific config region is optional. */
	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_DEVICE_CFG, &device, sizeof(device)) != 0)
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;

	/*
	 * XXX Maybe there are devices that offer the pci caps but not the
	 * XXX VERSION_1 feature bit? Then we should check the feature bit
	 * XXX here and fall back to 0.9 out if not present.
	 */

	/* Figure out which bars we need to map */
	for (i = 0; i < nitems(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;
		if (caps[i]->length == 0)
			continue;
		/* Remember the largest extent needed within each BAR. */
		if (bars[bar] < len)
			bars[bar] = len;
	}

	/* Map each used BAR once; bars_idx[] maps BAR number -> mapping. */
	for (i = 0; i < nitems(bars); i++) {
		int reg;
		pcireg_t type;
		if (bars[i] == 0)
			continue;
		reg = PCI_MAPREG_START + i * 4;
		type = pci_mapreg_type(sc->sc_pc, sc->sc_ptag, reg);
		if (pci_mapreg_map(pa, reg, type, 0, &sc->sc_bars_iot[j],
		    &sc->sc_bars_ioh[j], NULL, &sc->sc_bars_iosize[j],
		    bars[i])) {
			printf("%s: can't map bar %u \n",
			    sc->sc_sc.sc_dev.dv_xname, i);
			ret = EIO;
			goto err;
		}
		bars_idx[i] = j;
		j++;
	}

	/* Notify region, plus its offset multiplier from the notify cap. */
	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    notify.cap.offset, notify.cap.length, &sc->sc_notify_ioh) != 0) {
		printf("%s: can't map notify i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_notify_iosize = notify.cap.length;
	sc->sc_notify_iot = sc->sc_bars_iot[i];
	sc->sc_notify_off_multiplier = notify.notify_off_multiplier;

	/* Device-specific config region, if the device exposes one. */
	if (have_device_cfg) {
		i = bars_idx[device.bar];
		if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
		    device.offset, device.length, &sc->sc_devcfg_ioh) != 0) {
			printf("%s: can't map devcfg i/o space\n",
			    sc->sc_sc.sc_dev.dv_xname);
			ret = EIO;
			goto err;
		}
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = sc->sc_bars_iot[i];
	}

	/* ISR status region. */
	i = bars_idx[isr.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    isr.offset, isr.length, &sc->sc_isr_ioh) != 0) {
		printf("%s: can't map isr i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_isr_iosize = isr.length;
	sc->sc_isr_iot = sc->sc_bars_iot[i];

	/* Common config region, accessed via CREAD()/CWRITE(). */
	i = bars_idx[common.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    common.offset, common.length, &sc->sc_ioh) != 0) {
		printf("%s: can't map common i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_iosize = common.length;
	sc->sc_iot = sc->sc_bars_iot[i];

	sc->sc_sc.sc_version_1 = 1;
	return 0;

err:
	/* there is no pci_mapreg_unmap() */
	return ret;
}
550 
551 int
552 virtio_pci_attach_09(struct virtio_pci_softc *sc, struct pci_attach_args *pa)
553 {
554 	struct virtio_softc *vsc = &sc->sc_sc;
555 	pcireg_t type;
556 
557 	type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
558 	if (pci_mapreg_map(pa, PCI_MAPREG_START, type, 0,
559 	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_iosize, 0)) {
560 		printf("%s: can't map i/o space\n", vsc->sc_dev.dv_xname);
561 		return EIO;
562 	}
563 
564 	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
565 	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &sc->sc_notify_ioh) != 0) {
566 		printf("%s: can't map notify i/o space\n",
567 		    vsc->sc_dev.dv_xname);
568 		return EIO;
569 	}
570 	sc->sc_notify_iosize = 2;
571 	sc->sc_notify_iot = sc->sc_iot;
572 
573 	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
574 	    VIRTIO_CONFIG_ISR_STATUS, 1, &sc->sc_isr_ioh) != 0) {
575 		printf("%s: can't map isr i/o space\n",
576 		    vsc->sc_dev.dv_xname);
577 		return EIO;
578 	}
579 	sc->sc_isr_iosize = 1;
580 	sc->sc_isr_iot = sc->sc_iot;
581 
582 	return 0;
583 }
584 
/*
 * PCI attach: determine the virtio device ID and revision, map the config
 * structures (1.x capabilities or the legacy 0.9 BAR), reset the device,
 * attach the matching child driver, and finally establish interrupts
 * (MSI-X per-VQ, MSI-X shared, or a single legacy/MSI vector).
 */
void
virtio_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
	struct virtio_softc *vsc = &sc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision, ret = ENODEV;
	pcireg_t id;
	char const *intrstr;
	pci_intr_handle_t ih;
	struct virtio_pci_attach_args vpa = { { 0 }, pa };

	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* subsystem ID shows what I am */
		id = PCI_PRODUCT(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		/* 1.0 devices encode the type in the product ID itself. */
		id = PCI_PRODUCT(pa->pa_id) - 0x1040;
		break;
	default:
		printf("unknown revision 0x%02x; giving up\n", revision);
		return;
	}

	sc->sc_pc = pc;
	sc->sc_ptag = pa->pa_tag;
	vsc->sc_dmat = pa->pa_dmat;

#if defined(__i386__) || defined(__amd64__)
	/*
	 * For virtio, ignore normal MSI black/white-listing depending on the
	 * PCI bridge but enable it unconditionally.
	 */
	pa->pa_flags |= PCI_FLAGS_MSI_ENABLED;
#endif

#if VIRTIO_DEBUG
	virtio_pci_dump_caps(sc);
#endif

	/* At least one vector; at most MAX_MSIX_VECS or what the device has. */
	sc->sc_nintr = min(MAX_MSIX_VECS, pci_intr_msix_count(pa));
	sc->sc_nintr = max(sc->sc_nintr, 1);
	vpa.vpa_va.va_nintr = sc->sc_nintr;

	sc->sc_intr = mallocarray(sc->sc_nintr, sizeof(*sc->sc_intr),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	vsc->sc_ops = &virtio_pci_ops;
	/* Prefer 1.x unless disabled by config flags; see config(8). */
	if ((vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_VERSION_1) == 0 &&
	    (revision == 1 ||
	     (vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_PREFER_VERSION_1))) {
		ret = virtio_pci_attach_10(sc, pa);
	}
	if (ret != 0 && revision == 0) {
		/* revision 0 means 0.9 only or both 0.9 and 1.0 */
		ret = virtio_pci_attach_09(sc, pa);
	}
	if (ret != 0) {
		printf(": Cannot attach (%d)\n", ret);
		goto fail_0;
	}

	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	sc->sc_irq_type = IRQ_NO_MSIX;
	if (virtio_pci_adjust_config_region(sc) != 0)
		goto fail_0;

	/* Reset, then announce ourselves to the device. */
	virtio_device_reset(vsc);
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	printf("\n");
	vpa.vpa_va.va_devid = id;
	vsc->sc_child = NULL;
	config_found(self, &vpa, NULL);
	if (vsc->sc_child == NULL) {
		printf("%s: no matching child driver; not configured\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}
	if (vsc->sc_child == VIRTIO_CHILD_ERROR) {
		printf("%s: virtio configuration failed\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}

	/* Try per-VQ MSI-X first, then shared MSI-X, then a single vector. */
	if (virtio_pci_setup_msix(sc, &vpa, 0) == 0) {
		sc->sc_irq_type = IRQ_MSIX_PER_VQ;
		intrstr = "msix per-VQ";
	} else if (virtio_pci_setup_msix(sc, &vpa, 1) == 0) {
		sc->sc_irq_type = IRQ_MSIX_SHARED;
		intrstr = "msix shared";
	} else {
		int (*ih_func)(void *) = virtio_pci_legacy_intr;
		if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
			printf("%s: couldn't map interrupt\n", vsc->sc_dev.dv_xname);
			goto fail_2;
		}
		intrstr = pci_intr_string(pc, ih);
		/*
		 * We always set the IPL_MPSAFE flag in order to do the relatively
		 * expensive ISR read without lock, and then grab the kernel lock in
		 * the interrupt handler.
		 */
		if (vsc->sc_ipl & IPL_MPSAFE)
			ih_func = virtio_pci_legacy_intr_mpsafe;
		sc->sc_intr[0].ih = pci_intr_establish(pc, ih,
		    vsc->sc_ipl | IPL_MPSAFE, ih_func, sc,
		    vsc->sc_child->dv_xname);
		if (sc->sc_intr[0].ih == NULL) {
			printf("%s: couldn't establish interrupt", vsc->sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			goto fail_2;
		}
	}
	virtio_pci_setup_intrs(vsc);
	printf("%s: %s\n", vsc->sc_dev.dv_xname, intrstr);

	return;

fail_2:
	config_detach(vsc->sc_child, 0);
fail_1:
	/* no pci_mapreg_unmap() or pci_intr_unmap() */
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
fail_0:
	free(sc->sc_intr, M_DEVBUF, sc->sc_nintr * sizeof(*sc->sc_intr));
}
719 
720 int
721 virtio_pci_detach(struct device *self, int flags)
722 {
723 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
724 	struct virtio_softc *vsc = &sc->sc_sc;
725 	int r;
726 
727 	if (vsc->sc_child != 0 && vsc->sc_child != VIRTIO_CHILD_ERROR) {
728 		r = config_detach(vsc->sc_child, flags);
729 		if (r)
730 			return r;
731 	}
732 	KASSERT(vsc->sc_child == 0 || vsc->sc_child == VIRTIO_CHILD_ERROR);
733 	KASSERT(vsc->sc_vqs == 0);
734 	virtio_pci_free_irqs(sc);
735 	if (sc->sc_iosize)
736 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_iosize);
737 	sc->sc_iosize = 0;
738 
739 	return 0;
740 }
741 
742 int
743 virtio_pci_adjust_config_region(struct virtio_pci_softc *sc)
744 {
745 	if (sc->sc_sc.sc_version_1)
746 		return 0;
747 	sc->sc_devcfg_iosize = sc->sc_iosize - sc->sc_devcfg_offset;
748 	sc->sc_devcfg_iot = sc->sc_iot;
749 	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, sc->sc_devcfg_offset,
750 	    sc->sc_devcfg_iosize, &sc->sc_devcfg_ioh) != 0) {
751 		printf("%s: can't map config i/o space\n",
752 		    sc->sc_sc.sc_dev.dv_xname);
753 		return 1;
754 	}
755 	return 0;
756 }
757 
758 /*
759  * Feature negotiation.
760  * Prints available / negotiated features if guest_feature_names != NULL and
761  * VIRTIO_DEBUG is 1
762  */
int
virtio_pci_negotiate_features(struct virtio_softc *vsc,
    const struct virtio_feature_name *guest_feature_names)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t host, negotiated;

	vsc->sc_active_features = 0;

	/*
	 * We enable indirect descriptors by default. They can be switched
	 * off by setting bit 1 in the driver flags, see config(8)
	 */
	if (!(vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT) &&
	    !(vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT)) {
		vsc->sc_driver_features |= VIRTIO_F_RING_INDIRECT_DESC;
	} else if (guest_feature_names != NULL) {
		printf(" RingIndirectDesc disabled by UKC");
	}

	/*
	 * The driver must add VIRTIO_F_RING_EVENT_IDX if it supports it.
	 * If it did, check if it is disabled by bit 2 in the driver flags.
	 */
	if ((vsc->sc_driver_features & VIRTIO_F_RING_EVENT_IDX) &&
	    ((vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_EVENT_IDX) ||
	    (vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_EVENT_IDX))) {
		if (guest_feature_names != NULL)
			printf(" RingEventIdx disabled by UKC");
		vsc->sc_driver_features &= ~VIRTIO_F_RING_EVENT_IDX;
	}

	/* 1.x devices use the two-step 64-bit feature handshake. */
	if (vsc->sc_version_1) {
		return virtio_pci_negotiate_features_10(vsc,
		    guest_feature_names);
	}

	/* virtio 0.9 only */
	host = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
				VIRTIO_CONFIG_DEVICE_FEATURES);
	/* Accept only the intersection of host and driver features. */
	negotiated = host & vsc->sc_driver_features;
#if VIRTIO_DEBUG
	if (guest_feature_names)
		virtio_log_features(host, negotiated, guest_feature_names);
#endif
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			  VIRTIO_CONFIG_GUEST_FEATURES, negotiated);
	vsc->sc_active_features = negotiated;
	if (negotiated & VIRTIO_F_RING_INDIRECT_DESC)
		vsc->sc_indirect = 1;
	else
		vsc->sc_indirect = 0;
	return 0;
}
817 
/*
 * Virtio 1.x feature negotiation: read the 64-bit host feature set in two
 * 32-bit halves via the feature select registers, write back the
 * intersection with the driver features, then set FEATURES_OK and verify
 * that the device accepted the selection.  Returns 0 on success, ENXIO if
 * the device rejects FEATURES_OK, EINVAL if VERSION_1 was not accepted.
 */
int
virtio_pci_negotiate_features_10(struct virtio_softc *vsc,
    const struct virtio_feature_name *guest_feature_names)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t host, negotiated;

	vsc->sc_driver_features |= VIRTIO_F_VERSION_1;
	/*
	 * XXX Without this SEV doesn't work with a KVM/qemu hypervisor on
	 * XXX amd64.
	 */
	vsc->sc_driver_features |= VIRTIO_F_ACCESS_PLATFORM;
	/* notify on empty is 0.9 only */
	vsc->sc_driver_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
	/* Read the host features: low word (select 0), high word (select 1). */
	CWRITE(sc, device_feature_select, 0);
	host = CREAD(sc, device_feature);
	CWRITE(sc, device_feature_select, 1);
	host |= (uint64_t)CREAD(sc, device_feature) << 32;

	negotiated = host & vsc->sc_driver_features;
#if VIRTIO_DEBUG
	if (guest_feature_names)
		virtio_log_features(host, negotiated, guest_feature_names);
#endif
	/* Write our selection back, again in two halves. */
	CWRITE(sc, driver_feature_select, 0);
	CWRITE(sc, driver_feature, negotiated & 0xffffffff);
	CWRITE(sc, driver_feature_select, 1);
	CWRITE(sc, driver_feature, negotiated >> 32);
	virtio_pci_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);

	/* The device clears FEATURES_OK if it rejects the selection. */
	if ((CREAD(sc, device_status) &
	    VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
		printf("%s: Feature negotiation failed\n",
		    vsc->sc_dev.dv_xname);
		CWRITE(sc, device_status, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return ENXIO;
	}
	vsc->sc_active_features = negotiated;

	if (negotiated & VIRTIO_F_RING_INDIRECT_DESC)
		vsc->sc_indirect = 1;
	else
		vsc->sc_indirect = 0;

	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
#if VIRTIO_DEBUG
		printf("%s: Host rejected Version_1\n", __func__);
#endif
		CWRITE(sc, device_status, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return EINVAL;
	}
	return 0;
}
872 
873 /*
874  * Device configuration registers.
875  */
876 uint8_t
877 virtio_pci_read_device_config_1(struct virtio_softc *vsc, int index)
878 {
879 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
880 	return bus_space_read_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
881 }
882 
883 uint16_t
884 virtio_pci_read_device_config_2(struct virtio_softc *vsc, int index)
885 {
886 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
887 	return bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
888 }
889 
890 uint32_t
891 virtio_pci_read_device_config_4(struct virtio_softc *vsc, int index)
892 {
893 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
894 	return bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
895 }
896 
897 uint64_t
898 virtio_pci_read_device_config_8(struct virtio_softc *vsc, int index)
899 {
900 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
901 	uint64_t r;
902 
903 	r = bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
904 	    index + sizeof(uint32_t));
905 	r <<= 32;
906 	r += bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
907 	return r;
908 }
909 
910 void
911 virtio_pci_write_device_config_1(struct virtio_softc *vsc, int index,
912     uint8_t value)
913 {
914 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
915 	bus_space_write_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
916 }
917 
918 void
919 virtio_pci_write_device_config_2(struct virtio_softc *vsc, int index,
920     uint16_t value)
921 {
922 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
923 	bus_space_write_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
924 }
925 
926 void
927 virtio_pci_write_device_config_4(struct virtio_softc *vsc,
928 			     int index, uint32_t value)
929 {
930 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
931 	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
932 }
933 
934 void
935 virtio_pci_write_device_config_8(struct virtio_softc *vsc,
936 			     int index, uint64_t value)
937 {
938 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
939 	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
940 	    index, value & 0xffffffff);
941 	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
942 	    index + sizeof(uint32_t), value >> 32);
943 }
944 
/*
 * Map and establish MSI-X vector "idx" with "handler"/"ih_arg", recording
 * the handle and a per-vector name in sc_intr[idx].
 * Returns 0 on success, 1 on failure (caller falls back to another
 * interrupt scheme).
 */
int
virtio_pci_msix_establish(struct virtio_pci_softc *sc,
    struct virtio_pci_attach_args *vpa, int idx,
    int (*handler)(void *), void *ih_arg)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	pci_intr_handle_t ih;

	KASSERT(idx < sc->sc_nintr);

	if (pci_intr_map_msix(vpa->vpa_pa, idx, &ih) != 0) {
#if VIRTIO_DEBUG
		printf("%s[%d]: pci_intr_map_msix failed\n",
		    vsc->sc_dev.dv_xname, idx);
#endif
		return 1;
	}
	/* Name each vector after the child device plus the vector number. */
	snprintf(sc->sc_intr[idx].name, sizeof(sc->sc_intr[idx].name), "%s:%d",
	    vsc->sc_child->dv_xname, idx);
	sc->sc_intr[idx].ih = pci_intr_establish(sc->sc_pc, ih, vsc->sc_ipl,
	    handler, ih_arg, sc->sc_intr[idx].name);
	if (sc->sc_intr[idx].ih == NULL) {
		printf("%s[%d]: couldn't establish msix interrupt\n",
		    vsc->sc_dev.dv_xname, idx);
		return 1;
	}
	return 0;
}
973 
974 void
975 virtio_pci_set_msix_queue_vector(struct virtio_pci_softc *sc, uint32_t idx, uint16_t vector)
976 {
977 	if (sc->sc_sc.sc_version_1) {
978 		CWRITE(sc, queue_select, idx);
979 		CWRITE(sc, queue_msix_vector, vector);
980 	} else {
981 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
982 		    VIRTIO_CONFIG_QUEUE_SELECT, idx);
983 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
984 		    VIRTIO_MSI_QUEUE_VECTOR, vector);
985 	}
986 }
987 
988 void
989 virtio_pci_set_msix_config_vector(struct virtio_pci_softc *sc, uint16_t vector)
990 {
991 	if (sc->sc_sc.sc_version_1) {
992 		CWRITE(sc, config_msix_vector, vector);
993 	} else {
994 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
995 		    VIRTIO_MSI_CONFIG_VECTOR, vector);
996 	}
997 }
998 
999 
1000 void
1001 virtio_pci_free_irqs(struct virtio_pci_softc *sc)
1002 {
1003 	struct virtio_softc *vsc = &sc->sc_sc;
1004 	int i;
1005 
1006 	if (sc->sc_devcfg_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI) {
1007 		for (i = 0; i < vsc->sc_nvqs; i++) {
1008 			virtio_pci_set_msix_queue_vector(sc, i,
1009 			    VIRTIO_MSI_NO_VECTOR);
1010 		}
1011 	}
1012 
1013 	for (i = 0; i < sc->sc_nintr; i++) {
1014 		if (sc->sc_intr[i].ih) {
1015 			pci_intr_disestablish(sc->sc_pc, sc->sc_intr[i].ih);
1016 			sc->sc_intr[i].ih = NULL;
1017 		}
1018 	}
1019 
1020 	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
1021 	virtio_pci_adjust_config_region(sc);
1022 }
1023 
/*
 * Set up MSI-X interrupts.  Vector 0 always carries config-change
 * notifications; the virtqueues either all share vector 1 (shared)
 * or get one vector each (vq i uses vector i + 1).
 * Returns 0 on success, 1 on failure; on failure all vectors that
 * were already established are freed again.
 */
int
virtio_pci_setup_msix(struct virtio_pci_softc *sc,
    struct virtio_pci_attach_args *vpa, int shared)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	int i;

	/* Shared needs config + queue */
	if (shared && vpa->vpa_va.va_nintr < 1 + 1)
		return 1;
	/* Per VQ needs config + N * queue */
	if (!shared && vpa->vpa_va.va_nintr < 1 + vsc->sc_nvqs)
		return 1;

	/* Vector 0: config-change interrupt. */
	if (virtio_pci_msix_establish(sc, vpa, 0, virtio_pci_config_intr, vsc))
		return 1;
	/* The device config region offset differs once MSI is in use. */
	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
	virtio_pci_adjust_config_region(sc);

	if (shared) {
		/* Vector 1: one handler scanning all virtqueues. */
		if (virtio_pci_msix_establish(sc, vpa, 1,
		    virtio_pci_shared_queue_intr, vsc)) {
			goto fail;
		}

		for (i = 0; i < vsc->sc_nvqs; i++)
			vsc->sc_vqs[i].vq_intr_vec = 1;
	} else {
		/* Vectors 1..N: one dedicated handler per virtqueue. */
		for (i = 0; i < vsc->sc_nvqs; i++) {
			if (virtio_pci_msix_establish(sc, vpa, i + 1,
			    virtio_pci_queue_intr, &vsc->sc_vqs[i])) {
				goto fail;
			}
			vsc->sc_vqs[i].vq_intr_vec = i + 1;
		}
	}

	return 0;
fail:
	/* Unwind everything established so far, including vector 0. */
	virtio_pci_free_irqs(sc);
	return 1;
}
1066 
1067 void
1068 virtio_pci_intr_barrier(struct virtio_softc *vsc)
1069 {
1070 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
1071 	int i;
1072 
1073 	for (i = 0; i < sc->sc_nintr; i++) {
1074 		if (sc->sc_intr[i].ih != NULL)
1075 			intr_barrier(sc->sc_intr[i].ih);
1076 	}
1077 }
1078 
1079 /*
1080  * Interrupt handler.
1081  */
1082 
1083 /*
1084  * Only used without MSI-X
1085  */
/*
 * Legacy INTx interrupt handler.  Returns 0 if the interrupt was not
 * for us; otherwise the combined result of the config-change callback
 * and the virtqueue scan.
 */
int
virtio_pci_legacy_intr(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_isr_iot, sc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	/* Callbacks here are not mpsafe; run them under the kernel lock. */
	KERNEL_LOCK();
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (vsc->sc_config_change != NULL)) {
		r = (vsc->sc_config_change)(vsc);
	}
	/* Always scan the virtqueues; INTx shares one line for all. */
	r |= virtio_check_vqs(vsc);
	KERNEL_UNLOCK();

	return r;
}
1107 
1108 int
1109 virtio_pci_legacy_intr_mpsafe(void *arg)
1110 {
1111 	struct virtio_pci_softc *sc = arg;
1112 	struct virtio_softc *vsc = &sc->sc_sc;
1113 	int isr, r = 0;
1114 
1115 	/* check and ack the interrupt */
1116 	isr = bus_space_read_1(sc->sc_isr_iot, sc->sc_isr_ioh, 0);
1117 	if (isr == 0)
1118 		return 0;
1119 	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
1120 	    (vsc->sc_config_change != NULL)) {
1121 		r = (vsc->sc_config_change)(vsc);
1122 	}
1123 	r |= virtio_check_vqs(vsc);
1124 	return r;
1125 }
1126 
1127 /*
1128  * Only used with MSI-X
1129  */
1130 int
1131 virtio_pci_config_intr(void *arg)
1132 {
1133 	struct virtio_softc *vsc = arg;
1134 
1135 	if (vsc->sc_config_change != NULL)
1136 		return vsc->sc_config_change(vsc);
1137 	return 0;
1138 }
1139 
1140 /*
1141  * Only used with MSI-X
1142  */
1143 int
1144 virtio_pci_queue_intr(void *arg)
1145 {
1146 	struct virtqueue *vq = arg;
1147 	struct virtio_softc *vsc = vq->vq_owner;
1148 
1149 	return virtio_check_vq(vsc, vq);
1150 }
1151 
/*
 * MSI-X shared queue vector: all virtqueues raise the same vector,
 * so scan every one of them.
 */
int
virtio_pci_shared_queue_intr(void *arg)
{
	struct virtio_softc *vsc = arg;

	return virtio_check_vqs(vsc);
}
1159 
1160 /*
1161  * Interrupt handler to be used when polling.
1162  * We cannot use isr here because it is not defined in MSI-X mode.
1163  */
1164 int
1165 virtio_pci_poll_intr(void *arg)
1166 {
1167 	struct virtio_pci_softc *sc = arg;
1168 	struct virtio_softc *vsc = &sc->sc_sc;
1169 	int r = 0;
1170 
1171 	if (vsc->sc_config_change != NULL)
1172 		r = (vsc->sc_config_change)(vsc);
1173 
1174 	r |= virtio_check_vqs(vsc);
1175 
1176 	return r;
1177 }
1178 
1179 void
1180 virtio_pci_kick(struct virtio_softc *vsc, uint16_t idx)
1181 {
1182 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
1183 	unsigned offset = 0;
1184 	if (vsc->sc_version_1) {
1185 		offset = vsc->sc_vqs[idx].vq_notify_off *
1186 		    sc->sc_notify_off_multiplier;
1187 	}
1188 	bus_space_write_2(sc->sc_notify_iot, sc->sc_notify_ioh, offset, idx);
1189 }
1190