xref: /openbsd-src/sys/dev/pci/virtio_pci.c (revision a9c160188cc8049e69a03d2a078123dd2c663835)
1 /*	$OpenBSD: virtio_pci.c,v 1.41 2024/09/02 08:22:08 sf Exp $	*/
2 /*	$NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $	*/
3 
4 /*
5  * Copyright (c) 2012 Stefan Fritsch.
6  * Copyright (c) 2010 Minoura Makoto.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/device.h>
33 #include <sys/mutex.h>
34 
35 #include <dev/pci/pcidevs.h>
36 #include <dev/pci/pcireg.h>
37 #include <dev/pci/pcivar.h>
38 #include <dev/pci/virtio_pcireg.h>
39 
40 #include <dev/pv/virtioreg.h>
41 #include <dev/pv/virtiovar.h>
42 
/* Debug printf: only emits output when compiled with VIRTIO_DEBUG >= n. */
#define DNPRINTF(n,x...)				\
    do { if (VIRTIO_DEBUG >= n) printf(x); } while(0)
45 
46 
47 /*
48  * XXX: Before being used on big endian arches, the access to config registers
49  * XXX: needs to be reviewed/fixed. The non-device specific registers are
50  * XXX: PCI-endian while the device specific registers are native endian.
51  */
52 
53 #define MAX_MSIX_VECS	8
54 
55 struct virtio_pci_softc;
56 struct virtio_pci_attach_args;
57 
58 int		virtio_pci_match(struct device *, void *, void *);
59 void		virtio_pci_attach(struct device *, struct device *, void *);
60 int		virtio_pci_attach_09(struct virtio_pci_softc *sc, struct pci_attach_args *pa);
61 int		virtio_pci_attach_10(struct virtio_pci_softc *sc, struct pci_attach_args *pa);
62 int		virtio_pci_detach(struct device *, int);
63 
64 void		virtio_pci_kick(struct virtio_softc *, uint16_t);
65 int		virtio_pci_adjust_config_region(struct virtio_pci_softc *);
66 uint8_t		virtio_pci_read_device_config_1(struct virtio_softc *, int);
67 uint16_t	virtio_pci_read_device_config_2(struct virtio_softc *, int);
68 uint32_t	virtio_pci_read_device_config_4(struct virtio_softc *, int);
69 uint64_t	virtio_pci_read_device_config_8(struct virtio_softc *, int);
70 void		virtio_pci_write_device_config_1(struct virtio_softc *, int, uint8_t);
71 void		virtio_pci_write_device_config_2(struct virtio_softc *, int, uint16_t);
72 void		virtio_pci_write_device_config_4(struct virtio_softc *, int, uint32_t);
73 void		virtio_pci_write_device_config_8(struct virtio_softc *, int, uint64_t);
74 uint16_t	virtio_pci_read_queue_size(struct virtio_softc *, uint16_t);
75 void		virtio_pci_setup_queue(struct virtio_softc *, struct virtqueue *, uint64_t);
76 int		virtio_pci_get_status(struct virtio_softc *);
77 void		virtio_pci_set_status(struct virtio_softc *, int);
78 int		virtio_pci_negotiate_features(struct virtio_softc *, const struct virtio_feature_name *);
79 int		virtio_pci_negotiate_features_10(struct virtio_softc *, const struct virtio_feature_name *);
80 void		virtio_pci_set_msix_queue_vector(struct virtio_pci_softc *, uint32_t, uint16_t);
81 void		virtio_pci_set_msix_config_vector(struct virtio_pci_softc *, uint16_t);
82 int		virtio_pci_msix_establish(struct virtio_pci_softc *, struct virtio_pci_attach_args *, int, int (*)(void *), void *);
83 int		virtio_pci_setup_msix(struct virtio_pci_softc *, struct virtio_pci_attach_args *, int);
84 void		virtio_pci_free_irqs(struct virtio_pci_softc *);
85 int		virtio_pci_poll_intr(void *);
86 int		virtio_pci_legacy_intr(void *);
87 int		virtio_pci_legacy_intr_mpsafe(void *);
88 int		virtio_pci_config_intr(void *);
89 int		virtio_pci_queue_intr(void *);
90 int		virtio_pci_shared_queue_intr(void *);
91 int		virtio_pci_find_cap(struct virtio_pci_softc *sc, int cfg_type, void *buf, int buflen);
92 #if VIRTIO_DEBUG
93 void virtio_pci_dump_caps(struct virtio_pci_softc *sc);
94 #endif
95 
/* How interrupts are routed between the device and our handlers. */
enum irq_type {
	IRQ_NO_MSIX,	 /* single legacy INTx/MSI handler for everything */
	IRQ_MSIX_SHARED, /* vec 0: config irq, vec 1 shared by all vqs */
	IRQ_MSIX_PER_VQ, /* vec 0: config irq, vec n: irq of vq[n-1] */
};
101 
/* State for one established interrupt handler. */
struct virtio_pci_intr {
	char	 name[16];	/* handler name passed to pci_intr_establish() */
	void	*ih;		/* cookie from pci_intr_establish(), NULL if unused */
};
106 
/*
 * Per-instance state of the PCI transport. The generic virtio softc must
 * be the first member so pointers can be cast back and forth.
 */
struct virtio_pci_softc {
	struct virtio_softc	sc_sc;		/* generic virtio state; must be first */
	pci_chipset_tag_t	sc_pc;		/* chipset tag for pci_* calls */
	pcitag_t		sc_ptag;	/* our PCI tag */

	/* main window: common cfg (1.0) or the legacy header (0.9) */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;

	/* one slot per BAR mapped by virtio_pci_attach_10() */
	bus_space_tag_t		sc_bars_iot[4];
	bus_space_handle_t	sc_bars_ioh[4];
	bus_size_t		sc_bars_iosize[4];

	/* queue notification region */
	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;
	unsigned int		sc_notify_off_multiplier;

	/* device-specific config region */
	bus_space_tag_t		sc_devcfg_iot;
	bus_space_handle_t	sc_devcfg_ioh;
	bus_size_t		sc_devcfg_iosize;
	/*
	 * With 0.9, the offset of the devcfg region in the io bar changes
	 * depending on MSI-X being enabled or not.
	 * With 1.0, this field is still used to remember if MSI-X is enabled
	 * or not.
	 */
	unsigned int		sc_devcfg_offset;

	/* ISR status region */
	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	struct virtio_pci_intr	*sc_intr;	/* array of sc_nintr slots */
	int			sc_nintr;

	enum irq_type		sc_irq_type;	/* current interrupt routing */
};
145 
/*
 * Attach arguments handed to the child driver: the generic virtio attach
 * args plus the PCI attach args (needed later for interrupt mapping).
 */
struct virtio_pci_attach_args {
	struct virtio_attach_args	 vpa_va;	/* generic part */
	struct pci_attach_args		*vpa_pa;	/* for pci_intr_map_msix() etc. */
};
150 
151 
/* Autoconf glue: match/attach/detach entry points for virtio at pci. */
const struct cfattach virtio_pci_ca = {
	sizeof(struct virtio_pci_softc),
	virtio_pci_match,
	virtio_pci_attach,
	virtio_pci_detach,
	NULL
};
159 
/* Transport operations called by the bus-independent virtio layer. */
const struct virtio_ops virtio_pci_ops = {
	virtio_pci_kick,
	virtio_pci_read_device_config_1,
	virtio_pci_read_device_config_2,
	virtio_pci_read_device_config_4,
	virtio_pci_read_device_config_8,
	virtio_pci_write_device_config_1,
	virtio_pci_write_device_config_2,
	virtio_pci_write_device_config_4,
	virtio_pci_write_device_config_8,
	virtio_pci_read_queue_size,
	virtio_pci_setup_queue,
	virtio_pci_get_status,
	virtio_pci_set_status,
	virtio_pci_negotiate_features,
	virtio_pci_poll_intr,
};
177 
178 static inline uint64_t
179 _cread(struct virtio_pci_softc *sc, unsigned off, unsigned size)
180 {
181 	uint64_t val;
182 	switch (size) {
183 	case 1:
184 		val = bus_space_read_1(sc->sc_iot, sc->sc_ioh, off);
185 		break;
186 	case 2:
187 		val = bus_space_read_2(sc->sc_iot, sc->sc_ioh, off);
188 		break;
189 	case 4:
190 		val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
191 		break;
192 	case 8:
193 		val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
194 		    off + sizeof(uint32_t));
195 		val <<= 32;
196 		val += bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
197 		break;
198 	}
199 	return val;
200 }
201 
/*
 * CREAD/CWRITE access a member of struct virtio_pci_common_cfg in the
 * common config window, using the access width matching the member size.
 * The macro argument "val" is parenthesized in every expansion so that
 * expressions with low-precedence operators expand correctly.
 */
#define CREAD(sc, memb)  _cread(sc, offsetof(struct virtio_pci_common_cfg, memb), \
    sizeof(((struct virtio_pci_common_cfg *)0)->memb))

#define CWRITE(sc, memb, val)							\
	do {									\
		struct virtio_pci_common_cfg c;					\
		size_t off = offsetof(struct virtio_pci_common_cfg, memb);	\
		size_t size = sizeof(c.memb);					\
										\
		DNPRINTF(2, "%s: %d: off %#zx size %#zx write %#llx\n",		\
		    __func__, __LINE__, off, size, (unsigned long long)(val));	\
		switch (size) {							\
		case 1:								\
			bus_space_write_1(sc->sc_iot, sc->sc_ioh, off, (val));	\
			break;							\
		case 2:								\
			bus_space_write_2(sc->sc_iot, sc->sc_ioh, off, (val));	\
			break;							\
		case 4:								\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, (val));	\
			break;							\
		case 8:								\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, off,		\
			    (val) & 0xffffffff);				\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh,		\
			    (off) + sizeof(uint32_t), (uint64_t)(val) >> 32);	\
			break;							\
		}								\
	} while (0)
231 
232 uint16_t
233 virtio_pci_read_queue_size(struct virtio_softc *vsc, uint16_t idx)
234 {
235 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
236 	uint16_t ret;
237 	if (sc->sc_sc.sc_version_1) {
238 		CWRITE(sc, queue_select, idx);
239 		ret = CREAD(sc, queue_size);
240 	} else {
241 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
242 		    VIRTIO_CONFIG_QUEUE_SELECT, idx);
243 		ret = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
244 		    VIRTIO_CONFIG_QUEUE_SIZE);
245 	}
246 	return ret;
247 }
248 
/*
 * Program the ring addresses of virtqueue vq into the device.
 * addr == 0 disables the queue; otherwise addr is the base of the
 * descriptor table, with avail/used rings at their offsets within the
 * same allocation. For 1.0, also enable the queue and record its notify
 * offset. If MSI-X is already configured, re-route the queue's vector.
 */
void
virtio_pci_setup_queue(struct virtio_softc *vsc, struct virtqueue *vq,
    uint64_t addr)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	if (sc->sc_sc.sc_version_1) {
		CWRITE(sc, queue_select, vq->vq_index);
		if (addr == 0) {
			/* disable first, then clear the ring addresses */
			CWRITE(sc, queue_enable, 0);
			CWRITE(sc, queue_desc, 0);
			CWRITE(sc, queue_avail, 0);
			CWRITE(sc, queue_used, 0);
		} else {
			/* program all addresses before enabling the queue */
			CWRITE(sc, queue_desc, addr);
			CWRITE(sc, queue_avail, addr + vq->vq_availoffset);
			CWRITE(sc, queue_used, addr + vq->vq_usedoffset);
			CWRITE(sc, queue_enable, 1);
			vq->vq_notify_off = CREAD(sc, queue_notify_off);
		}
	} else {
		/* 0.9: a single page-frame-number register per queue */
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SELECT, vq->vq_index);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);
	}

	/*
	 * This path is only executed if this function is called after
	 * the child's attach function has finished. In other cases,
	 * it's done in virtio_pci_setup_msix().
	 */
	if (sc->sc_irq_type != IRQ_NO_MSIX) {
		int vec = 1;
		if (sc->sc_irq_type == IRQ_MSIX_PER_VQ)
		       vec += vq->vq_index;
		if (sc->sc_sc.sc_version_1) {
			CWRITE(sc, queue_msix_vector, vec);
		} else {
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_MSI_QUEUE_VECTOR, vec);
		}
	}
}
292 
293 int
294 virtio_pci_get_status(struct virtio_softc *vsc)
295 {
296 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
297 
298 	if (sc->sc_sc.sc_version_1)
299 		return CREAD(sc, device_status);
300 	else
301 		return bus_space_read_1(sc->sc_iot, sc->sc_ioh,
302 		    VIRTIO_CONFIG_DEVICE_STATUS);
303 }
304 
/*
 * OR the given bits into the device status register; status == 0 means
 * "reset the device". After a reset, poll until the device reports
 * status 0 back, since the reset only completes once that read succeeds.
 */
void
virtio_pci_set_status(struct virtio_softc *vsc, int status)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	int old = 0;

	if (sc->sc_sc.sc_version_1) {
		if (status == 0) {
			CWRITE(sc, device_status, 0);
			/* wait for the reset to take effect */
			while (CREAD(sc, device_status) != 0) {
				CPU_BUSY_CYCLE();
			}
		} else {
			old = CREAD(sc, device_status);
			CWRITE(sc, device_status, status|old);
		}
	} else {
		if (status == 0) {
			/* status|old is 0 here: this is the reset write */
			bus_space_write_1(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
			while (bus_space_read_1(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_DEVICE_STATUS) != 0) {
				CPU_BUSY_CYCLE();
			}
		} else {
			old = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_DEVICE_STATUS);
			bus_space_write_1(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
		}
	}
}
337 
338 int
339 virtio_pci_match(struct device *parent, void *match, void *aux)
340 {
341 	struct pci_attach_args *pa;
342 
343 	pa = (struct pci_attach_args *)aux;
344 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OPENBSD &&
345 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_OPENBSD_CONTROL)
346 		return 1;
347 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_QUMRANET)
348 		return 0;
349 	/* virtio 0.9 */
350 	if (PCI_PRODUCT(pa->pa_id) >= 0x1000 &&
351 	    PCI_PRODUCT(pa->pa_id) <= 0x103f &&
352 	    PCI_REVISION(pa->pa_class) == 0)
353 		return 1;
354 	/* virtio 1.0 */
355 	if (PCI_PRODUCT(pa->pa_id) >= 0x1040 &&
356 	    PCI_PRODUCT(pa->pa_id) <= 0x107f &&
357 	    PCI_REVISION(pa->pa_class) == 1)
358 		return 1;
359 	return 0;
360 }
361 
362 #if VIRTIO_DEBUG
/*
 * Debug helper: walk the vendor-specific PCI capability list and print
 * each virtio capability's location and target region.
 */
void
virtio_pci_dump_caps(struct virtio_pci_softc *sc)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_ptag;
	int offset;
	union {
		pcireg_t reg[4];
		struct virtio_pci_cap vcap;
	} v;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v.reg[0]))
		return;

	printf("\n");
	do {
		/* read the full 16-byte virtio cap header */
		for (int i = 0; i < 4; i++)
			v.reg[i] = pci_conf_read(pc, tag, offset + i * 4);
		printf("%s: cfgoff %#x len %#x type %#x bar %#x: off %#x len %#x\n",
			__func__, offset, v.vcap.cap_len, v.vcap.cfg_type, v.vcap.bar,
			v.vcap.offset, v.vcap.length);
		offset = v.vcap.cap_next;
	} while (offset != 0);
}
387 #endif
388 
389 int
390 virtio_pci_find_cap(struct virtio_pci_softc *sc, int cfg_type, void *buf, int buflen)
391 {
392 	pci_chipset_tag_t pc = sc->sc_pc;
393 	pcitag_t tag = sc->sc_ptag;
394 	unsigned int offset, i, len;
395 	union {
396 		pcireg_t reg[8];
397 		struct virtio_pci_cap vcap;
398 	} *v = buf;
399 
400 	if (buflen < sizeof(struct virtio_pci_cap))
401 		return ERANGE;
402 
403 	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v->reg[0]))
404 		return ENOENT;
405 
406 	do {
407 		for (i = 0; i < 4; i++)
408 			v->reg[i] = pci_conf_read(pc, tag, offset + i * 4);
409 		if (v->vcap.cfg_type == cfg_type)
410 			break;
411 		offset = v->vcap.cap_next;
412 	} while (offset != 0);
413 
414 	if (offset == 0)
415 		return ENOENT;
416 
417 	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
418 		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
419 		if (len > buflen) {
420 			printf("%s: cap too large\n", __func__);
421 			return ERANGE;
422 		}
423 		for (i = 4; i < len / sizeof(pcireg_t);  i++)
424 			v->reg[i] = pci_conf_read(pc, tag, offset + i * 4);
425 	}
426 
427 	return 0;
428 }
429 
430 
431 #define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
432 				sizeof(pcireg_t))
433 
/*
 * Attach using the virtio 1.0 register layout: locate the virtio PCI
 * capabilities, map every BAR any of them references, then carve the
 * common/notify/isr/devcfg regions out of those mappings as subregions.
 * Returns 0 on success and sets sc_version_1; nonzero errno on failure.
 */
int
virtio_pci_attach_10(struct virtio_pci_softc *sc, struct pci_attach_args *pa)
{
	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };	/* required length per BAR */
	int bars_idx[NMAPREG] = { 0 };		/* BAR number -> sc_bars_* slot */
	struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
	int i, j = 0, ret = 0;

	/* common, notify and isr capabilities are mandatory for 1.0 */
	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_COMMON_CFG, &common, sizeof(common)) != 0)
		return ENODEV;

	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_NOTIFY_CFG, &notify, sizeof(notify)) != 0)
		return ENODEV;
	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_ISR_CFG, &isr, sizeof(isr)) != 0)
		return ENODEV;
	/* the device-specific config region is optional */
	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_DEVICE_CFG, &device, sizeof(device)) != 0)
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;

	/*
	 * XXX Maybe there are devices that offer the pci caps but not the
	 * XXX VERSION_1 feature bit? Then we should check the feature bit
	 * XXX here and fall back to 0.9 out if not present.
	 */

	/* Figure out which bars we need to map */
	for (i = 0; i < nitems(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;
		if (caps[i]->length == 0)
			continue;
		if (bars[bar] < len)
			bars[bar] = len;
	}

	/* map each needed BAR once; regions below are subregions of these */
	for (i = 0; i < nitems(bars); i++) {
		int reg;
		pcireg_t type;
		if (bars[i] == 0)
			continue;
		reg = PCI_MAPREG_START + i * 4;
		type = pci_mapreg_type(sc->sc_pc, sc->sc_ptag, reg);
		if (pci_mapreg_map(pa, reg, type, 0, &sc->sc_bars_iot[j],
		    &sc->sc_bars_ioh[j], NULL, &sc->sc_bars_iosize[j],
		    bars[i])) {
			printf("%s: can't map bar %u \n",
			    sc->sc_sc.sc_dev.dv_xname, i);
			ret = EIO;
			goto err;
		}
		bars_idx[i] = j;
		j++;
	}

	/* queue notification region */
	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    notify.cap.offset, notify.cap.length, &sc->sc_notify_ioh) != 0) {
		printf("%s: can't map notify i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_notify_iosize = notify.cap.length;
	sc->sc_notify_iot = sc->sc_bars_iot[i];
	sc->sc_notify_off_multiplier = notify.notify_off_multiplier;

	/* device-specific config region (optional) */
	if (have_device_cfg) {
		i = bars_idx[device.bar];
		if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
		    device.offset, device.length, &sc->sc_devcfg_ioh) != 0) {
			printf("%s: can't map devcfg i/o space\n",
			    sc->sc_sc.sc_dev.dv_xname);
			ret = EIO;
			goto err;
		}
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = sc->sc_bars_iot[i];
	}

	/* ISR status region */
	i = bars_idx[isr.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    isr.offset, isr.length, &sc->sc_isr_ioh) != 0) {
		printf("%s: can't map isr i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_isr_iosize = isr.length;
	sc->sc_isr_iot = sc->sc_bars_iot[i];

	/* common config region, used by CREAD/CWRITE */
	i = bars_idx[common.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    common.offset, common.length, &sc->sc_ioh) != 0) {
		printf("%s: can't map common i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_iosize = common.length;
	sc->sc_iot = sc->sc_bars_iot[i];

	sc->sc_sc.sc_version_1 = 1;
	return 0;

err:
	/* there is no pci_mapreg_unmap() */
	return ret;
}
546 
/*
 * Attach using the legacy virtio 0.9 layout: everything lives in the
 * first BAR, with notify and ISR regions carved out as subregions.
 * Returns 0 on success, EIO on mapping failure.
 */
int
virtio_pci_attach_09(struct virtio_pci_softc *sc, struct pci_attach_args *pa)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	pcireg_t type;

	/* map the whole legacy register window at BAR0 */
	type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
	if (pci_mapreg_map(pa, PCI_MAPREG_START, type, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_iosize, 0)) {
		printf("%s: can't map i/o space\n", vsc->sc_dev.dv_xname);
		return EIO;
	}

	/* the 16-bit queue notify register */
	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &sc->sc_notify_ioh) != 0) {
		printf("%s: can't map notify i/o space\n",
		    vsc->sc_dev.dv_xname);
		return EIO;
	}
	sc->sc_notify_iosize = 2;
	sc->sc_notify_iot = sc->sc_iot;

	/* the 8-bit ISR status register */
	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS, 1, &sc->sc_isr_ioh) != 0) {
		printf("%s: can't map isr i/o space\n",
		    vsc->sc_dev.dv_xname);
		return EIO;
	}
	sc->sc_isr_iosize = 1;
	sc->sc_isr_iot = sc->sc_iot;

	return 0;
}
580 
/*
 * Main attach: determine the virtio device id from the PCI revision,
 * map registers (preferring the 1.0 layout where allowed), reset the
 * device, attach the matching child driver, and establish interrupts —
 * per-VQ MSI-X preferred, then shared MSI-X, then MSI/INTx.
 */
void
virtio_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
	struct virtio_softc *vsc = &sc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision, ret = ENODEV;
	pcireg_t id;
	char const *intrstr;
	pci_intr_handle_t ih;
	struct virtio_pci_attach_args vpa = { { 0 }, pa };

	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* subsystem ID shows what I am */
		id = PCI_PRODUCT(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		/* 1.0: the device type is encoded in the product id */
		id = PCI_PRODUCT(pa->pa_id) - 0x1040;
		break;
	default:
		printf("unknown revision 0x%02x; giving up\n", revision);
		return;
	}

	sc->sc_pc = pc;
	sc->sc_ptag = pa->pa_tag;
	vsc->sc_dmat = pa->pa_dmat;

#if defined(__i386__) || defined(__amd64__)
	/*
	 * For virtio, ignore normal MSI black/white-listing depending on the
	 * PCI bridge but enable it unconditionally.
	 */
	pa->pa_flags |= PCI_FLAGS_MSI_ENABLED;
#endif

#if VIRTIO_DEBUG
	virtio_pci_dump_caps(sc);
#endif

	/* use up to MAX_MSIX_VECS MSI-X vectors, but always at least one */
	sc->sc_nintr = min(MAX_MSIX_VECS, pci_intr_msix_count(pa));
	sc->sc_nintr = max(sc->sc_nintr, 1);
	vpa.vpa_va.va_nintr = sc->sc_nintr;

	sc->sc_intr = mallocarray(sc->sc_nintr, sizeof(*sc->sc_intr),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	vsc->sc_ops = &virtio_pci_ops;
	/* try the 1.0 layout first unless disabled via UKC flags */
	if ((vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_VERSION_1) == 0 &&
	    (revision == 1 ||
	     (vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_PREFER_VERSION_1))) {
		ret = virtio_pci_attach_10(sc, pa);
	}
	if (ret != 0 && revision == 0) {
		/* revision 0 means 0.9 only or both 0.9 and 1.0 */
		ret = virtio_pci_attach_09(sc, pa);
	}
	if (ret != 0) {
		printf(": Cannot attach (%d)\n", ret);
		goto fail_0;
	}

	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	sc->sc_irq_type = IRQ_NO_MSIX;
	if (virtio_pci_adjust_config_region(sc) != 0)
		goto fail_0;

	virtio_device_reset(vsc);
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	printf("\n");
	vpa.vpa_va.va_devid = id;
	vsc->sc_child = NULL;
	config_found(self, &vpa, NULL);
	if (vsc->sc_child == NULL) {
		printf("%s: no matching child driver; not configured\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}
	if (vsc->sc_child == VIRTIO_CHILD_ERROR) {
		printf("%s: virtio configuration failed\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}

	/* interrupt setup: per-VQ MSI-X, then shared MSI-X, then MSI/INTx */
	if (virtio_pci_setup_msix(sc, &vpa, 0) == 0) {
		sc->sc_irq_type = IRQ_MSIX_PER_VQ;
		intrstr = "msix per-VQ";
	} else if (virtio_pci_setup_msix(sc, &vpa, 1) == 0) {
		sc->sc_irq_type = IRQ_MSIX_SHARED;
		intrstr = "msix shared";
	} else {
		int (*ih_func)(void *) = virtio_pci_legacy_intr;
		if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
			printf("%s: couldn't map interrupt\n", vsc->sc_dev.dv_xname);
			goto fail_2;
		}
		intrstr = pci_intr_string(pc, ih);
		/*
		 * We always set the IPL_MPSAFE flag in order to do the relatively
		 * expensive ISR read without lock, and then grab the kernel lock in
		 * the interrupt handler.
		 */
		if (vsc->sc_ipl & IPL_MPSAFE)
			ih_func = virtio_pci_legacy_intr_mpsafe;
		sc->sc_intr[0].ih = pci_intr_establish(pc, ih,
		    vsc->sc_ipl | IPL_MPSAFE, ih_func, sc,
		    vsc->sc_child->dv_xname);
		if (sc->sc_intr[0].ih == NULL) {
			printf("%s: couldn't establish interrupt", vsc->sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			goto fail_2;
		}
	}
	printf("%s: %s\n", vsc->sc_dev.dv_xname, intrstr);

	return;

fail_2:
	config_detach(vsc->sc_child, 0);
fail_1:
	/* no pci_mapreg_unmap() or pci_intr_unmap() */
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
fail_0:
	free(sc->sc_intr, M_DEVBUF, sc->sc_nintr * sizeof(*sc->sc_intr));
}
714 
715 int
716 virtio_pci_detach(struct device *self, int flags)
717 {
718 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
719 	struct virtio_softc *vsc = &sc->sc_sc;
720 	int r;
721 
722 	if (vsc->sc_child != 0 && vsc->sc_child != VIRTIO_CHILD_ERROR) {
723 		r = config_detach(vsc->sc_child, flags);
724 		if (r)
725 			return r;
726 	}
727 	KASSERT(vsc->sc_child == 0 || vsc->sc_child == VIRTIO_CHILD_ERROR);
728 	KASSERT(vsc->sc_vqs == 0);
729 	virtio_pci_free_irqs(sc);
730 	if (sc->sc_iosize)
731 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_iosize);
732 	sc->sc_iosize = 0;
733 
734 	return 0;
735 }
736 
737 int
738 virtio_pci_adjust_config_region(struct virtio_pci_softc *sc)
739 {
740 	if (sc->sc_sc.sc_version_1)
741 		return 0;
742 	sc->sc_devcfg_iosize = sc->sc_iosize - sc->sc_devcfg_offset;
743 	sc->sc_devcfg_iot = sc->sc_iot;
744 	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, sc->sc_devcfg_offset,
745 	    sc->sc_devcfg_iosize, &sc->sc_devcfg_ioh) != 0) {
746 		printf("%s: can't map config i/o space\n",
747 		    sc->sc_sc.sc_dev.dv_xname);
748 		return 1;
749 	}
750 	return 0;
751 }
752 
/*
 * Feature negotiation.
 * Applies the UKC driver-flag overrides (indirect descriptors, event
 * index), then negotiates with the device: delegates to the 1.0 path
 * when sc_version_1 is set, otherwise uses the single 32-bit 0.9
 * feature registers.
 * Prints available / negotiated features if guest_feature_names != NULL and
 * VIRTIO_DEBUG is 1
 */
int
virtio_pci_negotiate_features(struct virtio_softc *vsc,
    const struct virtio_feature_name *guest_feature_names)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t host, negotiated;

	vsc->sc_active_features = 0;

	/*
	 * We enable indirect descriptors by default. They can be switched
	 * off by setting bit 1 in the driver flags, see config(8)
	 */
	if (!(vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT) &&
	    !(vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT)) {
		vsc->sc_driver_features |= VIRTIO_F_RING_INDIRECT_DESC;
	} else if (guest_feature_names != NULL) {
		printf(" RingIndirectDesc disabled by UKC");
	}

	/*
	 * The driver must add VIRTIO_F_RING_EVENT_IDX if it supports it.
	 * If it did, check if it is disabled by bit 2 in the driver flags.
	 */
	if ((vsc->sc_driver_features & VIRTIO_F_RING_EVENT_IDX) &&
	    ((vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_EVENT_IDX) ||
	    (vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_EVENT_IDX))) {
		if (guest_feature_names != NULL)
			printf(" RingEventIdx disabled by UKC");
		vsc->sc_driver_features &= ~VIRTIO_F_RING_EVENT_IDX;
	}

	if (vsc->sc_version_1) {
		return virtio_pci_negotiate_features_10(vsc,
		    guest_feature_names);
	}

	/* virtio 0.9 only: a single 32-bit feature word each way */
	host = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
				VIRTIO_CONFIG_DEVICE_FEATURES);
	negotiated = host & vsc->sc_driver_features;
#if VIRTIO_DEBUG
	if (guest_feature_names)
		virtio_log_features(host, negotiated, guest_feature_names);
#endif
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			  VIRTIO_CONFIG_GUEST_FEATURES, negotiated);
	vsc->sc_active_features = negotiated;
	if (negotiated & VIRTIO_F_RING_INDIRECT_DESC)
		vsc->sc_indirect = 1;
	else
		vsc->sc_indirect = 0;
	return 0;
}
812 
/*
 * Virtio 1.0 feature negotiation: read the 64-bit device features in two
 * 32-bit halves via the feature_select window, write back the negotiated
 * set the same way, then set FEATURES_OK and verify the device accepted
 * it. Returns 0 on success, ENXIO/EINVAL on rejection (device is marked
 * FAILED in that case).
 */
int
virtio_pci_negotiate_features_10(struct virtio_softc *vsc,
    const struct virtio_feature_name *guest_feature_names)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t host, negotiated;

	vsc->sc_driver_features |= VIRTIO_F_VERSION_1;
	/* notify on empty is 0.9 only */
	vsc->sc_driver_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;

	/* read the 64 device feature bits, low word then high word */
	CWRITE(sc, device_feature_select, 0);
	host = CREAD(sc, device_feature);
	CWRITE(sc, device_feature_select, 1);
	host |= (uint64_t)CREAD(sc, device_feature) << 32;

	negotiated = host & vsc->sc_driver_features;
#if VIRTIO_DEBUG
	if (guest_feature_names)
		virtio_log_features(host, negotiated, guest_feature_names);
#endif
	/* write back our selection, again in two halves */
	CWRITE(sc, driver_feature_select, 0);
	CWRITE(sc, driver_feature, negotiated & 0xffffffff);
	CWRITE(sc, driver_feature_select, 1);
	CWRITE(sc, driver_feature, negotiated >> 32);
	virtio_pci_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);

	/* the device clears FEATURES_OK if it rejects the feature set */
	if ((CREAD(sc, device_status) &
	    VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
		printf("%s: Feature negotiation failed\n",
		    vsc->sc_dev.dv_xname);
		CWRITE(sc, device_status, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return ENXIO;
	}
	vsc->sc_active_features = negotiated;

	if (negotiated & VIRTIO_F_RING_INDIRECT_DESC)
		vsc->sc_indirect = 1;
	else
		vsc->sc_indirect = 0;

	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
#if VIRTIO_DEBUG
		printf("%s: Host rejected Version_1\n", __func__);
#endif
		CWRITE(sc, device_status, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return EINVAL;
	}
	return 0;
}
862 
863 /*
864  * Device configuration registers.
865  */
866 uint8_t
867 virtio_pci_read_device_config_1(struct virtio_softc *vsc, int index)
868 {
869 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
870 	return bus_space_read_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
871 }
872 
873 uint16_t
874 virtio_pci_read_device_config_2(struct virtio_softc *vsc, int index)
875 {
876 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
877 	return bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
878 }
879 
880 uint32_t
881 virtio_pci_read_device_config_4(struct virtio_softc *vsc, int index)
882 {
883 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
884 	return bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
885 }
886 
/*
 * Read a 64-bit value from the device-specific config region as two
 * 32-bit reads (high word first). Not atomic wrt. the device.
 */
uint64_t
virtio_pci_read_device_config_8(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t r;

	r = bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
	    index + sizeof(uint32_t));
	r <<= 32;
	r += bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
	return r;
}
899 
900 void
901 virtio_pci_write_device_config_1(struct virtio_softc *vsc, int index,
902     uint8_t value)
903 {
904 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
905 	bus_space_write_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
906 }
907 
908 void
909 virtio_pci_write_device_config_2(struct virtio_softc *vsc, int index,
910     uint16_t value)
911 {
912 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
913 	bus_space_write_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
914 }
915 
916 void
917 virtio_pci_write_device_config_4(struct virtio_softc *vsc,
918 			     int index, uint32_t value)
919 {
920 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
921 	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
922 }
923 
/*
 * Write a 64-bit value into the device-specific config region as two
 * 32-bit writes (low word first). Not atomic wrt. the device.
 */
void
virtio_pci_write_device_config_8(struct virtio_softc *vsc,
			     int index, uint64_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
	    index, value & 0xffffffff);
	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
	    index + sizeof(uint32_t), value >> 32);
}
934 
/*
 * Map and establish MSI-X vector "idx", dispatching to "handler" with
 * argument "ih_arg". Returns 0 on success, 1 on failure so the caller
 * can fall back to fewer vectors or legacy interrupts.
 */
int
virtio_pci_msix_establish(struct virtio_pci_softc *sc,
    struct virtio_pci_attach_args *vpa, int idx,
    int (*handler)(void *), void *ih_arg)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	pci_intr_handle_t ih;

	KASSERT(idx < sc->sc_nintr);

	if (pci_intr_map_msix(vpa->vpa_pa, idx, &ih) != 0) {
#if VIRTIO_DEBUG
		printf("%s[%d]: pci_intr_map_msix failed\n",
		    vsc->sc_dev.dv_xname, idx);
#endif
		return 1;
	}
	/* name the vector after the child device and vector index */
	snprintf(sc->sc_intr[idx].name, sizeof(sc->sc_intr[idx].name), "%s:%d",
	    vsc->sc_child->dv_xname, idx);
	sc->sc_intr[idx].ih = pci_intr_establish(sc->sc_pc, ih, vsc->sc_ipl,
	    handler, ih_arg, sc->sc_intr[idx].name);
	if (sc->sc_intr[idx].ih == NULL) {
		printf("%s[%d]: couldn't establish msix interrupt\n",
		    vsc->sc_dev.dv_xname, idx);
		return 1;
	}
	return 0;
}
963 
964 void
965 virtio_pci_set_msix_queue_vector(struct virtio_pci_softc *sc, uint32_t idx, uint16_t vector)
966 {
967 	if (sc->sc_sc.sc_version_1) {
968 		CWRITE(sc, queue_select, idx);
969 		CWRITE(sc, queue_msix_vector, vector);
970 	} else {
971 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
972 		    VIRTIO_CONFIG_QUEUE_SELECT, idx);
973 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
974 		    VIRTIO_MSI_QUEUE_VECTOR, vector);
975 	}
976 }
977 
978 void
979 virtio_pci_set_msix_config_vector(struct virtio_pci_softc *sc, uint16_t vector)
980 {
981 	if (sc->sc_sc.sc_version_1) {
982 		CWRITE(sc, config_msix_vector, vector);
983 	} else {
984 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
985 		    VIRTIO_MSI_CONFIG_VECTOR, vector);
986 	}
987 }
988 
989 
990 void
991 virtio_pci_free_irqs(struct virtio_pci_softc *sc)
992 {
993 	struct virtio_softc *vsc = &sc->sc_sc;
994 	int i;
995 
996 	if (sc->sc_devcfg_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI) {
997 		for (i = 0; i < vsc->sc_nvqs; i++) {
998 			virtio_pci_set_msix_queue_vector(sc, i,
999 			    VIRTIO_MSI_NO_VECTOR);
1000 		}
1001 	}
1002 
1003 	for (i = 0; i < sc->sc_nintr; i++) {
1004 		if (sc->sc_intr[i].ih) {
1005 			pci_intr_disestablish(sc->sc_pc, sc->sc_intr[i].ih);
1006 			sc->sc_intr[i].ih = NULL;
1007 		}
1008 	}
1009 
1010 	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
1011 	virtio_pci_adjust_config_region(sc);
1012 }
1013 
1014 int
1015 virtio_pci_setup_msix(struct virtio_pci_softc *sc,
1016     struct virtio_pci_attach_args *vpa, int shared)
1017 {
1018 	struct virtio_softc *vsc = &sc->sc_sc;
1019 	int i;
1020 
1021 	/* Shared needs config + queue */
1022 	if (shared && vpa->vpa_va.va_nintr < 1 + 1)
1023 		return 1;
1024 	/* Per VQ needs config + N * queue */
1025 	if (!shared && vpa->vpa_va.va_nintr < 1 + vsc->sc_nvqs)
1026 		return 1;
1027 
1028 	if (virtio_pci_msix_establish(sc, vpa, 0, virtio_pci_config_intr, vsc))
1029 		return 1;
1030 	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
1031 	virtio_pci_adjust_config_region(sc);
1032 	virtio_pci_set_msix_config_vector(sc, 0);
1033 
1034 	if (shared) {
1035 		if (virtio_pci_msix_establish(sc, vpa, 1,
1036 		    virtio_pci_shared_queue_intr, vsc)) {
1037 			goto fail;
1038 		}
1039 
1040 		for (i = 0; i < vsc->sc_nvqs; i++)
1041 			virtio_pci_set_msix_queue_vector(sc, i, 1);
1042 	} else {
1043 		for (i = 0; i < vsc->sc_nvqs; i++) {
1044 			if (virtio_pci_msix_establish(sc, vpa, i + 1,
1045 			    virtio_pci_queue_intr, &vsc->sc_vqs[i])) {
1046 				goto fail;
1047 			}
1048 			virtio_pci_set_msix_queue_vector(sc, i, i + 1);
1049 		}
1050 	}
1051 
1052 	return 0;
1053 fail:
1054 	virtio_pci_free_irqs(sc);
1055 	return 1;
1056 }
1057 
1058 /*
1059  * Interrupt handler.
1060  */
1061 
1062 /*
1063  * Only used without MSI-X
1064  */
1065 int
1066 virtio_pci_legacy_intr(void *arg)
1067 {
1068 	struct virtio_pci_softc *sc = arg;
1069 	struct virtio_softc *vsc = &sc->sc_sc;
1070 	int isr, r = 0;
1071 
1072 	/* check and ack the interrupt */
1073 	isr = bus_space_read_1(sc->sc_isr_iot, sc->sc_isr_ioh, 0);
1074 	if (isr == 0)
1075 		return 0;
1076 	KERNEL_LOCK();
1077 	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
1078 	    (vsc->sc_config_change != NULL)) {
1079 		r = (vsc->sc_config_change)(vsc);
1080 	}
1081 	r |= virtio_check_vqs(vsc);
1082 	KERNEL_UNLOCK();
1083 
1084 	return r;
1085 }
1086 
1087 int
1088 virtio_pci_legacy_intr_mpsafe(void *arg)
1089 {
1090 	struct virtio_pci_softc *sc = arg;
1091 	struct virtio_softc *vsc = &sc->sc_sc;
1092 	int isr, r = 0;
1093 
1094 	/* check and ack the interrupt */
1095 	isr = bus_space_read_1(sc->sc_isr_iot, sc->sc_isr_ioh, 0);
1096 	if (isr == 0)
1097 		return 0;
1098 	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
1099 	    (vsc->sc_config_change != NULL)) {
1100 		r = (vsc->sc_config_change)(vsc);
1101 	}
1102 	r |= virtio_check_vqs(vsc);
1103 	return r;
1104 }
1105 
1106 /*
1107  * Only used with MSI-X
1108  */
1109 int
1110 virtio_pci_config_intr(void *arg)
1111 {
1112 	struct virtio_softc *vsc = arg;
1113 
1114 	if (vsc->sc_config_change != NULL)
1115 		return vsc->sc_config_change(vsc);
1116 	return 0;
1117 }
1118 
1119 /*
1120  * Only used with MSI-X
1121  */
1122 int
1123 virtio_pci_queue_intr(void *arg)
1124 {
1125 	struct virtqueue *vq = arg;
1126 	struct virtio_softc *vsc = vq->vq_owner;
1127 
1128 	return virtio_check_vq(vsc, vq);
1129 }
1130 
int
virtio_pci_shared_queue_intr(void *arg)
{
	/* Shared vector: scan all virtqueues. */
	return virtio_check_vqs((struct virtio_softc *)arg);
}
1138 
1139 /*
1140  * Interrupt handler to be used when polling.
1141  * We cannot use isr here because it is not defined in MSI-X mode.
1142  */
1143 int
1144 virtio_pci_poll_intr(void *arg)
1145 {
1146 	struct virtio_pci_softc *sc = arg;
1147 	struct virtio_softc *vsc = &sc->sc_sc;
1148 	int r = 0;
1149 
1150 	if (vsc->sc_config_change != NULL)
1151 		r = (vsc->sc_config_change)(vsc);
1152 
1153 	r |= virtio_check_vqs(vsc);
1154 
1155 	return r;
1156 }
1157 
1158 void
1159 virtio_pci_kick(struct virtio_softc *vsc, uint16_t idx)
1160 {
1161 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
1162 	unsigned offset = 0;
1163 	if (vsc->sc_version_1) {
1164 		offset = vsc->sc_vqs[idx].vq_notify_off *
1165 		    sc->sc_notify_off_multiplier;
1166 	}
1167 	bus_space_write_2(sc->sc_notify_iot, sc->sc_notify_ioh, offset, idx);
1168 }
1169