xref: /openbsd-src/sys/dev/pci/virtio_pci.c (revision 77d0f8231d53e9aa7f66fbc7e23f42a854c10ff0)
1 /*	$OpenBSD: virtio_pci.c,v 1.48 2024/12/20 22:18:27 sf Exp $	*/
2 /*	$NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $	*/
3 
4 /*
5  * Copyright (c) 2012 Stefan Fritsch.
6  * Copyright (c) 2010 Minoura Makoto.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/device.h>
33 #include <sys/mutex.h>
34 
35 #include <dev/pci/pcidevs.h>
36 #include <dev/pci/pcireg.h>
37 #include <dev/pci/pcivar.h>
38 #include <dev/pci/virtio_pcireg.h>
39 
40 #include <dev/pv/virtioreg.h>
41 #include <dev/pv/virtiovar.h>
42 
/* Debug printf: only emits output when compile-time VIRTIO_DEBUG >= n. */
#define DNPRINTF(n,x...)				\
    do { if (VIRTIO_DEBUG >= n) printf(x); } while(0)


/*
 * XXX: Before being used on big endian arches, the access to config registers
 * XXX: needs to be reviewed/fixed. The non-device specific registers are
 * XXX: PCI-endian while the device specific registers are native endian.
 */

/* Upper bound on the number of MSI-X vectors allocated per device. */
#define MAX_MSIX_VECS	8
54 
55 struct virtio_pci_softc;
56 struct virtio_pci_attach_args;
57 
58 int		virtio_pci_match(struct device *, void *, void *);
59 void		virtio_pci_attach(struct device *, struct device *, void *);
60 int		virtio_pci_attach_09(struct virtio_pci_softc *sc, struct pci_attach_args *pa);
61 int		virtio_pci_attach_10(struct virtio_pci_softc *sc, struct pci_attach_args *pa);
62 int		virtio_pci_detach(struct device *, int);
63 
64 void		virtio_pci_kick(struct virtio_softc *, uint16_t);
65 int		virtio_pci_adjust_config_region(struct virtio_pci_softc *);
66 uint8_t		virtio_pci_read_device_config_1(struct virtio_softc *, int);
67 uint16_t	virtio_pci_read_device_config_2(struct virtio_softc *, int);
68 uint32_t	virtio_pci_read_device_config_4(struct virtio_softc *, int);
69 uint64_t	virtio_pci_read_device_config_8(struct virtio_softc *, int);
70 void		virtio_pci_write_device_config_1(struct virtio_softc *, int, uint8_t);
71 void		virtio_pci_write_device_config_2(struct virtio_softc *, int, uint16_t);
72 void		virtio_pci_write_device_config_4(struct virtio_softc *, int, uint32_t);
73 void		virtio_pci_write_device_config_8(struct virtio_softc *, int, uint64_t);
74 uint16_t	virtio_pci_read_queue_size(struct virtio_softc *, uint16_t);
75 void		virtio_pci_setup_queue(struct virtio_softc *, struct virtqueue *, uint64_t);
76 void		virtio_pci_setup_intrs(struct virtio_softc *);
77 int		virtio_pci_attach_finish(struct virtio_softc *, struct virtio_attach_args *);
78 int		virtio_pci_get_status(struct virtio_softc *);
79 void		virtio_pci_set_status(struct virtio_softc *, int);
80 int		virtio_pci_negotiate_features(struct virtio_softc *, const struct virtio_feature_name *);
81 int		virtio_pci_negotiate_features_10(struct virtio_softc *, const struct virtio_feature_name *);
82 void		virtio_pci_set_msix_queue_vector(struct virtio_pci_softc *, uint32_t, uint16_t);
83 void		virtio_pci_set_msix_config_vector(struct virtio_pci_softc *, uint16_t);
84 int		virtio_pci_msix_establish(struct virtio_pci_softc *, struct virtio_pci_attach_args *, int, int (*)(void *), void *);
85 int		virtio_pci_setup_msix(struct virtio_pci_softc *, struct virtio_pci_attach_args *, int);
86 void		virtio_pci_intr_barrier(struct virtio_softc *);
87 void		virtio_pci_free_irqs(struct virtio_pci_softc *);
88 int		virtio_pci_poll_intr(void *);
89 int		virtio_pci_legacy_intr(void *);
90 int		virtio_pci_legacy_intr_mpsafe(void *);
91 int		virtio_pci_config_intr(void *);
92 int		virtio_pci_queue_intr(void *);
93 int		virtio_pci_shared_queue_intr(void *);
94 int		virtio_pci_find_cap(struct virtio_pci_softc *sc, int cfg_type, void *buf, int buflen);
95 #if VIRTIO_DEBUG
96 void virtio_pci_dump_caps(struct virtio_pci_softc *sc);
97 #endif
98 
/*
 * Interrupt delivery strategy, decided at attach time depending on how
 * many MSI-X vectors could be mapped and established.
 */
enum irq_type {
	IRQ_NO_MSIX,	 /* single legacy INTx/MSI handler */
	IRQ_MSIX_SHARED, /* vec 0: config irq, vec 1 shared by all vqs */
	IRQ_MSIX_PER_VQ, /* vec 0: config irq, vec n: irq of vq[n-1] */
};
104 
/* Per-vector interrupt state. */
struct virtio_pci_intr {
	char	 name[16];	/* handler name ("<child>:<vector>") */
	void	*ih;		/* cookie from pci_intr_establish() */
};
109 
struct virtio_pci_softc {
	/* Generic virtio state; must be first, code casts between the two. */
	struct virtio_softc	sc_sc;
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_ptag;

	/* Common config registers (1.0) / whole BAR 0 window (0.9). */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;

	/* BARs mapped for the 1.0 capability structures. */
	bus_space_tag_t		sc_bars_iot[4];
	bus_space_handle_t	sc_bars_ioh[4];
	bus_size_t		sc_bars_iosize[4];

	/* Queue notify ("doorbell") region. */
	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;
	unsigned int		sc_notify_off_multiplier;

	/* Device-specific config registers. */
	bus_space_tag_t		sc_devcfg_iot;
	bus_space_handle_t	sc_devcfg_ioh;
	bus_size_t		sc_devcfg_iosize;
	/*
	 * With 0.9, the offset of the devcfg region in the io bar changes
	 * depending on MSI-X being enabled or not.
	 * With 1.0, this field is still used to remember if MSI-X is enabled
	 * or not.
	 */
	unsigned int		sc_devcfg_offset;

	/* ISR status register region. */
	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	/* sc_nintr established handlers (see enum irq_type). */
	struct virtio_pci_intr	*sc_intr;
	int			sc_nintr;

	enum irq_type		sc_irq_type;
};
148 
/* Attach args passed down to the child driver via config_found(). */
struct virtio_pci_attach_args {
	struct virtio_attach_args	 vpa_va;	/* generic part; must be first */
	struct pci_attach_args		*vpa_pa;	/* PCI attach args of the parent */
};
153 
154 
/* autoconf(9) glue. */
const struct cfattach virtio_pci_ca = {
	sizeof(struct virtio_pci_softc),
	virtio_pci_match,
	virtio_pci_attach,
	virtio_pci_detach,
	NULL
};
162 
/* Transport operations used by the generic virtio(4) layer. */
const struct virtio_ops virtio_pci_ops = {
	virtio_pci_kick,
	virtio_pci_read_device_config_1,
	virtio_pci_read_device_config_2,
	virtio_pci_read_device_config_4,
	virtio_pci_read_device_config_8,
	virtio_pci_write_device_config_1,
	virtio_pci_write_device_config_2,
	virtio_pci_write_device_config_4,
	virtio_pci_write_device_config_8,
	virtio_pci_read_queue_size,
	virtio_pci_setup_queue,
	virtio_pci_setup_intrs,
	virtio_pci_get_status,
	virtio_pci_set_status,
	virtio_pci_negotiate_features,
	virtio_pci_attach_finish,
	virtio_pci_poll_intr,
	virtio_pci_intr_barrier,
};
183 
184 static inline uint64_t
185 _cread(struct virtio_pci_softc *sc, unsigned off, unsigned size)
186 {
187 	uint64_t val;
188 	switch (size) {
189 	case 1:
190 		val = bus_space_read_1(sc->sc_iot, sc->sc_ioh, off);
191 		break;
192 	case 2:
193 		val = bus_space_read_2(sc->sc_iot, sc->sc_ioh, off);
194 		break;
195 	case 4:
196 		val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
197 		break;
198 	case 8:
199 		val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
200 		    off + sizeof(uint32_t));
201 		val <<= 32;
202 		val += bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
203 		break;
204 	}
205 	return val;
206 }
207 
/* Read field `memb` of the virtio 1.0 common config structure. */
#define CREAD(sc, memb)  _cread(sc, offsetof(struct virtio_pci_common_cfg, memb), \
    sizeof(((struct virtio_pci_common_cfg *)0)->memb))

/*
 * Write field `memb` of the virtio 1.0 common config structure. The
 * access width is derived from the field's size; 64-bit fields are
 * written as two 32-bit accesses, low word first.
 */
#define CWRITE(sc, memb, val)							\
	do {									\
		struct virtio_pci_common_cfg c;					\
		size_t off = offsetof(struct virtio_pci_common_cfg, memb);	\
		size_t size = sizeof(c.memb);					\
										\
		DNPRINTF(2, "%s: %d: off %#zx size %#zx write %#llx\n",		\
		    __func__, __LINE__, off, size, (unsigned long long)val);	\
		switch (size) {							\
		case 1:								\
			bus_space_write_1(sc->sc_iot, sc->sc_ioh, off, val);	\
			break;							\
		case 2:								\
			bus_space_write_2(sc->sc_iot, sc->sc_ioh, off, val);	\
			break;							\
		case 4:								\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);	\
			break;							\
		case 8:								\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, off,		\
			    (val) & 0xffffffff);				\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh,		\
			    (off) + sizeof(uint32_t), (uint64_t)(val) >> 32);	\
			break;							\
		}								\
	} while (0)
237 
238 uint16_t
239 virtio_pci_read_queue_size(struct virtio_softc *vsc, uint16_t idx)
240 {
241 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
242 	uint16_t ret;
243 	if (sc->sc_sc.sc_version_1) {
244 		CWRITE(sc, queue_select, idx);
245 		ret = CREAD(sc, queue_size);
246 	} else {
247 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
248 		    VIRTIO_CONFIG_QUEUE_SELECT, idx);
249 		ret = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
250 		    VIRTIO_CONFIG_QUEUE_SIZE);
251 	}
252 	return ret;
253 }
254 
255 void
256 virtio_pci_setup_queue(struct virtio_softc *vsc, struct virtqueue *vq,
257     uint64_t addr)
258 {
259 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
260 	if (sc->sc_sc.sc_version_1) {
261 		CWRITE(sc, queue_select, vq->vq_index);
262 		if (addr == 0) {
263 			CWRITE(sc, queue_enable, 0);
264 			CWRITE(sc, queue_desc, 0);
265 			CWRITE(sc, queue_avail, 0);
266 			CWRITE(sc, queue_used, 0);
267 		} else {
268 			CWRITE(sc, queue_desc, addr);
269 			CWRITE(sc, queue_avail, addr + vq->vq_availoffset);
270 			CWRITE(sc, queue_used, addr + vq->vq_usedoffset);
271 			CWRITE(sc, queue_enable, 1);
272 			vq->vq_notify_off = CREAD(sc, queue_notify_off);
273 		}
274 	} else {
275 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
276 		    VIRTIO_CONFIG_QUEUE_SELECT, vq->vq_index);
277 		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
278 		    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);
279 	}
280 }
281 
282 void
283 virtio_pci_setup_intrs(struct virtio_softc *vsc)
284 {
285 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
286 	int i;
287 
288 	if (sc->sc_irq_type == IRQ_NO_MSIX)
289 		return;
290 
291 	for (i = 0; i < vsc->sc_nvqs; i++) {
292 		unsigned vec = vsc->sc_vqs[i].vq_intr_vec;
293 		virtio_pci_set_msix_queue_vector(sc, i, vec);
294 	}
295 	if (vsc->sc_config_change)
296 		virtio_pci_set_msix_config_vector(sc, 0);
297 }
298 
299 int
300 virtio_pci_get_status(struct virtio_softc *vsc)
301 {
302 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
303 
304 	if (sc->sc_sc.sc_version_1)
305 		return CREAD(sc, device_status);
306 	else
307 		return bus_space_read_1(sc->sc_iot, sc->sc_ioh,
308 		    VIRTIO_CONFIG_DEVICE_STATUS);
309 }
310 
311 void
312 virtio_pci_set_status(struct virtio_softc *vsc, int status)
313 {
314 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
315 	int old = 0;
316 
317 	if (sc->sc_sc.sc_version_1) {
318 		if (status == 0) {
319 			CWRITE(sc, device_status, 0);
320 			while (CREAD(sc, device_status) != 0) {
321 				CPU_BUSY_CYCLE();
322 			}
323 		} else {
324 			old = CREAD(sc, device_status);
325 			CWRITE(sc, device_status, status|old);
326 		}
327 	} else {
328 		if (status == 0) {
329 			bus_space_write_1(sc->sc_iot, sc->sc_ioh,
330 			    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
331 			while (bus_space_read_1(sc->sc_iot, sc->sc_ioh,
332 			    VIRTIO_CONFIG_DEVICE_STATUS) != 0) {
333 				CPU_BUSY_CYCLE();
334 			}
335 		} else {
336 			old = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
337 			    VIRTIO_CONFIG_DEVICE_STATUS);
338 			bus_space_write_1(sc->sc_iot, sc->sc_ioh,
339 			    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
340 		}
341 	}
342 }
343 
344 int
345 virtio_pci_match(struct device *parent, void *match, void *aux)
346 {
347 	struct pci_attach_args *pa;
348 
349 	pa = (struct pci_attach_args *)aux;
350 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OPENBSD &&
351 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_OPENBSD_CONTROL)
352 		return 1;
353 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_QUMRANET)
354 		return 0;
355 	/* virtio 0.9 */
356 	if (PCI_PRODUCT(pa->pa_id) >= 0x1000 &&
357 	    PCI_PRODUCT(pa->pa_id) <= 0x103f &&
358 	    PCI_REVISION(pa->pa_class) == 0)
359 		return 1;
360 	/* virtio 1.0 */
361 	if (PCI_PRODUCT(pa->pa_id) >= 0x1040 &&
362 	    PCI_PRODUCT(pa->pa_id) <= 0x107f &&
363 	    PCI_REVISION(pa->pa_class) == 1)
364 		return 1;
365 	return 0;
366 }
367 
#if VIRTIO_DEBUG
/*
 * Debug helper: dump every capability reachable from the first
 * vendor-specific capability, interpreted as struct virtio_pci_cap.
 * NOTE(review): cap_next chains through all capabilities, not only
 * vendor-specific ones, so some printed entries may not be virtio caps.
 */
void
virtio_pci_dump_caps(struct virtio_pci_softc *sc)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_ptag;
	int offset;
	union {
		pcireg_t reg[4];
		struct virtio_pci_cap vcap;
	} v;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v.reg[0]))
		return;

	printf("\n");
	do {
		/* re-read the first 16 bytes of the capability */
		for (int i = 0; i < 4; i++)
			v.reg[i] = pci_conf_read(pc, tag, offset + i * 4);
		printf("%s: cfgoff %#x len %#x type %#x bar %#x: off %#x len %#x\n",
			__func__, offset, v.vcap.cap_len, v.vcap.cfg_type, v.vcap.bar,
			v.vcap.offset, v.vcap.length);
		offset = v.vcap.cap_next;
	} while (offset != 0);
}
#endif
394 
395 int
396 virtio_pci_find_cap(struct virtio_pci_softc *sc, int cfg_type, void *buf, int buflen)
397 {
398 	pci_chipset_tag_t pc = sc->sc_pc;
399 	pcitag_t tag = sc->sc_ptag;
400 	unsigned int offset, i, len;
401 	union {
402 		pcireg_t reg[8];
403 		struct virtio_pci_cap vcap;
404 	} *v = buf;
405 
406 	if (buflen < sizeof(struct virtio_pci_cap))
407 		return ERANGE;
408 
409 	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v->reg[0]))
410 		return ENOENT;
411 
412 	do {
413 		for (i = 0; i < 4; i++)
414 			v->reg[i] = pci_conf_read(pc, tag, offset + i * 4);
415 		if (v->vcap.cfg_type == cfg_type)
416 			break;
417 		offset = v->vcap.cap_next;
418 	} while (offset != 0);
419 
420 	if (offset == 0)
421 		return ENOENT;
422 
423 	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
424 		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
425 		if (len > buflen) {
426 			printf("%s: cap too large\n", __func__);
427 			return ERANGE;
428 		}
429 		for (i = 4; i < len / sizeof(pcireg_t);  i++)
430 			v->reg[i] = pci_conf_read(pc, tag, offset + i * 4);
431 	}
432 
433 	return 0;
434 }
435 
436 
437 #define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
438 				sizeof(pcireg_t))
439 
/*
 * Attach using the virtio 1.0 ("modern") register layout: locate the
 * common, notify, ISR and (optional) device-specific config structures
 * via PCI vendor capabilities, map the BARs they live in, and create a
 * bus_space subregion for each. Returns 0 on success or an errno value.
 */
int
virtio_pci_attach_10(struct virtio_pci_softc *sc, struct pci_attach_args *pa)
{
	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };	/* required length per BAR */
	int bars_idx[NMAPREG] = { 0 };		/* BAR number -> sc_bars_* slot */
	struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
	int i, j = 0, ret = 0;

	/* common, notify and isr capabilities are mandatory */
	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_COMMON_CFG, &common, sizeof(common)) != 0)
		return ENODEV;

	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_NOTIFY_CFG, &notify, sizeof(notify)) != 0)
		return ENODEV;
	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_ISR_CFG, &isr, sizeof(isr)) != 0)
		return ENODEV;
	/* the device-specific config area is optional */
	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_DEVICE_CFG, &device, sizeof(device)) != 0)
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;

	/*
	 * XXX Maybe there are devices that offer the pci caps but not the
	 * XXX VERSION_1 feature bit? Then we should check the feature bit
	 * XXX here and fall back to 0.9 out if not present.
	 */

	/* Figure out which bars we need to map */
	for (i = 0; i < nitems(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;
		if (caps[i]->length == 0)
			continue;
		if (bars[bar] < len)
			bars[bar] = len;
	}

	/* Map each needed BAR once; several structures may share a BAR. */
	for (i = 0; i < nitems(bars); i++) {
		int reg;
		pcireg_t type;
		if (bars[i] == 0)
			continue;
		reg = PCI_MAPREG_START + i * 4;
		type = pci_mapreg_type(sc->sc_pc, sc->sc_ptag, reg);
		if (pci_mapreg_map(pa, reg, type, 0, &sc->sc_bars_iot[j],
		    &sc->sc_bars_ioh[j], NULL, &sc->sc_bars_iosize[j],
		    bars[i])) {
			printf("%s: can't map bar %u \n",
			    sc->sc_sc.sc_dev.dv_xname, i);
			ret = EIO;
			goto err;
		}
		bars_idx[i] = j;
		j++;
	}

	/* queue notification ("doorbell") region */
	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    notify.cap.offset, notify.cap.length, &sc->sc_notify_ioh) != 0) {
		printf("%s: can't map notify i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_notify_iosize = notify.cap.length;
	sc->sc_notify_iot = sc->sc_bars_iot[i];
	sc->sc_notify_off_multiplier = notify.notify_off_multiplier;

	if (have_device_cfg) {
		/* device-specific config registers */
		i = bars_idx[device.bar];
		if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
		    device.offset, device.length, &sc->sc_devcfg_ioh) != 0) {
			printf("%s: can't map devcfg i/o space\n",
			    sc->sc_sc.sc_dev.dv_xname);
			ret = EIO;
			goto err;
		}
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = sc->sc_bars_iot[i];
	}

	/* ISR status region */
	i = bars_idx[isr.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    isr.offset, isr.length, &sc->sc_isr_ioh) != 0) {
		printf("%s: can't map isr i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_isr_iosize = isr.length;
	sc->sc_isr_iot = sc->sc_bars_iot[i];

	/* common config structure, accessed through CREAD()/CWRITE() */
	i = bars_idx[common.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    common.offset, common.length, &sc->sc_ioh) != 0) {
		printf("%s: can't map common i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_iosize = common.length;
	sc->sc_iot = sc->sc_bars_iot[i];

	sc->sc_sc.sc_version_1 = 1;
	return 0;

err:
	/* there is no pci_mapreg_unmap() */
	return ret;
}
552 
/*
 * Attach using the virtio 0.9 ("legacy") register layout: all registers
 * live in BAR 0. Only the notify and ISR subregions are created here;
 * the device config subregion depends on the MSI-X state and is set up
 * later via virtio_pci_adjust_config_region().
 */
int
virtio_pci_attach_09(struct virtio_pci_softc *sc, struct pci_attach_args *pa)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	pcireg_t type;

	/* map the whole register window of BAR 0 */
	type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
	if (pci_mapreg_map(pa, PCI_MAPREG_START, type, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_iosize, 0)) {
		printf("%s: can't map i/o space\n", vsc->sc_dev.dv_xname);
		return EIO;
	}

	/* 2-byte queue notify register */
	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &sc->sc_notify_ioh) != 0) {
		printf("%s: can't map notify i/o space\n",
		    vsc->sc_dev.dv_xname);
		return EIO;
	}
	sc->sc_notify_iosize = 2;
	sc->sc_notify_iot = sc->sc_iot;

	/* 1-byte ISR status register */
	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS, 1, &sc->sc_isr_ioh) != 0) {
		printf("%s: can't map isr i/o space\n",
		    vsc->sc_dev.dv_xname);
		return EIO;
	}
	sc->sc_isr_iosize = 1;
	sc->sc_isr_iot = sc->sc_iot;

	return 0;
}
586 
/*
 * Attach: determine the virtio device type and revision, map the
 * registers (1.0 and/or 0.9 layout), reset and acknowledge the device,
 * then attach the matching child driver.
 */
void
virtio_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
	struct virtio_softc *vsc = &sc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision, ret = ENODEV;
	pcireg_t id;
	struct virtio_pci_attach_args vpa = { { 0 }, pa };

	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* subsystem ID shows what I am */
		id = PCI_PRODUCT(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		/* virtio 1.0: device type is encoded in the product ID */
		id = PCI_PRODUCT(pa->pa_id) - 0x1040;
		break;
	default:
		printf("unknown revision 0x%02x; giving up\n", revision);
		return;
	}

	sc->sc_pc = pc;
	sc->sc_ptag = pa->pa_tag;
	vsc->sc_dmat = pa->pa_dmat;

#if defined(__i386__) || defined(__amd64__)
	/*
	 * For virtio, ignore normal MSI black/white-listing depending on the
	 * PCI bridge but enable it unconditionally.
	 */
	pa->pa_flags |= PCI_FLAGS_MSI_ENABLED;
#endif

#if VIRTIO_DEBUG
	virtio_pci_dump_caps(sc);
#endif

	/* at least one vector, at most MAX_MSIX_VECS */
	sc->sc_nintr = min(MAX_MSIX_VECS, pci_intr_msix_count(pa));
	sc->sc_nintr = max(sc->sc_nintr, 1);
	vpa.vpa_va.va_nintr = sc->sc_nintr;

	sc->sc_intr = mallocarray(sc->sc_nintr, sizeof(*sc->sc_intr),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	vsc->sc_ops = &virtio_pci_ops;
	/* try the 1.0 layout first unless disabled by cf_flags */
	if ((vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_VERSION_1) == 0 &&
	    (revision == 1 ||
	     (vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_PREFER_VERSION_1))) {
		ret = virtio_pci_attach_10(sc, pa);
	}
	if (ret != 0 && revision == 0) {
		/* revision 0 means 0.9 only or both 0.9 and 1.0 */
		ret = virtio_pci_attach_09(sc, pa);
	}
	if (ret != 0) {
		printf(": Cannot attach (%d)\n", ret);
		goto free;
	}

	/* start without MSI-X; attach_finish may switch it on */
	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	sc->sc_irq_type = IRQ_NO_MSIX;
	if (virtio_pci_adjust_config_region(sc) != 0)
		goto err;

	virtio_device_reset(vsc);
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	printf("\n");
	vpa.vpa_va.va_devid = id;
	vsc->sc_child = NULL;
	/* attach the device-type specific child driver */
	config_found(self, &vpa, NULL);
	if (vsc->sc_child == NULL) {
		printf("%s: no matching child driver; not configured\n",
		    vsc->sc_dev.dv_xname);
		goto err;
	}
	if (vsc->sc_child == VIRTIO_CHILD_ERROR) {
		printf("%s: virtio configuration failed\n",
		    vsc->sc_dev.dv_xname);
		goto err;
	}

	return;

err:
	/* no pci_mapreg_unmap() or pci_intr_unmap() */
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
free:
	free(sc->sc_intr, M_DEVBUF, sc->sc_nintr * sizeof(*sc->sc_intr));
}
683 
/*
 * Establish the interrupt handler(s) after the child driver attached
 * (its dv_xname is used for the handler names). Preference order:
 * per-VQ MSI-X, shared MSI-X, then a single MSI/INTx handler.
 * Returns 0 on success, -EIO on failure.
 */
int
virtio_pci_attach_finish(struct virtio_softc *vsc,
    struct virtio_attach_args *va)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	struct virtio_pci_attach_args *vpa =
	    (struct virtio_pci_attach_args *)va;
	pci_intr_handle_t ih;
	pci_chipset_tag_t pc = vpa->vpa_pa->pa_pc;
	char const *intrstr;

	if (virtio_pci_setup_msix(sc, vpa, 0) == 0) {
		sc->sc_irq_type = IRQ_MSIX_PER_VQ;
		intrstr = "msix per-VQ";
	} else if (virtio_pci_setup_msix(sc, vpa, 1) == 0) {
		sc->sc_irq_type = IRQ_MSIX_SHARED;
		intrstr = "msix shared";
	} else {
		/* fall back to a single MSI or INTx interrupt */
		int (*ih_func)(void *) = virtio_pci_legacy_intr;
		if (pci_intr_map_msi(vpa->vpa_pa, &ih) != 0 &&
		    pci_intr_map(vpa->vpa_pa, &ih) != 0) {
			printf("%s: couldn't map interrupt\n",
			    vsc->sc_dev.dv_xname);
			return -EIO;
		}
		intrstr = pci_intr_string(pc, ih);
		/*
		 * We always set the IPL_MPSAFE flag in order to do the relatively
		 * expensive ISR read without lock, and then grab the kernel lock in
		 * the interrupt handler.
		 */
		if (vsc->sc_ipl & IPL_MPSAFE)
			ih_func = virtio_pci_legacy_intr_mpsafe;
		sc->sc_intr[0].ih = pci_intr_establish(pc, ih,
		    vsc->sc_ipl | IPL_MPSAFE, ih_func, sc,
		    vsc->sc_child->dv_xname);
		if (sc->sc_intr[0].ih == NULL) {
			printf("%s: couldn't establish interrupt",
			    vsc->sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return -EIO;
		}
	}

	printf("%s: %s\n", vsc->sc_dev.dv_xname, intrstr);
	return 0;
}
733 
734 int
735 virtio_pci_detach(struct device *self, int flags)
736 {
737 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
738 	struct virtio_softc *vsc = &sc->sc_sc;
739 	int r;
740 
741 	if (vsc->sc_child != 0 && vsc->sc_child != VIRTIO_CHILD_ERROR) {
742 		r = config_detach(vsc->sc_child, flags);
743 		if (r)
744 			return r;
745 	}
746 	KASSERT(vsc->sc_child == 0 || vsc->sc_child == VIRTIO_CHILD_ERROR);
747 	KASSERT(vsc->sc_vqs == 0);
748 	virtio_pci_free_irqs(sc);
749 	if (sc->sc_iosize)
750 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_iosize);
751 	sc->sc_iosize = 0;
752 
753 	return 0;
754 }
755 
756 int
757 virtio_pci_adjust_config_region(struct virtio_pci_softc *sc)
758 {
759 	if (sc->sc_sc.sc_version_1)
760 		return 0;
761 	sc->sc_devcfg_iosize = sc->sc_iosize - sc->sc_devcfg_offset;
762 	sc->sc_devcfg_iot = sc->sc_iot;
763 	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, sc->sc_devcfg_offset,
764 	    sc->sc_devcfg_iosize, &sc->sc_devcfg_ioh) != 0) {
765 		printf("%s: can't map config i/o space\n",
766 		    sc->sc_sc.sc_dev.dv_xname);
767 		return 1;
768 	}
769 	return 0;
770 }
771 
772 /*
773  * Feature negotiation.
774  * Prints available / negotiated features if guest_feature_names != NULL and
775  * VIRTIO_DEBUG is 1
776  */
777 int
778 virtio_pci_negotiate_features(struct virtio_softc *vsc,
779     const struct virtio_feature_name *guest_feature_names)
780 {
781 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
782 	uint64_t host, negotiated;
783 
784 	vsc->sc_active_features = 0;
785 
786 	/*
787 	 * We enable indirect descriptors by default. They can be switched
788 	 * off by setting bit 1 in the driver flags, see config(8)
789 	 */
790 	if (!(vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT) &&
791 	    !(vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT)) {
792 		vsc->sc_driver_features |= VIRTIO_F_RING_INDIRECT_DESC;
793 	} else if (guest_feature_names != NULL) {
794 		printf(" RingIndirectDesc disabled by UKC");
795 	}
796 
797 	/*
798 	 * The driver must add VIRTIO_F_RING_EVENT_IDX if it supports it.
799 	 * If it did, check if it is disabled by bit 2 in the driver flags.
800 	 */
801 	if ((vsc->sc_driver_features & VIRTIO_F_RING_EVENT_IDX) &&
802 	    ((vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_EVENT_IDX) ||
803 	    (vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_EVENT_IDX))) {
804 		if (guest_feature_names != NULL)
805 			printf(" RingEventIdx disabled by UKC");
806 		vsc->sc_driver_features &= ~VIRTIO_F_RING_EVENT_IDX;
807 	}
808 
809 	if (vsc->sc_version_1) {
810 		return virtio_pci_negotiate_features_10(vsc,
811 		    guest_feature_names);
812 	}
813 
814 	/* virtio 0.9 only */
815 	host = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
816 				VIRTIO_CONFIG_DEVICE_FEATURES);
817 	negotiated = host & vsc->sc_driver_features;
818 #if VIRTIO_DEBUG
819 	if (guest_feature_names)
820 		virtio_log_features(host, negotiated, guest_feature_names);
821 #endif
822 	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
823 			  VIRTIO_CONFIG_GUEST_FEATURES, negotiated);
824 	vsc->sc_active_features = negotiated;
825 	if (negotiated & VIRTIO_F_RING_INDIRECT_DESC)
826 		vsc->sc_indirect = 1;
827 	else
828 		vsc->sc_indirect = 0;
829 	return 0;
830 }
831 
/*
 * Virtio 1.0 feature negotiation: read the 64-bit device feature set in
 * two 32-bit halves, write back the negotiated set, then set FEATURES_OK
 * and verify that the device accepted the selection.
 * Returns 0 on success, ENXIO or EINVAL on failure.
 */
int
virtio_pci_negotiate_features_10(struct virtio_softc *vsc,
    const struct virtio_feature_name *guest_feature_names)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t host, negotiated;

	vsc->sc_driver_features |= VIRTIO_F_VERSION_1;
	/*
	 * XXX Without this SEV doesn't work with a KVM/qemu hypervisor on
	 * XXX amd64.
	 */
	vsc->sc_driver_features |= VIRTIO_F_ACCESS_PLATFORM;
	/* notify on empty is 0.9 only */
	vsc->sc_driver_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
	/* read both 32-bit halves of the device feature bits */
	CWRITE(sc, device_feature_select, 0);
	host = CREAD(sc, device_feature);
	CWRITE(sc, device_feature_select, 1);
	host |= (uint64_t)CREAD(sc, device_feature) << 32;

	negotiated = host & vsc->sc_driver_features;
#if VIRTIO_DEBUG
	if (guest_feature_names)
		virtio_log_features(host, negotiated, guest_feature_names);
#endif
	/* write back both halves of the negotiated feature bits */
	CWRITE(sc, driver_feature_select, 0);
	CWRITE(sc, driver_feature, negotiated & 0xffffffff);
	CWRITE(sc, driver_feature_select, 1);
	CWRITE(sc, driver_feature, negotiated >> 32);
	virtio_pci_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);

	/* the device clears FEATURES_OK if it rejects the selection */
	if ((CREAD(sc, device_status) &
	    VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
		printf("%s: Feature negotiation failed\n",
		    vsc->sc_dev.dv_xname);
		CWRITE(sc, device_status, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return ENXIO;
	}
	vsc->sc_active_features = negotiated;

	if (negotiated & VIRTIO_F_RING_INDIRECT_DESC)
		vsc->sc_indirect = 1;
	else
		vsc->sc_indirect = 0;

	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
#if VIRTIO_DEBUG
		printf("%s: Host rejected Version_1\n", __func__);
#endif
		CWRITE(sc, device_status, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return EINVAL;
	}
	return 0;
}
886 
887 /*
888  * Device configuration registers.
889  */
890 uint8_t
891 virtio_pci_read_device_config_1(struct virtio_softc *vsc, int index)
892 {
893 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
894 	return bus_space_read_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
895 }
896 
897 uint16_t
898 virtio_pci_read_device_config_2(struct virtio_softc *vsc, int index)
899 {
900 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
901 	return bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
902 }
903 
904 uint32_t
905 virtio_pci_read_device_config_4(struct virtio_softc *vsc, int index)
906 {
907 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
908 	return bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
909 }
910 
911 uint64_t
912 virtio_pci_read_device_config_8(struct virtio_softc *vsc, int index)
913 {
914 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
915 	uint64_t r;
916 
917 	r = bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
918 	    index + sizeof(uint32_t));
919 	r <<= 32;
920 	r += bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
921 	return r;
922 }
923 
924 void
925 virtio_pci_write_device_config_1(struct virtio_softc *vsc, int index,
926     uint8_t value)
927 {
928 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
929 	bus_space_write_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
930 }
931 
932 void
933 virtio_pci_write_device_config_2(struct virtio_softc *vsc, int index,
934     uint16_t value)
935 {
936 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
937 	bus_space_write_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
938 }
939 
940 void
941 virtio_pci_write_device_config_4(struct virtio_softc *vsc,
942 			     int index, uint32_t value)
943 {
944 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
945 	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
946 }
947 
948 void
949 virtio_pci_write_device_config_8(struct virtio_softc *vsc,
950 			     int index, uint64_t value)
951 {
952 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
953 	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
954 	    index, value & 0xffffffff);
955 	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
956 	    index + sizeof(uint32_t), value >> 32);
957 }
958 
/*
 * Map MSI-X vector `idx' and establish `handler' (with argument
 * `ih_arg') on it at the child's interrupt priority level.
 * Returns 0 on success, 1 on failure.
 */
int
virtio_pci_msix_establish(struct virtio_pci_softc *sc,
    struct virtio_pci_attach_args *vpa, int idx,
    int (*handler)(void *), void *ih_arg)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	pci_intr_handle_t ih;

	KASSERT(idx < sc->sc_nintr);

	if (pci_intr_map_msix(vpa->vpa_pa, idx, &ih) != 0) {
#if VIRTIO_DEBUG
		printf("%s[%d]: pci_intr_map_msix failed\n",
		    vsc->sc_dev.dv_xname, idx);
#endif
		return 1;
	}
	/* Name the vector after the child device, e.g. "vio0:1". */
	snprintf(sc->sc_intr[idx].name, sizeof(sc->sc_intr[idx].name), "%s:%d",
	    vsc->sc_child->dv_xname, idx);
	sc->sc_intr[idx].ih = pci_intr_establish(sc->sc_pc, ih, vsc->sc_ipl,
	    handler, ih_arg, sc->sc_intr[idx].name);
	if (sc->sc_intr[idx].ih == NULL) {
		printf("%s[%d]: couldn't establish msix interrupt\n",
		    vsc->sc_dev.dv_xname, idx);
		return 1;
	}
	return 0;
}
987 
988 void
989 virtio_pci_set_msix_queue_vector(struct virtio_pci_softc *sc, uint32_t idx, uint16_t vector)
990 {
991 	if (sc->sc_sc.sc_version_1) {
992 		CWRITE(sc, queue_select, idx);
993 		CWRITE(sc, queue_msix_vector, vector);
994 	} else {
995 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
996 		    VIRTIO_CONFIG_QUEUE_SELECT, idx);
997 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
998 		    VIRTIO_MSI_QUEUE_VECTOR, vector);
999 	}
1000 }
1001 
1002 void
1003 virtio_pci_set_msix_config_vector(struct virtio_pci_softc *sc, uint16_t vector)
1004 {
1005 	if (sc->sc_sc.sc_version_1) {
1006 		CWRITE(sc, config_msix_vector, vector);
1007 	} else {
1008 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
1009 		    VIRTIO_MSI_CONFIG_VECTOR, vector);
1010 	}
1011 }
1012 
1013 
/*
 * Tear down all established interrupt handlers.  If MSI-X was in use,
 * the virtqueues are first detached from their vectors; afterwards the
 * device config window is moved back to its non-MSI offset.
 */
void
virtio_pci_free_irqs(struct virtio_pci_softc *sc)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	int i;

	/* The MSI config offset indicates that MSI(-X) was set up. */
	if (sc->sc_devcfg_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI) {
		for (i = 0; i < vsc->sc_nvqs; i++) {
			virtio_pci_set_msix_queue_vector(sc, i,
			    VIRTIO_MSI_NO_VECTOR);
		}
	}

	for (i = 0; i < sc->sc_nintr; i++) {
		if (sc->sc_intr[i].ih) {
			pci_intr_disestablish(sc->sc_pc, sc->sc_intr[i].ih);
			sc->sc_intr[i].ih = NULL;
		}
	}

	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	virtio_pci_adjust_config_region(sc);
}
1037 
/*
 * Set up MSI-X interrupts.  Vector 0 always handles config changes.
 * With `shared' set, a single vector 1 serves all virtqueues;
 * otherwise each virtqueue i gets its own vector i + 1.
 * Returns 0 on success, 1 on failure (any partially established
 * vectors are freed again).
 */
int
virtio_pci_setup_msix(struct virtio_pci_softc *sc,
    struct virtio_pci_attach_args *vpa, int shared)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	int i;

	/* Shared needs config + queue */
	if (shared && vpa->vpa_va.va_nintr < 1 + 1)
		return 1;
	/* Per VQ needs config + N * queue */
	if (!shared && vpa->vpa_va.va_nintr < 1 + vsc->sc_nvqs)
		return 1;

	if (virtio_pci_msix_establish(sc, vpa, 0, virtio_pci_config_intr, vsc))
		return 1;
	/* With MSI-X in use, the device config area moves. */
	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
	virtio_pci_adjust_config_region(sc);

	if (shared) {
		if (virtio_pci_msix_establish(sc, vpa, 1,
		    virtio_pci_shared_queue_intr, vsc)) {
			goto fail;
		}

		/* All queues share vector 1. */
		for (i = 0; i < vsc->sc_nvqs; i++)
			vsc->sc_vqs[i].vq_intr_vec = 1;
	} else {
		/* One vector per queue, offset by the config vector. */
		for (i = 0; i < vsc->sc_nvqs; i++) {
			if (virtio_pci_msix_establish(sc, vpa, i + 1,
			    virtio_pci_queue_intr, &vsc->sc_vqs[i])) {
				goto fail;
			}
			vsc->sc_vqs[i].vq_intr_vec = i + 1;
		}
	}

	return 0;
fail:
	virtio_pci_free_irqs(sc);
	return 1;
}
1080 
1081 void
1082 virtio_pci_intr_barrier(struct virtio_softc *vsc)
1083 {
1084 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
1085 	int i;
1086 
1087 	for (i = 0; i < sc->sc_nintr; i++) {
1088 		if (sc->sc_intr[i].ih != NULL)
1089 			intr_barrier(sc->sc_intr[i].ih);
1090 	}
1091 }
1092 
1093 /*
1094  * Interrupt handler.
1095  */
1096 
1097 /*
1098  * Only used without MSI-X
1099  */
1100 int
1101 virtio_pci_legacy_intr(void *arg)
1102 {
1103 	struct virtio_pci_softc *sc = arg;
1104 	struct virtio_softc *vsc = &sc->sc_sc;
1105 	int isr, r = 0;
1106 
1107 	/* check and ack the interrupt */
1108 	isr = bus_space_read_1(sc->sc_isr_iot, sc->sc_isr_ioh, 0);
1109 	if (isr == 0)
1110 		return 0;
1111 	KERNEL_LOCK();
1112 	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
1113 	    (vsc->sc_config_change != NULL)) {
1114 		r = (vsc->sc_config_change)(vsc);
1115 	}
1116 	r |= virtio_check_vqs(vsc);
1117 	KERNEL_UNLOCK();
1118 
1119 	return r;
1120 }
1121 
1122 int
1123 virtio_pci_legacy_intr_mpsafe(void *arg)
1124 {
1125 	struct virtio_pci_softc *sc = arg;
1126 	struct virtio_softc *vsc = &sc->sc_sc;
1127 	int isr, r = 0;
1128 
1129 	/* check and ack the interrupt */
1130 	isr = bus_space_read_1(sc->sc_isr_iot, sc->sc_isr_ioh, 0);
1131 	if (isr == 0)
1132 		return 0;
1133 	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
1134 	    (vsc->sc_config_change != NULL)) {
1135 		r = (vsc->sc_config_change)(vsc);
1136 	}
1137 	r |= virtio_check_vqs(vsc);
1138 	return r;
1139 }
1140 
1141 /*
1142  * Only used with MSI-X
1143  */
1144 int
1145 virtio_pci_config_intr(void *arg)
1146 {
1147 	struct virtio_softc *vsc = arg;
1148 
1149 	if (vsc->sc_config_change != NULL)
1150 		return vsc->sc_config_change(vsc);
1151 	return 0;
1152 }
1153 
1154 /*
1155  * Only used with MSI-X
1156  */
1157 int
1158 virtio_pci_queue_intr(void *arg)
1159 {
1160 	struct virtqueue *vq = arg;
1161 	struct virtio_softc *vsc = vq->vq_owner;
1162 
1163 	return virtio_check_vq(vsc, vq);
1164 }
1165 
int
virtio_pci_shared_queue_intr(void *arg)
{
	/* Shared MSI-X vector: poll every virtqueue of the device. */
	struct virtio_softc *vsc = arg;

	return virtio_check_vqs(vsc);
}
1173 
1174 /*
1175  * Interrupt handler to be used when polling.
1176  * We cannot use isr here because it is not defined in MSI-X mode.
1177  */
1178 int
1179 virtio_pci_poll_intr(void *arg)
1180 {
1181 	struct virtio_pci_softc *sc = arg;
1182 	struct virtio_softc *vsc = &sc->sc_sc;
1183 	int r = 0;
1184 
1185 	if (vsc->sc_config_change != NULL)
1186 		r = (vsc->sc_config_change)(vsc);
1187 
1188 	r |= virtio_check_vqs(vsc);
1189 
1190 	return r;
1191 }
1192 
1193 void
1194 virtio_pci_kick(struct virtio_softc *vsc, uint16_t idx)
1195 {
1196 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
1197 	unsigned offset = 0;
1198 	if (vsc->sc_version_1) {
1199 		offset = vsc->sc_vqs[idx].vq_notify_off *
1200 		    sc->sc_notify_off_multiplier;
1201 	}
1202 	bus_space_write_2(sc->sc_notify_iot, sc->sc_notify_ioh, offset, idx);
1203 }
1204