xref: /openbsd-src/sys/dev/pci/virtio_pci.c (revision dc275227176c1aad7ca774d4d07f055bf683ffb0)
1 /*	$OpenBSD: virtio_pci.c,v 1.39 2024/08/26 19:37:54 sf Exp $	*/
2 /*	$NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $	*/
3 
4 /*
5  * Copyright (c) 2012 Stefan Fritsch.
6  * Copyright (c) 2010 Minoura Makoto.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/device.h>
33 #include <sys/mutex.h>
34 
35 #include <dev/pci/pcidevs.h>
36 #include <dev/pci/pcireg.h>
37 #include <dev/pci/pcivar.h>
38 #include <dev/pci/virtio_pcireg.h>
39 
40 #include <dev/pv/virtioreg.h>
41 #include <dev/pv/virtiovar.h>
42 
/* Debug printf: only emitted when compiled with VIRTIO_DEBUG >= n. */
#define DNPRINTF(n,x...)				\
    do { if (VIRTIO_DEBUG >= n) printf(x); } while(0)
45 
46 
47 /*
48  * XXX: Before being used on big endian arches, the access to config registers
49  * XXX: needs to be reviewed/fixed. The non-device specific registers are
50  * XXX: PCI-endian while the device specific registers are native endian.
51  */
52 
/* Maximum number of MSI-X vectors this driver will establish per device. */
#define MAX_MSIX_VECS	8
54 
55 struct virtio_pci_softc;
56 struct virtio_pci_attach_args;
57 
58 int		virtio_pci_match(struct device *, void *, void *);
59 void		virtio_pci_attach(struct device *, struct device *, void *);
60 int		virtio_pci_attach_09(struct virtio_pci_softc *sc, struct pci_attach_args *pa);
61 int		virtio_pci_attach_10(struct virtio_pci_softc *sc, struct pci_attach_args *pa);
62 int		virtio_pci_detach(struct device *, int);
63 
64 void		virtio_pci_kick(struct virtio_softc *, uint16_t);
65 int		virtio_pci_adjust_config_region(struct virtio_pci_softc *);
66 uint8_t		virtio_pci_read_device_config_1(struct virtio_softc *, int);
67 uint16_t	virtio_pci_read_device_config_2(struct virtio_softc *, int);
68 uint32_t	virtio_pci_read_device_config_4(struct virtio_softc *, int);
69 uint64_t	virtio_pci_read_device_config_8(struct virtio_softc *, int);
70 void		virtio_pci_write_device_config_1(struct virtio_softc *, int, uint8_t);
71 void		virtio_pci_write_device_config_2(struct virtio_softc *, int, uint16_t);
72 void		virtio_pci_write_device_config_4(struct virtio_softc *, int, uint32_t);
73 void		virtio_pci_write_device_config_8(struct virtio_softc *, int, uint64_t);
74 uint16_t	virtio_pci_read_queue_size(struct virtio_softc *, uint16_t);
75 void		virtio_pci_setup_queue(struct virtio_softc *, struct virtqueue *, uint64_t);
76 int		virtio_pci_get_status(struct virtio_softc *);
77 void		virtio_pci_set_status(struct virtio_softc *, int);
78 int		virtio_pci_negotiate_features(struct virtio_softc *, const struct virtio_feature_name *);
79 int		virtio_pci_negotiate_features_10(struct virtio_softc *, const struct virtio_feature_name *);
80 void		virtio_pci_set_msix_queue_vector(struct virtio_pci_softc *, uint32_t, uint16_t);
81 void		virtio_pci_set_msix_config_vector(struct virtio_pci_softc *, uint16_t);
82 int		virtio_pci_msix_establish(struct virtio_pci_softc *, struct virtio_pci_attach_args *, int, int (*)(void *), void *);
83 int		virtio_pci_setup_msix(struct virtio_pci_softc *, struct virtio_pci_attach_args *, int);
84 void		virtio_pci_free_irqs(struct virtio_pci_softc *);
85 int		virtio_pci_poll_intr(void *);
86 int		virtio_pci_legacy_intr(void *);
87 int		virtio_pci_legacy_intr_mpsafe(void *);
88 int		virtio_pci_config_intr(void *);
89 int		virtio_pci_queue_intr(void *);
90 int		virtio_pci_shared_queue_intr(void *);
91 int		virtio_pci_find_cap(struct virtio_pci_softc *sc, int cfg_type, void *buf, int buflen);
92 #if VIRTIO_DEBUG
93 void virtio_pci_dump_caps(struct virtio_pci_softc *sc);
94 #endif
95 
/* Interrupt delivery strategy selected at attach time. */
enum irq_type {
	IRQ_NO_MSIX,	 /* legacy/MSI interrupt shared by config and vqs */
	IRQ_MSIX_SHARED, /* vec 0: config irq, vec 1 shared by all vqs */
	IRQ_MSIX_PER_VQ, /* vec 0: config irq, vec n: irq of vq[n-1] */
};
101 
/*
 * PCI transport state.  sc_sc must stay the first member: the shared
 * virtio layer hands us a struct virtio_softc * which the methods below
 * cast back to struct virtio_pci_softc *.
 */
struct virtio_pci_softc {
	struct virtio_softc	sc_sc;
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_ptag;

	/* 0.9: the whole i/o BAR; 1.0: the common config subregion. */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;

	/* 1.0 only: mappings of the BARs referenced by the virtio caps. */
	bus_space_tag_t		sc_bars_iot[4];
	bus_space_handle_t	sc_bars_ioh[4];
	bus_size_t		sc_bars_iosize[4];

	/* Queue notify region (subregion of a BAR above). */
	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;
	unsigned int		sc_notify_off_multiplier;

	/* Device-specific config region. */
	bus_space_tag_t		sc_devcfg_iot;
	bus_space_handle_t	sc_devcfg_ioh;
	bus_size_t		sc_devcfg_iosize;
	/*
	 * With 0.9, the offset of the devcfg region in the io bar changes
	 * depending on MSI-X being enabled or not.
	 * With 1.0, this field is still used to remember if MSI-X is enabled
	 * or not.
	 */
	unsigned int		sc_devcfg_offset;

	/* ISR status register region. */
	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	/* Established interrupt handlers, NULL when unused. */
	void			*sc_ih[MAX_MSIX_VECS];

	enum irq_type		sc_irq_type;
};
139 
/* Attach arguments handed to the child driver via config_found(). */
struct virtio_pci_attach_args {
	struct virtio_attach_args	 vpa_va;
	struct pci_attach_args		*vpa_pa;
};
144 
145 
/* Autoconf glue: match/attach/detach entry points for this driver. */
const struct cfattach virtio_pci_ca = {
	sizeof(struct virtio_pci_softc),
	virtio_pci_match,
	virtio_pci_attach,
	virtio_pci_detach,
	NULL
};
153 
/* Transport operations vector used by the bus-independent virtio layer. */
struct virtio_ops virtio_pci_ops = {
	virtio_pci_kick,
	virtio_pci_read_device_config_1,
	virtio_pci_read_device_config_2,
	virtio_pci_read_device_config_4,
	virtio_pci_read_device_config_8,
	virtio_pci_write_device_config_1,
	virtio_pci_write_device_config_2,
	virtio_pci_write_device_config_4,
	virtio_pci_write_device_config_8,
	virtio_pci_read_queue_size,
	virtio_pci_setup_queue,
	virtio_pci_get_status,
	virtio_pci_set_status,
	virtio_pci_negotiate_features,
	virtio_pci_poll_intr,
};
171 
172 static inline uint64_t
173 _cread(struct virtio_pci_softc *sc, unsigned off, unsigned size)
174 {
175 	uint64_t val;
176 	switch (size) {
177 	case 1:
178 		val = bus_space_read_1(sc->sc_iot, sc->sc_ioh, off);
179 		break;
180 	case 2:
181 		val = bus_space_read_2(sc->sc_iot, sc->sc_ioh, off);
182 		break;
183 	case 4:
184 		val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
185 		break;
186 	case 8:
187 		val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
188 		    off + sizeof(uint32_t));
189 		val <<= 32;
190 		val += bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
191 		break;
192 	}
193 	return val;
194 }
195 
/*
 * Read a member of the virtio 1.0 common config structure, with the access
 * width derived from the member's size (see _cread()).
 */
#define CREAD(sc, memb)  _cread(sc, offsetof(struct virtio_pci_common_cfg, memb), \
    sizeof(((struct virtio_pci_common_cfg *)0)->memb))

/*
 * Write a member of the virtio 1.0 common config structure, with the access
 * width derived from the member's size.  64-bit members are written as two
 * 32-bit accesses, low word first.
 */
#define CWRITE(sc, memb, val)							\
	do {									\
		struct virtio_pci_common_cfg c;					\
		size_t off = offsetof(struct virtio_pci_common_cfg, memb);	\
		size_t size = sizeof(c.memb);					\
										\
		DNPRINTF(2, "%s: %d: off %#zx size %#zx write %#llx\n",		\
		    __func__, __LINE__, off, size, (unsigned long long)val);	\
		switch (size) {							\
		case 1:								\
			bus_space_write_1(sc->sc_iot, sc->sc_ioh, off, val);	\
			break;							\
		case 2:								\
			bus_space_write_2(sc->sc_iot, sc->sc_ioh, off, val);	\
			break;							\
		case 4:								\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);	\
			break;							\
		case 8:								\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, off,		\
			    (val) & 0xffffffff);				\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh,		\
			    (off) + sizeof(uint32_t), (uint64_t)(val) >> 32);	\
			break;							\
		}								\
	} while (0)
225 
226 uint16_t
227 virtio_pci_read_queue_size(struct virtio_softc *vsc, uint16_t idx)
228 {
229 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
230 	uint16_t ret;
231 	if (sc->sc_sc.sc_version_1) {
232 		CWRITE(sc, queue_select, idx);
233 		ret = CREAD(sc, queue_size);
234 	} else {
235 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
236 		    VIRTIO_CONFIG_QUEUE_SELECT, idx);
237 		ret = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
238 		    VIRTIO_CONFIG_QUEUE_SIZE);
239 	}
240 	return ret;
241 }
242 
/*
 * Program a virtqueue's ring address(es) into the device; addr == 0
 * tears the queue down instead.  Also (re)binds the queue's MSI-X vector
 * when MSI-X is in use.
 */
void
virtio_pci_setup_queue(struct virtio_softc *vsc, struct virtqueue *vq,
    uint64_t addr)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	if (sc->sc_sc.sc_version_1) {
		CWRITE(sc, queue_select, vq->vq_index);
		if (addr == 0) {
			/* Disable first, then clear the ring addresses. */
			CWRITE(sc, queue_enable, 0);
			CWRITE(sc, queue_desc, 0);
			CWRITE(sc, queue_avail, 0);
			CWRITE(sc, queue_used, 0);
		} else {
			/* All three ring parts are set before enabling. */
			CWRITE(sc, queue_desc, addr);
			CWRITE(sc, queue_avail, addr + vq->vq_availoffset);
			CWRITE(sc, queue_used, addr + vq->vq_usedoffset);
			CWRITE(sc, queue_enable, 1);
			vq->vq_notify_off = CREAD(sc, queue_notify_off);
		}
	} else {
		/* 0.9: one page-frame number describes the whole ring. */
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SELECT, vq->vq_index);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);
	}

	/*
	 * This path is only executed if this function is called after
	 * the child's attach function has finished. In other cases,
	 * it's done in virtio_pci_setup_msix().
	 */
	if (sc->sc_irq_type != IRQ_NO_MSIX) {
		/* Shared mode uses vector 1 for every vq; per-VQ maps 1:1. */
		int vec = 1;
		if (sc->sc_irq_type == IRQ_MSIX_PER_VQ)
		       vec += vq->vq_index;
		if (sc->sc_sc.sc_version_1) {
			CWRITE(sc, queue_msix_vector, vec);
		} else {
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_MSI_QUEUE_VECTOR, vec);
		}
	}
}
286 
287 int
288 virtio_pci_get_status(struct virtio_softc *vsc)
289 {
290 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
291 
292 	if (sc->sc_sc.sc_version_1)
293 		return CREAD(sc, device_status);
294 	else
295 		return bus_space_read_1(sc->sc_iot, sc->sc_ioh,
296 		    VIRTIO_CONFIG_DEVICE_STATUS);
297 }
298 
299 void
300 virtio_pci_set_status(struct virtio_softc *vsc, int status)
301 {
302 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
303 	int old = 0;
304 
305 	if (sc->sc_sc.sc_version_1) {
306 		if (status == 0) {
307 			CWRITE(sc, device_status, 0);
308 			while (CREAD(sc, device_status) != 0) {
309 				CPU_BUSY_CYCLE();
310 			}
311 		} else {
312 			old = CREAD(sc, device_status);
313 			CWRITE(sc, device_status, status|old);
314 		}
315 	} else {
316 		if (status == 0) {
317 			bus_space_write_1(sc->sc_iot, sc->sc_ioh,
318 			    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
319 			while (bus_space_read_1(sc->sc_iot, sc->sc_ioh,
320 			    VIRTIO_CONFIG_DEVICE_STATUS) != 0) {
321 				CPU_BUSY_CYCLE();
322 			}
323 		} else {
324 			old = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
325 			    VIRTIO_CONFIG_DEVICE_STATUS);
326 			bus_space_write_1(sc->sc_iot, sc->sc_ioh,
327 			    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
328 		}
329 	}
330 }
331 
332 int
333 virtio_pci_match(struct device *parent, void *match, void *aux)
334 {
335 	struct pci_attach_args *pa;
336 
337 	pa = (struct pci_attach_args *)aux;
338 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OPENBSD &&
339 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_OPENBSD_CONTROL)
340 		return 1;
341 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_QUMRANET)
342 		return 0;
343 	/* virtio 0.9 */
344 	if (PCI_PRODUCT(pa->pa_id) >= 0x1000 &&
345 	    PCI_PRODUCT(pa->pa_id) <= 0x103f &&
346 	    PCI_REVISION(pa->pa_class) == 0)
347 		return 1;
348 	/* virtio 1.0 */
349 	if (PCI_PRODUCT(pa->pa_id) >= 0x1040 &&
350 	    PCI_PRODUCT(pa->pa_id) <= 0x107f &&
351 	    PCI_REVISION(pa->pa_class) == 1)
352 		return 1;
353 	return 0;
354 }
355 
#if VIRTIO_DEBUG
/*
 * Debug helper: walk the vendor-specific PCI capability list and print
 * each capability's offset, length, type, bar and bar offset/length.
 */
void
virtio_pci_dump_caps(struct virtio_pci_softc *sc)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_ptag;
	int offset;
	union {
		pcireg_t reg[4];
		struct virtio_pci_cap vcap;
	} v;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v.reg[0]))
		return;

	printf("\n");
	do {
		/* Read the 16-byte capability header word by word. */
		for (int i = 0; i < 4; i++)
			v.reg[i] = pci_conf_read(pc, tag, offset + i * 4);
		printf("%s: cfgoff %#x len %#x type %#x bar %#x: off %#x len %#x\n",
			__func__, offset, v.vcap.cap_len, v.vcap.cfg_type, v.vcap.bar,
			v.vcap.offset, v.vcap.length);
		offset = v.vcap.cap_next;
	} while (offset != 0);
}
#endif
382 
/*
 * Find the first vendor-specific PCI capability of the given cfg_type and
 * copy it into buf.  When the capability is larger than the generic
 * struct virtio_pci_cap, the remainder is copied too (rounded up to whole
 * pcireg_t words).  Returns 0 on success, ENOENT if no such capability
 * exists, ERANGE if buf is too small.
 */
int
virtio_pci_find_cap(struct virtio_pci_softc *sc, int cfg_type, void *buf, int buflen)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_ptag;
	unsigned int offset, i, len;
	union {
		pcireg_t reg[8];
		struct virtio_pci_cap vcap;
	} *v = buf;

	if (buflen < sizeof(struct virtio_pci_cap))
		return ERANGE;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v->reg[0]))
		return ENOENT;

	/* Walk the list until a capability of the wanted type is found. */
	do {
		for (i = 0; i < 4; i++)
			v->reg[i] = pci_conf_read(pc, tag, offset + i * 4);
		if (v->vcap.cfg_type == cfg_type)
			break;
		offset = v->vcap.cap_next;
	} while (offset != 0);

	if (offset == 0)
		return ENOENT;

	/* Copy any trailing capability-specific fields into buf as well. */
	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
		if (len > buflen) {
			printf("%s: cap too large\n", __func__);
			return ERANGE;
		}
		for (i = 4; i < len / sizeof(pcireg_t);  i++)
			v->reg[i] = pci_conf_read(pc, tag, offset + i * 4);
	}

	return 0;
}
423 
424 
/* Number of PCI base address registers (BARs) in type-0 config space. */
#define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
				sizeof(pcireg_t))
427 
428 int
429 virtio_pci_attach_10(struct virtio_pci_softc *sc, struct pci_attach_args *pa)
430 {
431 	struct virtio_pci_cap common, isr, device;
432 	struct virtio_pci_notify_cap notify;
433 	int have_device_cfg = 0;
434 	bus_size_t bars[NMAPREG] = { 0 };
435 	int bars_idx[NMAPREG] = { 0 };
436 	struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
437 	int i, j = 0, ret = 0;
438 
439 	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_COMMON_CFG, &common, sizeof(common)) != 0)
440 		return ENODEV;
441 
442 	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_NOTIFY_CFG, &notify, sizeof(notify)) != 0)
443 		return ENODEV;
444 	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_ISR_CFG, &isr, sizeof(isr)) != 0)
445 		return ENODEV;
446 	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_DEVICE_CFG, &device, sizeof(device)) != 0)
447 		memset(&device, 0, sizeof(device));
448 	else
449 		have_device_cfg = 1;
450 
451 	/*
452 	 * XXX Maybe there are devices that offer the pci caps but not the
453 	 * XXX VERSION_1 feature bit? Then we should check the feature bit
454 	 * XXX here and fall back to 0.9 out if not present.
455 	 */
456 
457 	/* Figure out which bars we need to map */
458 	for (i = 0; i < nitems(caps); i++) {
459 		int bar = caps[i]->bar;
460 		bus_size_t len = caps[i]->offset + caps[i]->length;
461 		if (caps[i]->length == 0)
462 			continue;
463 		if (bars[bar] < len)
464 			bars[bar] = len;
465 	}
466 
467 	for (i = 0; i < nitems(bars); i++) {
468 		int reg;
469 		pcireg_t type;
470 		if (bars[i] == 0)
471 			continue;
472 		reg = PCI_MAPREG_START + i * 4;
473 		type = pci_mapreg_type(sc->sc_pc, sc->sc_ptag, reg);
474 		if (pci_mapreg_map(pa, reg, type, 0, &sc->sc_bars_iot[j],
475 		    &sc->sc_bars_ioh[j], NULL, &sc->sc_bars_iosize[j],
476 		    bars[i])) {
477 			printf("%s: can't map bar %u \n",
478 			    sc->sc_sc.sc_dev.dv_xname, i);
479 			ret = EIO;
480 			goto err;
481 		}
482 		bars_idx[i] = j;
483 		j++;
484 	}
485 
486 	i = bars_idx[notify.cap.bar];
487 	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
488 	    notify.cap.offset, notify.cap.length, &sc->sc_notify_ioh) != 0) {
489 		printf("%s: can't map notify i/o space\n",
490 		    sc->sc_sc.sc_dev.dv_xname);
491 		ret = EIO;
492 		goto err;
493 	}
494 	sc->sc_notify_iosize = notify.cap.length;
495 	sc->sc_notify_iot = sc->sc_bars_iot[i];
496 	sc->sc_notify_off_multiplier = notify.notify_off_multiplier;
497 
498 	if (have_device_cfg) {
499 		i = bars_idx[device.bar];
500 		if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
501 		    device.offset, device.length, &sc->sc_devcfg_ioh) != 0) {
502 			printf("%s: can't map devcfg i/o space\n",
503 			    sc->sc_sc.sc_dev.dv_xname);
504 			ret = EIO;
505 			goto err;
506 		}
507 		sc->sc_devcfg_iosize = device.length;
508 		sc->sc_devcfg_iot = sc->sc_bars_iot[i];
509 	}
510 
511 	i = bars_idx[isr.bar];
512 	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
513 	    isr.offset, isr.length, &sc->sc_isr_ioh) != 0) {
514 		printf("%s: can't map isr i/o space\n",
515 		    sc->sc_sc.sc_dev.dv_xname);
516 		ret = EIO;
517 		goto err;
518 	}
519 	sc->sc_isr_iosize = isr.length;
520 	sc->sc_isr_iot = sc->sc_bars_iot[i];
521 
522 	i = bars_idx[common.bar];
523 	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
524 	    common.offset, common.length, &sc->sc_ioh) != 0) {
525 		printf("%s: can't map common i/o space\n",
526 		    sc->sc_sc.sc_dev.dv_xname);
527 		ret = EIO;
528 		goto err;
529 	}
530 	sc->sc_iosize = common.length;
531 	sc->sc_iot = sc->sc_bars_iot[i];
532 
533 	sc->sc_sc.sc_version_1 = 1;
534 	return 0;
535 
536 err:
537 	/* there is no pci_mapreg_unmap() */
538 	return ret;
539 }
540 
541 int
542 virtio_pci_attach_09(struct virtio_pci_softc *sc, struct pci_attach_args *pa)
543 {
544 	struct virtio_softc *vsc = &sc->sc_sc;
545 	pcireg_t type;
546 
547 	type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
548 	if (pci_mapreg_map(pa, PCI_MAPREG_START, type, 0,
549 	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_iosize, 0)) {
550 		printf("%s: can't map i/o space\n", vsc->sc_dev.dv_xname);
551 		return EIO;
552 	}
553 
554 	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
555 	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &sc->sc_notify_ioh) != 0) {
556 		printf("%s: can't map notify i/o space\n",
557 		    vsc->sc_dev.dv_xname);
558 		return EIO;
559 	}
560 	sc->sc_notify_iosize = 2;
561 	sc->sc_notify_iot = sc->sc_iot;
562 
563 	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
564 	    VIRTIO_CONFIG_ISR_STATUS, 1, &sc->sc_isr_ioh) != 0) {
565 		printf("%s: can't map isr i/o space\n",
566 		    vsc->sc_dev.dv_xname);
567 		return EIO;
568 	}
569 	sc->sc_isr_iosize = 1;
570 	sc->sc_isr_iot = sc->sc_iot;
571 
572 	return 0;
573 }
574 
/*
 * Main attach: pick the transport version (1.0 and/or 0.9), reset and
 * acknowledge the device, attach the child driver and finally establish
 * interrupts (MSI-X per-VQ preferred, then shared MSI-X, then MSI/INTx).
 */
void
virtio_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
	struct virtio_softc *vsc = &sc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision, ret = ENODEV;
	pcireg_t id;
	char const *intrstr;
	pci_intr_handle_t ih;
	struct virtio_pci_attach_args vpa = { { 0 }, pa };
	int n;

	/* Derive the virtio device id from the PCI revision/product. */
	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* subsystem ID shows what I am */
		id = PCI_PRODUCT(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		/* 1.0: product id is 0x1040 + virtio device id. */
		id = PCI_PRODUCT(pa->pa_id) - 0x1040;
		break;
	default:
		printf("unknown revision 0x%02x; giving up\n", revision);
		return;
	}

	sc->sc_pc = pc;
	sc->sc_ptag = pa->pa_tag;
	vsc->sc_dmat = pa->pa_dmat;

#if defined(__i386__) || defined(__amd64__)
	/*
	 * For virtio, ignore normal MSI black/white-listing depending on the
	 * PCI bridge but enable it unconditionally.
	 */
	pa->pa_flags |= PCI_FLAGS_MSI_ENABLED;
#endif

#if VIRTIO_DEBUG
	virtio_pci_dump_caps(sc);
#endif

	/* Tell the child how many interrupt vectors are available. */
	n = MIN(MAX_MSIX_VECS, pci_intr_msix_count(pa));
	n = MAX(n, 1);
	vpa.vpa_va.va_nintr = n;

	vsc->sc_ops = &virtio_pci_ops;
	/* Try 1.0 first unless disabled by cf_flags; see config(8). */
	if ((vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_VERSION_1) == 0 &&
	    (revision == 1 ||
	     (vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_PREFER_VERSION_1))) {
		ret = virtio_pci_attach_10(sc, pa);
	}
	if (ret != 0 && revision == 0) {
		/* revision 0 means 0.9 only or both 0.9 and 1.0 */
		ret = virtio_pci_attach_09(sc, pa);
	}
	if (ret != 0) {
		printf(": Cannot attach (%d)\n", ret);
		return;
	}

	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	sc->sc_irq_type = IRQ_NO_MSIX;
	if (virtio_pci_adjust_config_region(sc) != 0)
		return;

	/* Standard init handshake: reset, then ACK and DRIVER status. */
	virtio_device_reset(vsc);
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	printf("\n");
	vpa.vpa_va.va_devid = id;
	vsc->sc_child = NULL;
	config_found(self, &vpa, NULL);
	if (vsc->sc_child == NULL) {
		printf("%s: no matching child driver; not configured\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}
	if (vsc->sc_child == VIRTIO_CHILD_ERROR) {
		printf("%s: virtio configuration failed\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}

	/* Interrupts are set up after the child so sc_nvqs is known. */
	if (virtio_pci_setup_msix(sc, &vpa, 0) == 0) {
		sc->sc_irq_type = IRQ_MSIX_PER_VQ;
		intrstr = "msix per-VQ";
	} else if (virtio_pci_setup_msix(sc, &vpa, 1) == 0) {
		sc->sc_irq_type = IRQ_MSIX_SHARED;
		intrstr = "msix shared";
	} else {
		int (*ih_func)(void *) = virtio_pci_legacy_intr;
		if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
			printf("%s: couldn't map interrupt\n", vsc->sc_dev.dv_xname);
			goto fail_2;
		}
		intrstr = pci_intr_string(pc, ih);
		/*
		 * We always set the IPL_MPSAFE flag in order to do the relatively
		 * expensive ISR read without lock, and then grab the kernel lock in
		 * the interrupt handler.
		 */
		if (vsc->sc_ipl & IPL_MPSAFE)
			ih_func = virtio_pci_legacy_intr_mpsafe;
		sc->sc_ih[0] = pci_intr_establish(pc, ih, vsc->sc_ipl | IPL_MPSAFE,
		    ih_func, sc, vsc->sc_dev.dv_xname);
		if (sc->sc_ih[0] == NULL) {
			printf("%s: couldn't establish interrupt", vsc->sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			goto fail_2;
		}
	}
	printf("%s: %s\n", vsc->sc_dev.dv_xname, intrstr);

	return;

fail_2:
	config_detach(vsc->sc_child, 0);
fail_1:
	/* no pci_mapreg_unmap() or pci_intr_unmap() */
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
}
703 
704 int
705 virtio_pci_detach(struct device *self, int flags)
706 {
707 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
708 	struct virtio_softc *vsc = &sc->sc_sc;
709 	int r;
710 
711 	if (vsc->sc_child != 0 && vsc->sc_child != VIRTIO_CHILD_ERROR) {
712 		r = config_detach(vsc->sc_child, flags);
713 		if (r)
714 			return r;
715 	}
716 	KASSERT(vsc->sc_child == 0 || vsc->sc_child == VIRTIO_CHILD_ERROR);
717 	KASSERT(vsc->sc_vqs == 0);
718 	virtio_pci_free_irqs(sc);
719 	if (sc->sc_iosize)
720 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_iosize);
721 	sc->sc_iosize = 0;
722 
723 	return 0;
724 }
725 
726 int
727 virtio_pci_adjust_config_region(struct virtio_pci_softc *sc)
728 {
729 	if (sc->sc_sc.sc_version_1)
730 		return 0;
731 	sc->sc_devcfg_iosize = sc->sc_iosize - sc->sc_devcfg_offset;
732 	sc->sc_devcfg_iot = sc->sc_iot;
733 	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, sc->sc_devcfg_offset,
734 	    sc->sc_devcfg_iosize, &sc->sc_devcfg_ioh) != 0) {
735 		printf("%s: can't map config i/o space\n",
736 		    sc->sc_sc.sc_dev.dv_xname);
737 		return 1;
738 	}
739 	return 0;
740 }
741 
742 /*
743  * Feature negotiation.
744  * Prints available / negotiated features if guest_feature_names != NULL and
745  * VIRTIO_DEBUG is 1
746  */
/*
 * Negotiate features with the device: apply the UKC cf_flags overrides,
 * then intersect the host's feature set with the driver's.  Delegates to
 * virtio_pci_negotiate_features_10() for modern devices.
 */
int
virtio_pci_negotiate_features(struct virtio_softc *vsc,
    const struct virtio_feature_name *guest_feature_names)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t host, negotiated;

	vsc->sc_active_features = 0;

	/*
	 * We enable indirect descriptors by default. They can be switched
	 * off by setting bit 1 in the driver flags, see config(8)
	 */
	if (!(vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT) &&
	    !(vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT)) {
		vsc->sc_driver_features |= VIRTIO_F_RING_INDIRECT_DESC;
	} else if (guest_feature_names != NULL) {
		printf(" RingIndirectDesc disabled by UKC");
	}

	/*
	 * The driver must add VIRTIO_F_RING_EVENT_IDX if it supports it.
	 * If it did, check if it is disabled by bit 2 in the driver flags.
	 */
	if ((vsc->sc_driver_features & VIRTIO_F_RING_EVENT_IDX) &&
	    ((vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_EVENT_IDX) ||
	    (vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_EVENT_IDX))) {
		if (guest_feature_names != NULL)
			printf(" RingEventIdx disabled by UKC");
		vsc->sc_driver_features &= ~VIRTIO_F_RING_EVENT_IDX;
	}

	if (vsc->sc_version_1) {
		return virtio_pci_negotiate_features_10(vsc,
		    guest_feature_names);
	}

	/* virtio 0.9 only */
	host = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
				VIRTIO_CONFIG_DEVICE_FEATURES);
	negotiated = host & vsc->sc_driver_features;
#if VIRTIO_DEBUG
	if (guest_feature_names)
		virtio_log_features(host, negotiated, guest_feature_names);
#endif
	/* Tell the device which features we will actually use. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			  VIRTIO_CONFIG_GUEST_FEATURES, negotiated);
	vsc->sc_active_features = negotiated;
	if (negotiated & VIRTIO_F_RING_INDIRECT_DESC)
		vsc->sc_indirect = 1;
	else
		vsc->sc_indirect = 0;
	return 0;
}
801 
/*
 * Virtio 1.0 feature negotiation: read the 64-bit host feature set in two
 * 32-bit halves, write back the negotiated set, then set FEATURES_OK and
 * verify the device accepted it.  Returns 0 on success, ENXIO if the
 * device rejects FEATURES_OK, EINVAL if it rejects VERSION_1.
 */
int
virtio_pci_negotiate_features_10(struct virtio_softc *vsc,
    const struct virtio_feature_name *guest_feature_names)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t host, negotiated;

	vsc->sc_driver_features |= VIRTIO_F_VERSION_1;
	/* notify on empty is 0.9 only */
	vsc->sc_driver_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
	/* Read the two 32-bit halves of the host feature word. */
	CWRITE(sc, device_feature_select, 0);
	host = CREAD(sc, device_feature);
	CWRITE(sc, device_feature_select, 1);
	host |= (uint64_t)CREAD(sc, device_feature) << 32;

	negotiated = host & vsc->sc_driver_features;
#if VIRTIO_DEBUG
	if (guest_feature_names)
		virtio_log_features(host, negotiated, guest_feature_names);
#endif
	/* Write back the negotiated set, again in two halves. */
	CWRITE(sc, driver_feature_select, 0);
	CWRITE(sc, driver_feature, negotiated & 0xffffffff);
	CWRITE(sc, driver_feature_select, 1);
	CWRITE(sc, driver_feature, negotiated >> 32);
	virtio_pci_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);

	/* The device clears FEATURES_OK if it rejects the feature set. */
	if ((CREAD(sc, device_status) &
	    VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
		printf("%s: Feature negotiation failed\n",
		    vsc->sc_dev.dv_xname);
		CWRITE(sc, device_status, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return ENXIO;
	}
	vsc->sc_active_features = negotiated;

	if (negotiated & VIRTIO_F_RING_INDIRECT_DESC)
		vsc->sc_indirect = 1;
	else
		vsc->sc_indirect = 0;

	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
#if VIRTIO_DEBUG
		printf("%s: Host rejected Version_1\n", __func__);
#endif
		CWRITE(sc, device_status, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return EINVAL;
	}
	return 0;
}
851 
852 /*
853  * Device configuration registers.
854  */
855 uint8_t
856 virtio_pci_read_device_config_1(struct virtio_softc *vsc, int index)
857 {
858 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
859 	return bus_space_read_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
860 }
861 
862 uint16_t
863 virtio_pci_read_device_config_2(struct virtio_softc *vsc, int index)
864 {
865 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
866 	return bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
867 }
868 
869 uint32_t
870 virtio_pci_read_device_config_4(struct virtio_softc *vsc, int index)
871 {
872 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
873 	return bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
874 }
875 
876 uint64_t
877 virtio_pci_read_device_config_8(struct virtio_softc *vsc, int index)
878 {
879 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
880 	uint64_t r;
881 
882 	r = bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
883 	    index + sizeof(uint32_t));
884 	r <<= 32;
885 	r += bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
886 	return r;
887 }
888 
889 void
890 virtio_pci_write_device_config_1(struct virtio_softc *vsc, int index,
891     uint8_t value)
892 {
893 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
894 	bus_space_write_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
895 }
896 
897 void
898 virtio_pci_write_device_config_2(struct virtio_softc *vsc, int index,
899     uint16_t value)
900 {
901 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
902 	bus_space_write_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
903 }
904 
905 void
906 virtio_pci_write_device_config_4(struct virtio_softc *vsc,
907 			     int index, uint32_t value)
908 {
909 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
910 	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
911 }
912 
913 void
914 virtio_pci_write_device_config_8(struct virtio_softc *vsc,
915 			     int index, uint64_t value)
916 {
917 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
918 	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
919 	    index, value & 0xffffffff);
920 	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
921 	    index + sizeof(uint32_t), value >> 32);
922 }
923 
/*
 * Map and establish MSI-X vector idx with the given handler.  The handle
 * is stored in sc_ih[idx].  Returns 0 on success, 1 on failure (mapping
 * or establishing).
 */
int
virtio_pci_msix_establish(struct virtio_pci_softc *sc,
    struct virtio_pci_attach_args *vpa, int idx,
    int (*handler)(void *), void *ih_arg)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	pci_intr_handle_t ih;

	if (pci_intr_map_msix(vpa->vpa_pa, idx, &ih) != 0) {
#if VIRTIO_DEBUG
		printf("%s[%d]: pci_intr_map_msix failed\n",
		    vsc->sc_dev.dv_xname, idx);
#endif
		return 1;
	}
	sc->sc_ih[idx] = pci_intr_establish(sc->sc_pc, ih, vsc->sc_ipl,
	    handler, ih_arg, vsc->sc_dev.dv_xname);
	if (sc->sc_ih[idx] == NULL) {
		printf("%s[%d]: couldn't establish msix interrupt\n",
		    vsc->sc_dev.dv_xname, idx);
		return 1;
	}
	return 0;
}
948 
949 void
950 virtio_pci_set_msix_queue_vector(struct virtio_pci_softc *sc, uint32_t idx, uint16_t vector)
951 {
952 	if (sc->sc_sc.sc_version_1) {
953 		CWRITE(sc, queue_select, idx);
954 		CWRITE(sc, queue_msix_vector, vector);
955 	} else {
956 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
957 		    VIRTIO_CONFIG_QUEUE_SELECT, idx);
958 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
959 		    VIRTIO_MSI_QUEUE_VECTOR, vector);
960 	}
961 }
962 
963 void
964 virtio_pci_set_msix_config_vector(struct virtio_pci_softc *sc, uint16_t vector)
965 {
966 	if (sc->sc_sc.sc_version_1) {
967 		CWRITE(sc, config_msix_vector, vector);
968 	} else {
969 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
970 		    VIRTIO_MSI_CONFIG_VECTOR, vector);
971 	}
972 }
973 
974 
/*
 * Tear down all established interrupt handlers and switch the device
 * config region back to the layout without the MSI vector fields.
 */
void
virtio_pci_free_irqs(struct virtio_pci_softc *sc)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	int i;

	/*
	 * If the config region is at the MSI layout, MSI-X vectors were
	 * programmed; detach each queue from its vector before the
	 * handlers are disestablished.
	 */
	if (sc->sc_devcfg_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI) {
		for (i = 0; i < vsc->sc_nvqs; i++) {
			virtio_pci_set_msix_queue_vector(sc, i,
			    VIRTIO_MSI_NO_VECTOR);
		}
	}

	/* Disestablish every handler that was set up. */
	for (i = 0; i < MAX_MSIX_VECS; i++) {
		if (sc->sc_ih[i]) {
			pci_intr_disestablish(sc->sc_pc, sc->sc_ih[i]);
			sc->sc_ih[i] = NULL;
		}
	}

	/* Restore the non-MSI config layout and re-map the region. */
	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	virtio_pci_adjust_config_region(sc);
}
998 
/*
 * Set up MSI-X interrupts: vector 0 always handles config changes.
 * If `shared' is set, all queues share vector 1; otherwise queue i
 * gets its own vector i + 1.  Returns 0 on success, 1 on failure
 * (with any partially established state torn down).
 */
int
virtio_pci_setup_msix(struct virtio_pci_softc *sc,
    struct virtio_pci_attach_args *vpa, int shared)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	int i;

	/* Shared needs config + queue */
	if (shared && vpa->vpa_va.va_nintr < 1 + 1)
		return 1;
	/* Per VQ needs config + N * queue */
	if (!shared && vpa->vpa_va.va_nintr < 1 + vsc->sc_nvqs)
		return 1;

	if (virtio_pci_msix_establish(sc, vpa, 0, virtio_pci_config_intr, vsc))
		return 1;
	/*
	 * Switch the config region to the MSI layout before programming
	 * any vectors — the vector registers only exist in that layout
	 * on legacy devices.
	 */
	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
	virtio_pci_adjust_config_region(sc);
	virtio_pci_set_msix_config_vector(sc, 0);

	if (shared) {
		/* One handler (vector 1) scans all queues. */
		if (virtio_pci_msix_establish(sc, vpa, 1,
		    virtio_pci_shared_queue_intr, vsc)) {
			goto fail;
		}

		for (i = 0; i < vsc->sc_nvqs; i++)
			virtio_pci_set_msix_queue_vector(sc, i, 1);
	} else {
		/* Dedicated handler per queue, vectors 1..nvqs. */
		for (i = 0; i < vsc->sc_nvqs; i++) {
			if (virtio_pci_msix_establish(sc, vpa, i + 1,
			    virtio_pci_queue_intr, &vsc->sc_vqs[i])) {
				goto fail;
			}
			virtio_pci_set_msix_queue_vector(sc, i, i + 1);
		}
	}

	return 0;
fail:
	virtio_pci_free_irqs(sc);
	return 1;
}
1042 
1043 /*
1044  * Interrupt handler.
1045  */
1046 
1047 /*
1048  * Only used without MSI-X
1049  */
1050 int
1051 virtio_pci_legacy_intr(void *arg)
1052 {
1053 	struct virtio_pci_softc *sc = arg;
1054 	struct virtio_softc *vsc = &sc->sc_sc;
1055 	int isr, r = 0;
1056 
1057 	/* check and ack the interrupt */
1058 	isr = bus_space_read_1(sc->sc_isr_iot, sc->sc_isr_ioh, 0);
1059 	if (isr == 0)
1060 		return 0;
1061 	KERNEL_LOCK();
1062 	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
1063 	    (vsc->sc_config_change != NULL)) {
1064 		r = (vsc->sc_config_change)(vsc);
1065 	}
1066 	r |= virtio_check_vqs(vsc);
1067 	KERNEL_UNLOCK();
1068 
1069 	return r;
1070 }
1071 
1072 int
1073 virtio_pci_legacy_intr_mpsafe(void *arg)
1074 {
1075 	struct virtio_pci_softc *sc = arg;
1076 	struct virtio_softc *vsc = &sc->sc_sc;
1077 	int isr, r = 0;
1078 
1079 	/* check and ack the interrupt */
1080 	isr = bus_space_read_1(sc->sc_isr_iot, sc->sc_isr_ioh, 0);
1081 	if (isr == 0)
1082 		return 0;
1083 	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
1084 	    (vsc->sc_config_change != NULL)) {
1085 		r = (vsc->sc_config_change)(vsc);
1086 	}
1087 	r |= virtio_check_vqs(vsc);
1088 	return r;
1089 }
1090 
1091 /*
1092  * Only used with MSI-X
1093  */
1094 int
1095 virtio_pci_config_intr(void *arg)
1096 {
1097 	struct virtio_softc *vsc = arg;
1098 
1099 	if (vsc->sc_config_change != NULL)
1100 		return vsc->sc_config_change(vsc);
1101 	return 0;
1102 }
1103 
1104 /*
1105  * Only used with MSI-X
1106  */
1107 int
1108 virtio_pci_queue_intr(void *arg)
1109 {
1110 	struct virtqueue *vq = arg;
1111 	struct virtio_softc *vsc = vq->vq_owner;
1112 
1113 	return virtio_check_vq(vsc, vq);
1114 }
1115 
int
virtio_pci_shared_queue_intr(void *arg)
{
	struct virtio_softc *vsc = arg;

	/* A single vector serves every queue, so scan them all. */
	return virtio_check_vqs(vsc);
}
1123 
1124 /*
1125  * Interrupt handler to be used when polling.
1126  * We cannot use isr here because it is not defined in MSI-X mode.
1127  */
1128 int
1129 virtio_pci_poll_intr(void *arg)
1130 {
1131 	struct virtio_pci_softc *sc = arg;
1132 	struct virtio_softc *vsc = &sc->sc_sc;
1133 	int r = 0;
1134 
1135 	if (vsc->sc_config_change != NULL)
1136 		r = (vsc->sc_config_change)(vsc);
1137 
1138 	r |= virtio_check_vqs(vsc);
1139 
1140 	return r;
1141 }
1142 
1143 void
1144 virtio_pci_kick(struct virtio_softc *vsc, uint16_t idx)
1145 {
1146 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
1147 	unsigned offset = 0;
1148 	if (vsc->sc_version_1) {
1149 		offset = vsc->sc_vqs[idx].vq_notify_off *
1150 		    sc->sc_notify_off_multiplier;
1151 	}
1152 	bus_space_write_2(sc->sc_notify_iot, sc->sc_notify_ioh, offset, idx);
1153 }
1154