xref: /openbsd-src/sys/dev/pci/virtio_pci.c (revision 3bef86f7bc2197c76d5fec5b22e91f84e96ed5e5)
1 /*	$OpenBSD: virtio_pci.c,v 1.36 2024/01/15 02:35:23 dv Exp $	*/
2 /*	$NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $	*/
3 
4 /*
5  * Copyright (c) 2012 Stefan Fritsch.
6  * Copyright (c) 2010 Minoura Makoto.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/device.h>
33 #include <sys/mutex.h>
34 
35 #include <dev/pci/pcidevs.h>
36 #include <dev/pci/pcireg.h>
37 #include <dev/pci/pcivar.h>
38 #include <dev/pci/virtio_pcireg.h>
39 
40 #include <dev/pv/virtioreg.h>
41 #include <dev/pv/virtiovar.h>
42 
/* Debug printf: compiled in only when VIRTIO_DEBUG is at least `n`. */
#define DNPRINTF(n,x...)				\
    do { if (VIRTIO_DEBUG >= n) printf(x); } while(0)
45 
46 
47 /*
48  * XXX: Before being used on big endian arches, the access to config registers
49  * XXX: needs to be reviewed/fixed. The non-device specific registers are
50  * XXX: PCI-endian while the device specific registers are native endian.
51  */
52 
53 #define MAX_MSIX_VECS	8
54 
55 struct virtio_pci_softc;
56 
57 int		virtio_pci_match(struct device *, void *, void *);
58 void		virtio_pci_attach(struct device *, struct device *, void *);
59 int		virtio_pci_attach_09(struct virtio_pci_softc *sc, struct pci_attach_args *pa);
60 int		virtio_pci_attach_10(struct virtio_pci_softc *sc, struct pci_attach_args *pa);
61 int		virtio_pci_detach(struct device *, int);
62 
63 void		virtio_pci_kick(struct virtio_softc *, uint16_t);
64 int		virtio_pci_adjust_config_region(struct virtio_pci_softc *);
65 uint8_t		virtio_pci_read_device_config_1(struct virtio_softc *, int);
66 uint16_t	virtio_pci_read_device_config_2(struct virtio_softc *, int);
67 uint32_t	virtio_pci_read_device_config_4(struct virtio_softc *, int);
68 uint64_t	virtio_pci_read_device_config_8(struct virtio_softc *, int);
69 void		virtio_pci_write_device_config_1(struct virtio_softc *, int, uint8_t);
70 void		virtio_pci_write_device_config_2(struct virtio_softc *, int, uint16_t);
71 void		virtio_pci_write_device_config_4(struct virtio_softc *, int, uint32_t);
72 void		virtio_pci_write_device_config_8(struct virtio_softc *, int, uint64_t);
73 uint16_t	virtio_pci_read_queue_size(struct virtio_softc *, uint16_t);
74 void		virtio_pci_setup_queue(struct virtio_softc *, struct virtqueue *, uint64_t);
75 void		virtio_pci_set_status(struct virtio_softc *, int);
76 int		virtio_pci_negotiate_features(struct virtio_softc *, const struct virtio_feature_name *);
77 int		virtio_pci_negotiate_features_10(struct virtio_softc *, const struct virtio_feature_name *);
78 void		virtio_pci_set_msix_queue_vector(struct virtio_pci_softc *, uint32_t, uint16_t);
79 void		virtio_pci_set_msix_config_vector(struct virtio_pci_softc *, uint16_t);
80 int		virtio_pci_msix_establish(struct virtio_pci_softc *, struct pci_attach_args *, int, int (*)(void *), void *);
81 int		virtio_pci_setup_msix(struct virtio_pci_softc *, struct pci_attach_args *, int);
82 void		virtio_pci_free_irqs(struct virtio_pci_softc *);
83 int		virtio_pci_poll_intr(void *);
84 int		virtio_pci_legacy_intr(void *);
85 int		virtio_pci_legacy_intr_mpsafe(void *);
86 int		virtio_pci_config_intr(void *);
87 int		virtio_pci_queue_intr(void *);
88 int		virtio_pci_shared_queue_intr(void *);
89 int		virtio_pci_find_cap(struct virtio_pci_softc *sc, int cfg_type, void *buf, int buflen);
90 #if VIRTIO_DEBUG
91 void virtio_pci_dump_caps(struct virtio_pci_softc *sc);
92 #endif
93 
/* Interrupt delivery mode chosen at attach time. */
enum irq_type {
	IRQ_NO_MSIX,	 /* single legacy/MSI interrupt for everything */
	IRQ_MSIX_SHARED, /* vec 0: config irq, vec 1 shared by all vqs */
	IRQ_MSIX_PER_VQ, /* vec 0: config irq, vec n: irq of vq[n-1] */
};
99 
struct virtio_pci_softc {
	struct virtio_softc	sc_sc;		/* generic virtio state; must be first */
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_ptag;

	/* Main register window: whole BAR (0.9) or common cfg region (1.0). */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;

	/* BARs mapped for virtio 1.0, filled in order of discovery. */
	bus_space_tag_t		sc_bars_iot[4];
	bus_space_handle_t	sc_bars_ioh[4];
	bus_size_t		sc_bars_iosize[4];

	/* Queue notification region. */
	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;
	unsigned int		sc_notify_off_multiplier;

	/* Device-specific configuration region. */
	bus_space_tag_t		sc_devcfg_iot;
	bus_space_handle_t	sc_devcfg_ioh;
	bus_size_t		sc_devcfg_iosize;
	/*
	 * With 0.9, the offset of the devcfg region in the io bar changes
	 * depending on MSI-X being enabled or not.
	 * With 1.0, this field is still used to remember if MSI-X is enabled
	 * or not.
	 */
	unsigned int		sc_devcfg_offset;

	/* ISR status byte (read-to-ack in non-MSI-X mode). */
	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	void			*sc_ih[MAX_MSIX_VECS];	/* established interrupts */

	enum irq_type		sc_irq_type;
};
137 
/* autoconf(9) glue: size, match, attach, detach, activate. */
const struct cfattach virtio_pci_ca = {
	sizeof(struct virtio_pci_softc),
	virtio_pci_match,
	virtio_pci_attach,
	virtio_pci_detach,
	NULL
};
145 
/* PCI transport implementation of the generic virtio operations. */
struct virtio_ops virtio_pci_ops = {
	virtio_pci_kick,
	virtio_pci_read_device_config_1,
	virtio_pci_read_device_config_2,
	virtio_pci_read_device_config_4,
	virtio_pci_read_device_config_8,
	virtio_pci_write_device_config_1,
	virtio_pci_write_device_config_2,
	virtio_pci_write_device_config_4,
	virtio_pci_write_device_config_8,
	virtio_pci_read_queue_size,
	virtio_pci_setup_queue,
	virtio_pci_set_status,
	virtio_pci_negotiate_features,
	virtio_pci_poll_intr,
};
162 
163 static inline
164 uint64_t _cread(struct virtio_pci_softc *sc, unsigned off, unsigned size)
165 {
166 	uint64_t val;
167 	switch (size) {
168 	case 1:
169 		val = bus_space_read_1(sc->sc_iot, sc->sc_ioh, off);
170 		break;
171 	case 2:
172 		val = bus_space_read_2(sc->sc_iot, sc->sc_ioh, off);
173 		break;
174 	case 4:
175 		val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
176 		break;
177 	case 8:
178 		val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
179 		    off + sizeof(uint32_t));
180 		val <<= 32;
181 		val += bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
182 		break;
183 	}
184 	return val;
185 }
186 
/* Read common-config member `memb` using that member's natural width. */
#define CREAD(sc, memb)  _cread(sc, offsetof(struct virtio_pci_common_cfg, memb), \
    sizeof(((struct virtio_pci_common_cfg *)0)->memb))
189 
/*
 * Write `val` to common-config member `memb` using that member's natural
 * width.  64-bit members are written as two 32-bit accesses, low word
 * first.
 */
#define CWRITE(sc, memb, val)							\
	do {									\
		struct virtio_pci_common_cfg c;					\
		size_t off = offsetof(struct virtio_pci_common_cfg, memb);	\
		size_t size = sizeof(c.memb);					\
										\
		DNPRINTF(2, "%s: %d: off %#zx size %#zx write %#llx\n",		\
		    __func__, __LINE__, off, size, (unsigned long long)val);	\
		switch (size) {							\
		case 1:								\
			bus_space_write_1(sc->sc_iot, sc->sc_ioh, off, val);	\
			break;							\
		case 2:								\
			bus_space_write_2(sc->sc_iot, sc->sc_ioh, off, val);	\
			break;							\
		case 4:								\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);	\
			break;							\
		case 8:								\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, off,		\
			    (val) & 0xffffffff);				\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh,		\
			    (off) + sizeof(uint32_t), (uint64_t)(val) >> 32);	\
			break;							\
		}								\
	} while (0)
216 
217 uint16_t
218 virtio_pci_read_queue_size(struct virtio_softc *vsc, uint16_t idx)
219 {
220 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
221 	uint16_t ret;
222 	if (sc->sc_sc.sc_version_1) {
223 		CWRITE(sc, queue_select, idx);
224 		ret = CREAD(sc, queue_size);
225 	} else {
226 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
227 		    VIRTIO_CONFIG_QUEUE_SELECT, idx);
228 		ret = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
229 		    VIRTIO_CONFIG_QUEUE_SIZE);
230 	}
231 	return ret;
232 }
233 
/*
 * Program the ring addresses of virtqueue `vq` into the device.
 * `addr` is the bus address of the ring memory; addr == 0 disables
 * the queue.  Also (re)assigns the queue's MSI-X vector when MSI-X
 * is already set up.
 */
void
virtio_pci_setup_queue(struct virtio_softc *vsc, struct virtqueue *vq,
    uint64_t addr)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	if (sc->sc_sc.sc_version_1) {
		CWRITE(sc, queue_select, vq->vq_index);
		if (addr == 0) {
			/* disable the queue and clear all ring addresses */
			CWRITE(sc, queue_enable, 0);
			CWRITE(sc, queue_desc, 0);
			CWRITE(sc, queue_avail, 0);
			CWRITE(sc, queue_used, 0);
		} else {
			/* addresses must be set before enabling the queue */
			CWRITE(sc, queue_desc, addr);
			CWRITE(sc, queue_avail, addr + vq->vq_availoffset);
			CWRITE(sc, queue_used, addr + vq->vq_usedoffset);
			CWRITE(sc, queue_enable, 1);
			vq->vq_notify_off = CREAD(sc, queue_notify_off);
		}
	} else {
		/* 0.9: single page-frame-number register per queue */
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SELECT, vq->vq_index);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);
	}

	/*
	 * This path is only executed if this function is called after
	 * the child's attach function has finished. In other cases,
	 * it's done in virtio_pci_setup_msix().
	 */
	if (sc->sc_irq_type != IRQ_NO_MSIX) {
		int vec = 1;
		if (sc->sc_irq_type == IRQ_MSIX_PER_VQ)
		       vec += vq->vq_index;
		if (sc->sc_sc.sc_version_1) {
			CWRITE(sc, queue_msix_vector, vec);
		} else {
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_MSI_QUEUE_VECTOR, vec);
		}
	}
}
277 
278 void
279 virtio_pci_set_status(struct virtio_softc *vsc, int status)
280 {
281 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
282 	int old = 0;
283 
284 	if (sc->sc_sc.sc_version_1) {
285 		if (status == 0) {
286 			CWRITE(sc, device_status, 0);
287 			while (CREAD(sc, device_status) != 0) {
288 				CPU_BUSY_CYCLE();
289 			}
290 		} else {
291 			old = CREAD(sc, device_status);
292 			CWRITE(sc, device_status, status|old);
293 		}
294 	} else {
295 		if (status == 0) {
296 			bus_space_write_1(sc->sc_iot, sc->sc_ioh,
297 			    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
298 			while (bus_space_read_1(sc->sc_iot, sc->sc_ioh,
299 			    VIRTIO_CONFIG_DEVICE_STATUS) != 0) {
300 				CPU_BUSY_CYCLE();
301 			}
302 		} else {
303 			old = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
304 			    VIRTIO_CONFIG_DEVICE_STATUS);
305 			bus_space_write_1(sc->sc_iot, sc->sc_ioh,
306 			    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
307 		}
308 	}
309 }
310 
311 int
312 virtio_pci_match(struct device *parent, void *match, void *aux)
313 {
314 	struct pci_attach_args *pa;
315 
316 	pa = (struct pci_attach_args *)aux;
317 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OPENBSD &&
318 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_OPENBSD_CONTROL)
319 		return 1;
320 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_QUMRANET)
321 		return 0;
322 	/* virtio 0.9 */
323 	if (PCI_PRODUCT(pa->pa_id) >= 0x1000 &&
324 	    PCI_PRODUCT(pa->pa_id) <= 0x103f &&
325 	    PCI_REVISION(pa->pa_class) == 0)
326 		return 1;
327 	/* virtio 1.0 */
328 	if (PCI_PRODUCT(pa->pa_id) >= 0x1040 &&
329 	    PCI_PRODUCT(pa->pa_id) <= 0x107f &&
330 	    PCI_REVISION(pa->pa_class) == 1)
331 		return 1;
332 	return 0;
333 }
334 
#if VIRTIO_DEBUG
/*
 * Debug helper: walk the vendor-specific PCI capability list and print
 * each virtio capability header.
 */
void
virtio_pci_dump_caps(struct virtio_pci_softc *sc)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_ptag;
	int off, i;
	union {
		pcireg_t reg[4];
		struct virtio_pci_cap vcap;
	} v;

	if (pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &off, &v.reg[0]) == 0)
		return;

	printf("\n");
	while (off != 0) {
		for (i = 0; i < 4; i++)
			v.reg[i] = pci_conf_read(pc, tag, off + i * 4);
		printf("%s: cfgoff %#x len %#x type %#x bar %#x: off %#x len %#x\n",
			__func__, off, v.vcap.cap_len, v.vcap.cfg_type, v.vcap.bar,
			v.vcap.offset, v.vcap.length);
		off = v.vcap.cap_next;
	}
}
#endif
361 
/*
 * Find the virtio PCI capability of type `cfg_type` and copy it into
 * `buf` (at least sizeof(struct virtio_pci_cap) bytes, rounded-up
 * multiple of pcireg_t for larger caps).  Returns 0 on success,
 * ENOENT if no such capability exists, ERANGE if `buf` is too small.
 */
int
virtio_pci_find_cap(struct virtio_pci_softc *sc, int cfg_type, void *buf, int buflen)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_ptag;
	unsigned int offset, i, len;
	union {
		pcireg_t reg[8];
		struct virtio_pci_cap vcap;
	} *v = buf;

	if (buflen < sizeof(struct virtio_pci_cap))
		return ERANGE;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v->reg[0]))
		return ENOENT;

	/* Walk the capability list until a matching type is found. */
	do {
		for (i = 0; i < 4; i++)
			v->reg[i] = pci_conf_read(pc, tag, offset + i * 4);
		if (v->vcap.cfg_type == cfg_type)
			break;
		offset = v->vcap.cap_next;
	} while (offset != 0);

	/* offset == 0 means the list ended without a match. */
	if (offset == 0)
		return ENOENT;

	/* Copy the remainder of caps larger than the generic header. */
	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
		if (len > buflen) {
			printf("%s: cap too large\n", __func__);
			return ERANGE;
		}
		for (i = 4; i < len / sizeof(pcireg_t);  i++)
			v->reg[i] = pci_conf_read(pc, tag, offset + i * 4);
	}

	return 0;
}
402 
403 
/* Number of 32-bit PCI base address registers. */
#define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
				sizeof(pcireg_t))
406 
/*
 * Attach as a virtio 1.0 (modern) device: locate the common, notify,
 * ISR and (optional) device-specific config capabilities, map every
 * BAR any of them lives in, and carve out subregions for each.
 * Returns 0 on success, ENODEV if the required caps are missing,
 * EIO if a mapping fails.
 */
int
virtio_pci_attach_10(struct virtio_pci_softc *sc, struct pci_attach_args *pa)
{
	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };
	int bars_idx[NMAPREG] = { 0 };
	struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
	int i, j = 0, ret = 0;

	/* common, notify and isr caps are mandatory for 1.0 */
	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_COMMON_CFG, &common, sizeof(common)) != 0)
		return ENODEV;

	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_NOTIFY_CFG, &notify, sizeof(notify)) != 0)
		return ENODEV;
	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_ISR_CFG, &isr, sizeof(isr)) != 0)
		return ENODEV;
	/* the device-specific config region is optional */
	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_DEVICE_CFG, &device, sizeof(device)) != 0)
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;

	/*
	 * XXX Maybe there are devices that offer the pci caps but not the
	 * XXX VERSION_1 feature bit? Then we should check the feature bit
	 * XXX here and fall back to 0.9 out if not present.
	 */

	/* Figure out which bars we need to map */
	for (i = 0; i < nitems(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;
		if (caps[i]->length == 0)
			continue;
		/* record the largest extent needed within each BAR */
		if (bars[bar] < len)
			bars[bar] = len;
	}

	/* Map each needed BAR; bars_idx maps BAR number -> sc_bars_* slot. */
	for (i = 0; i < nitems(bars); i++) {
		int reg;
		pcireg_t type;
		if (bars[i] == 0)
			continue;
		reg = PCI_MAPREG_START + i * 4;
		type = pci_mapreg_type(sc->sc_pc, sc->sc_ptag, reg);
		if (pci_mapreg_map(pa, reg, type, 0, &sc->sc_bars_iot[j],
		    &sc->sc_bars_ioh[j], NULL, &sc->sc_bars_iosize[j],
		    bars[i])) {
			printf("%s: can't map bar %u \n",
			    sc->sc_sc.sc_dev.dv_xname, i);
			ret = EIO;
			goto err;
		}
		bars_idx[i] = j;
		j++;
	}

	/* notify region */
	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    notify.cap.offset, notify.cap.length, &sc->sc_notify_ioh) != 0) {
		printf("%s: can't map notify i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_notify_iosize = notify.cap.length;
	sc->sc_notify_iot = sc->sc_bars_iot[i];
	sc->sc_notify_off_multiplier = notify.notify_off_multiplier;

	/* device-specific config region, if present */
	if (have_device_cfg) {
		i = bars_idx[device.bar];
		if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
		    device.offset, device.length, &sc->sc_devcfg_ioh) != 0) {
			printf("%s: can't map devcfg i/o space\n",
			    sc->sc_sc.sc_dev.dv_xname);
			ret = EIO;
			goto err;
		}
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = sc->sc_bars_iot[i];
	}

	/* ISR status region */
	i = bars_idx[isr.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    isr.offset, isr.length, &sc->sc_isr_ioh) != 0) {
		printf("%s: can't map isr i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_isr_iosize = isr.length;
	sc->sc_isr_iot = sc->sc_bars_iot[i];

	/* common config region goes into the main sc_iot/sc_ioh window */
	i = bars_idx[common.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    common.offset, common.length, &sc->sc_ioh) != 0) {
		printf("%s: can't map common i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_iosize = common.length;
	sc->sc_iot = sc->sc_bars_iot[i];

	sc->sc_sc.sc_version_1 = 1;
	return 0;

err:
	/* there is no pci_mapreg_unmap() */
	return ret;
}
519 
/*
 * Attach as a virtio 0.9 (legacy) device: map BAR 0 and carve out the
 * notify and ISR subregions from it.  Returns 0 on success, EIO on a
 * mapping failure.
 */
int
virtio_pci_attach_09(struct virtio_pci_softc *sc, struct pci_attach_args *pa)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	pcireg_t type;

	/* all legacy registers live in BAR 0 */
	type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
	if (pci_mapreg_map(pa, PCI_MAPREG_START, type, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_iosize, 0)) {
		printf("%s: can't map i/o space\n", vsc->sc_dev.dv_xname);
		return EIO;
	}

	/* 2-byte queue notify register */
	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &sc->sc_notify_ioh) != 0) {
		printf("%s: can't map notify i/o space\n",
		    vsc->sc_dev.dv_xname);
		return EIO;
	}
	sc->sc_notify_iosize = 2;
	sc->sc_notify_iot = sc->sc_iot;

	/* 1-byte ISR status register */
	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS, 1, &sc->sc_isr_ioh) != 0) {
		printf("%s: can't map isr i/o space\n",
		    vsc->sc_dev.dv_xname);
		return EIO;
	}
	sc->sc_isr_iosize = 1;
	sc->sc_isr_iot = sc->sc_iot;

	return 0;
}
553 
/*
 * Attach entry point: determine the child device id from the PCI ids,
 * map registers (1.0 preferred, 0.9 fallback), reset and acknowledge
 * the device, attach the matching child driver, and finally establish
 * interrupts (MSI-X per-VQ, MSI-X shared, then legacy/MSI fallback).
 */
void
virtio_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
	struct virtio_softc *vsc = &sc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision, ret = ENODEV;
	pcireg_t id;
	char const *intrstr;
	pci_intr_handle_t ih;

	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* subsystem ID shows what I am */
		id = PCI_PRODUCT(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		/* 1.0: device type is encoded in the product id */
		id = PCI_PRODUCT(pa->pa_id) - 0x1040;
		break;
	default:
		printf("unknown revision 0x%02x; giving up\n", revision);
		return;
	}

	sc->sc_pc = pc;
	sc->sc_ptag = pa->pa_tag;
	vsc->sc_dmat = pa->pa_dmat;

#if defined(__i386__) || defined(__amd64__)
	/*
	 * For virtio, ignore normal MSI black/white-listing depending on the
	 * PCI bridge but enable it unconditionally.
	 */
	pa->pa_flags |= PCI_FLAGS_MSI_ENABLED;
#endif

#if VIRTIO_DEBUG
	virtio_pci_dump_caps(sc);
#endif

	vsc->sc_ops = &virtio_pci_ops;
	/* try 1.0 first unless disabled by UKC flags; see config(8) */
	if ((vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_VERSION_1) == 0 &&
	    (revision == 1 ||
	     (vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_PREFER_VERSION_1))) {
		ret = virtio_pci_attach_10(sc, pa);
	}
	if (ret != 0 && revision == 0) {
		/* revision 0 means 0.9 only or both 0.9 and 1.0 */
		ret = virtio_pci_attach_09(sc, pa);
	}
	if (ret != 0) {
		printf(": Cannot attach (%d)\n", ret);
		return;
	}

	/* start without MSI-X; setup_msix() may switch this later */
	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	sc->sc_irq_type = IRQ_NO_MSIX;
	if (virtio_pci_adjust_config_region(sc) != 0)
		return;

	virtio_device_reset(vsc);
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	printf("\n");
	vsc->sc_childdevid = id;
	vsc->sc_child = NULL;
	/* the child driver sets vsc->sc_child during config_found() */
	config_found(self, sc, NULL);
	if (vsc->sc_child == NULL) {
		printf("%s: no matching child driver; not configured\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}
	if (vsc->sc_child == VIRTIO_CHILD_ERROR) {
		printf("%s: virtio configuration failed\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}

	if (virtio_pci_setup_msix(sc, pa, 0) == 0) {
		sc->sc_irq_type = IRQ_MSIX_PER_VQ;
		intrstr = "msix per-VQ";
	} else if (virtio_pci_setup_msix(sc, pa, 1) == 0) {
		sc->sc_irq_type = IRQ_MSIX_SHARED;
		intrstr = "msix shared";
	} else {
		int (*ih_func)(void *) = virtio_pci_legacy_intr;
		if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
			printf("%s: couldn't map interrupt\n", vsc->sc_dev.dv_xname);
			goto fail_2;
		}
		intrstr = pci_intr_string(pc, ih);
		/*
		 * We always set the IPL_MPSAFE flag in order to do the relatively
		 * expensive ISR read without lock, and then grab the kernel lock in
		 * the interrupt handler.
		 */
		if (vsc->sc_ipl & IPL_MPSAFE)
			ih_func = virtio_pci_legacy_intr_mpsafe;
		sc->sc_ih[0] = pci_intr_establish(pc, ih, vsc->sc_ipl | IPL_MPSAFE,
		    ih_func, sc, vsc->sc_dev.dv_xname);
		if (sc->sc_ih[0] == NULL) {
			printf("%s: couldn't establish interrupt", vsc->sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			goto fail_2;
		}
	}
	printf("%s: %s\n", vsc->sc_dev.dv_xname, intrstr);

	return;

fail_2:
	config_detach(vsc->sc_child, 0);
fail_1:
	/* no pci_mapreg_unmap() or pci_intr_unmap() */
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
}
676 
677 int
678 virtio_pci_detach(struct device *self, int flags)
679 {
680 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
681 	struct virtio_softc *vsc = &sc->sc_sc;
682 	int r;
683 
684 	if (vsc->sc_child != 0 && vsc->sc_child != VIRTIO_CHILD_ERROR) {
685 		r = config_detach(vsc->sc_child, flags);
686 		if (r)
687 			return r;
688 	}
689 	KASSERT(vsc->sc_child == 0 || vsc->sc_child == VIRTIO_CHILD_ERROR);
690 	KASSERT(vsc->sc_vqs == 0);
691 	virtio_pci_free_irqs(sc);
692 	if (sc->sc_iosize)
693 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_iosize);
694 	sc->sc_iosize = 0;
695 
696 	return 0;
697 }
698 
699 int
700 virtio_pci_adjust_config_region(struct virtio_pci_softc *sc)
701 {
702 	if (sc->sc_sc.sc_version_1)
703 		return 0;
704 	sc->sc_devcfg_iosize = sc->sc_iosize - sc->sc_devcfg_offset;
705 	sc->sc_devcfg_iot = sc->sc_iot;
706 	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, sc->sc_devcfg_offset,
707 	    sc->sc_devcfg_iosize, &sc->sc_devcfg_ioh) != 0) {
708 		printf("%s: can't map config i/o space\n",
709 		    sc->sc_sc.sc_dev.dv_xname);
710 		return 1;
711 	}
712 	return 0;
713 }
714 
715 /*
716  * Feature negotiation.
717  * Prints available / negotiated features if guest_feature_names != NULL and
718  * VIRTIO_DEBUG is 1
719  */
720 int
721 virtio_pci_negotiate_features(struct virtio_softc *vsc,
722     const struct virtio_feature_name *guest_feature_names)
723 {
724 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
725 	uint64_t host, negotiated;
726 
727 	vsc->sc_active_features = 0;
728 
729 	/*
730 	 * We enable indirect descriptors by default. They can be switched
731 	 * off by setting bit 1 in the driver flags, see config(8)
732 	 */
733 	if (!(vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT) &&
734 	    !(vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT)) {
735 		vsc->sc_driver_features |= VIRTIO_F_RING_INDIRECT_DESC;
736 	} else if (guest_feature_names != NULL) {
737 		printf(" RingIndirectDesc disabled by UKC");
738 	}
739 
740 	/*
741 	 * The driver must add VIRTIO_F_RING_EVENT_IDX if it supports it.
742 	 * If it did, check if it is disabled by bit 2 in the driver flags.
743 	 */
744 	if ((vsc->sc_driver_features & VIRTIO_F_RING_EVENT_IDX) &&
745 	    ((vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_EVENT_IDX) ||
746 	    (vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_EVENT_IDX))) {
747 		if (guest_feature_names != NULL)
748 			printf(" RingEventIdx disabled by UKC");
749 		vsc->sc_driver_features &= ~VIRTIO_F_RING_EVENT_IDX;
750 	}
751 
752 	if (vsc->sc_version_1) {
753 		return virtio_pci_negotiate_features_10(vsc,
754 		    guest_feature_names);
755 	}
756 
757 	/* virtio 0.9 only */
758 	host = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
759 				VIRTIO_CONFIG_DEVICE_FEATURES);
760 	negotiated = host & vsc->sc_driver_features;
761 #if VIRTIO_DEBUG
762 	if (guest_feature_names)
763 		virtio_log_features(host, negotiated, guest_feature_names);
764 #endif
765 	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
766 			  VIRTIO_CONFIG_GUEST_FEATURES, negotiated);
767 	vsc->sc_active_features = negotiated;
768 	if (negotiated & VIRTIO_F_RING_INDIRECT_DESC)
769 		vsc->sc_indirect = 1;
770 	else
771 		vsc->sc_indirect = 0;
772 	return 0;
773 }
774 
/*
 * Virtio 1.0 feature negotiation: read the 64-bit device feature set
 * in two 32-bit halves, intersect with the driver features, write the
 * result back, then set FEATURES_OK and verify the device accepted it.
 * Returns 0 on success, ENXIO/EINVAL on rejection (device marked
 * FAILED in that case).
 */
int
virtio_pci_negotiate_features_10(struct virtio_softc *vsc,
    const struct virtio_feature_name *guest_feature_names)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t host, negotiated;

	vsc->sc_driver_features |= VIRTIO_F_VERSION_1;
	/* notify on empty is 0.9 only */
	vsc->sc_driver_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
	/* read the 64-bit feature set, low then high word */
	CWRITE(sc, device_feature_select, 0);
	host = CREAD(sc, device_feature);
	CWRITE(sc, device_feature_select, 1);
	host |= (uint64_t)CREAD(sc, device_feature) << 32;

	negotiated = host & vsc->sc_driver_features;
#if VIRTIO_DEBUG
	if (guest_feature_names)
		virtio_log_features(host, negotiated, guest_feature_names);
#endif
	/* write back our selection, low then high word */
	CWRITE(sc, driver_feature_select, 0);
	CWRITE(sc, driver_feature, negotiated & 0xffffffff);
	CWRITE(sc, driver_feature_select, 1);
	CWRITE(sc, driver_feature, negotiated >> 32);
	virtio_pci_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);

	/* the device clears FEATURES_OK if it rejects our selection */
	if ((CREAD(sc, device_status) &
	    VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
		printf("%s: Feature negotiation failed\n",
		    vsc->sc_dev.dv_xname);
		CWRITE(sc, device_status, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return ENXIO;
	}
	vsc->sc_active_features = negotiated;

	if (negotiated & VIRTIO_F_RING_INDIRECT_DESC)
		vsc->sc_indirect = 1;
	else
		vsc->sc_indirect = 0;

	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
#if VIRTIO_DEBUG
		printf("%s: Host rejected Version_1\n", __func__);
#endif
		CWRITE(sc, device_status, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return EINVAL;
	}
	return 0;
}
824 
825 /*
826  * Device configuration registers.
827  */
828 uint8_t
829 virtio_pci_read_device_config_1(struct virtio_softc *vsc, int index)
830 {
831 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
832 	return bus_space_read_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
833 }
834 
835 uint16_t
836 virtio_pci_read_device_config_2(struct virtio_softc *vsc, int index)
837 {
838 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
839 	return bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
840 }
841 
842 uint32_t
843 virtio_pci_read_device_config_4(struct virtio_softc *vsc, int index)
844 {
845 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
846 	return bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
847 }
848 
849 uint64_t
850 virtio_pci_read_device_config_8(struct virtio_softc *vsc, int index)
851 {
852 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
853 	uint64_t r;
854 
855 	r = bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
856 	    index + sizeof(uint32_t));
857 	r <<= 32;
858 	r += bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
859 	return r;
860 }
861 
862 void
863 virtio_pci_write_device_config_1(struct virtio_softc *vsc, int index,
864     uint8_t value)
865 {
866 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
867 	bus_space_write_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
868 }
869 
870 void
871 virtio_pci_write_device_config_2(struct virtio_softc *vsc, int index,
872     uint16_t value)
873 {
874 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
875 	bus_space_write_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
876 }
877 
878 void
879 virtio_pci_write_device_config_4(struct virtio_softc *vsc,
880 			     int index, uint32_t value)
881 {
882 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
883 	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
884 }
885 
886 void
887 virtio_pci_write_device_config_8(struct virtio_softc *vsc,
888 			     int index, uint64_t value)
889 {
890 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
891 	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
892 	    index, value & 0xffffffff);
893 	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
894 	    index + sizeof(uint32_t), value >> 32);
895 }
896 
/*
 * Map MSI-X vector `idx` and establish `handler` with argument `ih_arg`
 * at the softc's interrupt level.  The handle is stored in sc_ih[idx].
 * Returns 0 on success, 1 on failure.
 */
int
virtio_pci_msix_establish(struct virtio_pci_softc *sc,
    struct pci_attach_args *pa, int idx, int (*handler)(void *), void *ih_arg)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	pci_intr_handle_t ih;

	if (pci_intr_map_msix(pa, idx, &ih) != 0) {
#if VIRTIO_DEBUG
		printf("%s[%d]: pci_intr_map_msix failed\n",
		    vsc->sc_dev.dv_xname, idx);
#endif
		return 1;
	}
	sc->sc_ih[idx] = pci_intr_establish(sc->sc_pc, ih, vsc->sc_ipl,
	    handler, ih_arg, vsc->sc_dev.dv_xname);
	if (sc->sc_ih[idx] == NULL) {
		printf("%s[%d]: couldn't establish msix interrupt\n",
		    vsc->sc_dev.dv_xname, idx);
		return 1;
	}
	return 0;
}
920 
921 void
922 virtio_pci_set_msix_queue_vector(struct virtio_pci_softc *sc, uint32_t idx, uint16_t vector)
923 {
924 	if (sc->sc_sc.sc_version_1) {
925 		CWRITE(sc, queue_select, idx);
926 		CWRITE(sc, queue_msix_vector, vector);
927 	} else {
928 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
929 		    VIRTIO_CONFIG_QUEUE_SELECT, idx);
930 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
931 		    VIRTIO_MSI_QUEUE_VECTOR, vector);
932 	}
933 }
934 
935 void
936 virtio_pci_set_msix_config_vector(struct virtio_pci_softc *sc, uint16_t vector)
937 {
938 	if (sc->sc_sc.sc_version_1) {
939 		CWRITE(sc, config_msix_vector, vector);
940 	} else {
941 		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
942 		    VIRTIO_MSI_CONFIG_VECTOR, vector);
943 	}
944 }
945 
946 
947 void
948 virtio_pci_free_irqs(struct virtio_pci_softc *sc)
949 {
950 	struct virtio_softc *vsc = &sc->sc_sc;
951 	int i;
952 
953 	if (sc->sc_devcfg_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI) {
954 		for (i = 0; i < vsc->sc_nvqs; i++) {
955 			virtio_pci_set_msix_queue_vector(sc, i,
956 			    VIRTIO_MSI_NO_VECTOR);
957 		}
958 	}
959 
960 	for (i = 0; i < MAX_MSIX_VECS; i++) {
961 		if (sc->sc_ih[i]) {
962 			pci_intr_disestablish(sc->sc_pc, sc->sc_ih[i]);
963 			sc->sc_ih[i] = NULL;
964 		}
965 	}
966 
967 	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
968 	virtio_pci_adjust_config_region(sc);
969 }
970 
971 int
972 virtio_pci_setup_msix(struct virtio_pci_softc *sc, struct pci_attach_args *pa,
973     int shared)
974 {
975 	struct virtio_softc *vsc = &sc->sc_sc;
976 	int i;
977 
978 	/* Shared needs config + queue */
979 	if (shared && pci_intr_msix_count(pa) < 1 + 1)
980 		return 1;
981 	/* Per VQ needs config + N * queue */
982 	if (!shared && pci_intr_msix_count(pa) < 1 + vsc->sc_nvqs)
983 		return 1;
984 
985 	if (virtio_pci_msix_establish(sc, pa, 0, virtio_pci_config_intr, vsc))
986 		return 1;
987 	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
988 	virtio_pci_adjust_config_region(sc);
989 	virtio_pci_set_msix_config_vector(sc, 0);
990 
991 	if (shared) {
992 		if (virtio_pci_msix_establish(sc, pa, 1,
993 		    virtio_pci_shared_queue_intr, vsc)) {
994 			goto fail;
995 		}
996 
997 		for (i = 0; i < vsc->sc_nvqs; i++)
998 			virtio_pci_set_msix_queue_vector(sc, i, 1);
999 	} else {
1000 		for (i = 0; i < vsc->sc_nvqs; i++) {
1001 			if (virtio_pci_msix_establish(sc, pa, i + 1,
1002 			    virtio_pci_queue_intr, &vsc->sc_vqs[i])) {
1003 				goto fail;
1004 			}
1005 			virtio_pci_set_msix_queue_vector(sc, i, i + 1);
1006 		}
1007 	}
1008 
1009 	return 0;
1010 fail:
1011 	virtio_pci_free_irqs(sc);
1012 	return 1;
1013 }
1014 
1015 /*
1016  * Interrupt handler.
1017  */
1018 
1019 /*
1020  * Only used without MSI-X
1021  */
1022 int
1023 virtio_pci_legacy_intr(void *arg)
1024 {
1025 	struct virtio_pci_softc *sc = arg;
1026 	struct virtio_softc *vsc = &sc->sc_sc;
1027 	int isr, r = 0;
1028 
1029 	/* check and ack the interrupt */
1030 	isr = bus_space_read_1(sc->sc_isr_iot, sc->sc_isr_ioh, 0);
1031 	if (isr == 0)
1032 		return 0;
1033 	KERNEL_LOCK();
1034 	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
1035 	    (vsc->sc_config_change != NULL)) {
1036 		r = (vsc->sc_config_change)(vsc);
1037 	}
1038 	r |= virtio_check_vqs(vsc);
1039 	KERNEL_UNLOCK();
1040 
1041 	return r;
1042 }
1043 
1044 int
1045 virtio_pci_legacy_intr_mpsafe(void *arg)
1046 {
1047 	struct virtio_pci_softc *sc = arg;
1048 	struct virtio_softc *vsc = &sc->sc_sc;
1049 	int isr, r = 0;
1050 
1051 	/* check and ack the interrupt */
1052 	isr = bus_space_read_1(sc->sc_isr_iot, sc->sc_isr_ioh, 0);
1053 	if (isr == 0)
1054 		return 0;
1055 	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
1056 	    (vsc->sc_config_change != NULL)) {
1057 		r = (vsc->sc_config_change)(vsc);
1058 	}
1059 	r |= virtio_check_vqs(vsc);
1060 	return r;
1061 }
1062 
1063 /*
1064  * Only used with MSI-X
1065  */
1066 int
1067 virtio_pci_config_intr(void *arg)
1068 {
1069 	struct virtio_softc *vsc = arg;
1070 
1071 	if (vsc->sc_config_change != NULL)
1072 		return vsc->sc_config_change(vsc);
1073 	return 0;
1074 }
1075 
1076 /*
1077  * Only used with MSI-X
1078  */
1079 int
1080 virtio_pci_queue_intr(void *arg)
1081 {
1082 	struct virtqueue *vq = arg;
1083 	struct virtio_softc *vsc = vq->vq_owner;
1084 
1085 	return virtio_check_vq(vsc, vq);
1086 }
1087 
int
virtio_pci_shared_queue_intr(void *arg)
{
	/* Shared vector: all virtqueues must be scanned. */
	return virtio_check_vqs((struct virtio_softc *)arg);
}
1095 
1096 /*
1097  * Interrupt handler to be used when polling.
1098  * We cannot use isr here because it is not defined in MSI-X mode.
1099  */
1100 int
1101 virtio_pci_poll_intr(void *arg)
1102 {
1103 	struct virtio_pci_softc *sc = arg;
1104 	struct virtio_softc *vsc = &sc->sc_sc;
1105 	int r = 0;
1106 
1107 	if (vsc->sc_config_change != NULL)
1108 		r = (vsc->sc_config_change)(vsc);
1109 
1110 	r |= virtio_check_vqs(vsc);
1111 
1112 	return r;
1113 }
1114 
1115 void
1116 virtio_pci_kick(struct virtio_softc *vsc, uint16_t idx)
1117 {
1118 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
1119 	unsigned offset = 0;
1120 	if (vsc->sc_version_1) {
1121 		offset = vsc->sc_vqs[idx].vq_notify_off *
1122 		    sc->sc_notify_off_multiplier;
1123 	}
1124 	bus_space_write_2(sc->sc_notify_iot, sc->sc_notify_ioh, offset, idx);
1125 }
1126