/* $NetBSD: virtio_pci.c,v 1.28 2021/02/05 19:18:23 reinoud Exp $ */

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.28 2021/02/05 19:18:23 reinoud Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/endian.h>
#include <sys/interrupt.h>

#include <sys/device.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
#include <dev/pci/virtio_pcireg.h>

#define VIRTIO_PRIVATE
#include <dev/pci/virtiovar.h> /* XXX: move to non-pci */


static int	virtio_pci_match(device_t, cfdata_t, void *);
static void	virtio_pci_attach(device_t, device_t, void *);
static int	virtio_pci_rescan(device_t, const char *, const int *);
static int	virtio_pci_detach(device_t, int);


#define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
				sizeof(pcireg_t))
struct virtio_pci_softc {
	struct virtio_softc	sc_sc;

	/* IO space */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;
	bus_size_t		sc_mapped_iosize;

	/* BARs */
	bus_space_tag_t		sc_bars_iot[NMAPREG];
	bus_space_handle_t	sc_bars_ioh[NMAPREG];
	bus_size_t		sc_bars_iosize[NMAPREG];

	/* notify space */
	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;
	uint32_t		sc_notify_off_multiplier;

	/* isr space */
	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	/* generic */
	struct pci_attach_args	sc_pa;
	pci_intr_handle_t	*sc_ihp;
	void			**sc_ihs;
	int			sc_ihs_num;
	int			sc_devcfg_offset;	/* for 0.9 */
};

static int	virtio_pci_attach_09(device_t, void *);
static void	virtio_pci_kick_09(struct virtio_softc *, uint16_t);
static uint16_t	virtio_pci_read_queue_size_09(struct virtio_softc *, uint16_t);
static void	virtio_pci_setup_queue_09(struct virtio_softc *, uint16_t, uint64_t);
static void	virtio_pci_set_status_09(struct virtio_softc *, int);
static void	virtio_pci_negotiate_features_09(struct virtio_softc *, uint64_t);

static int	virtio_pci_attach_10(device_t, void *);
static void	virtio_pci_kick_10(struct virtio_softc *, uint16_t);
static uint16_t	virtio_pci_read_queue_size_10(struct virtio_softc *, uint16_t);
static void	virtio_pci_setup_queue_10(struct virtio_softc *, uint16_t, uint64_t);
static void	virtio_pci_set_status_10(struct virtio_softc *, int);
static void	virtio_pci_negotiate_features_10(struct virtio_softc *, uint64_t);
static int	virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen);

static int	virtio_pci_setup_interrupts(struct virtio_softc *);
static void	virtio_pci_free_interrupts(struct virtio_softc *);
static int	virtio_pci_adjust_config_region(struct virtio_pci_softc *psc);
static int	virtio_pci_intr(void *arg);
static int	virtio_pci_msix_queue_intr(void *);
static int	virtio_pci_msix_config_intr(void *);
static int	virtio_pci_setup_msix_vectors_09(struct virtio_softc *);
static int	virtio_pci_setup_msix_vectors_10(struct virtio_softc *);
static int	virtio_pci_setup_msix_interrupts(struct virtio_softc *,
		    struct pci_attach_args *);
static int	virtio_pci_setup_intx_interrupt(struct virtio_softc *,
		    struct pci_attach_args *);

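/*
 * MSI-X vector layout: vector 0 is used for config change interrupts;
 * queue interrupts start at vector 1, either as a single shared vector
 * for all queues or, when sc_child_mq is set, as one vector per
 * virtqueue.
 */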
#define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
#define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1

/*
 * When using PCI-attached virtio on aarch64-eb under QEMU, the IO space
 * suddenly reads BIG_ENDIAN where it should stay LITTLE_ENDIAN. Data read
 * one byte at a time seems OK, but reads of larger widths come back with
 * swapped endianness. This is most noticeable when reading 8-byte
 * quantities, since we can't use bus_space_{read,write}_8().
 */

#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	BIG_ENDIAN	/* should be LITTLE_ENDIAN */
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#elif BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#else /* little endian */
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_09	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#endif
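/*
 * READ_ENDIAN_* gives the apparent byte order of multi-byte bus_space
 * reads on the platform; STRUCT_ENDIAN_* gives the byte order the
 * virtio spec defines for the config structures (host-native for
 * legacy 0.9, little endian for 1.0). These are stored below in
 * sc_bus_endian and sc_struct_endian for the bus-independent virtio
 * code to consult.
 */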


CFATTACH_DECL3_NEW(virtio_pci, sizeof(struct virtio_pci_softc),
    virtio_pci_match, virtio_pci_attach, virtio_pci_detach, NULL,
    virtio_pci_rescan, NULL, DVF_DETACH_SHUTDOWN);

static const struct virtio_ops virtio_pci_ops_09 = {
	.kick = virtio_pci_kick_09,
	.read_queue_size = virtio_pci_read_queue_size_09,
	.setup_queue = virtio_pci_setup_queue_09,
	.set_status = virtio_pci_set_status_09,
	.neg_features = virtio_pci_negotiate_features_09,
	.setup_interrupts = virtio_pci_setup_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
};

static const struct virtio_ops virtio_pci_ops_10 = {
	.kick = virtio_pci_kick_10,
	.read_queue_size = virtio_pci_read_queue_size_10,
	.setup_queue = virtio_pci_setup_queue_10,
	.set_status = virtio_pci_set_status_10,
	.neg_features = virtio_pci_negotiate_features_10,
	.setup_interrupts = virtio_pci_setup_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
};

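/*
 * Transitional (legacy-capable) virtio devices use PCI product IDs
 * 0x1000-0x103f with revision 0, while virtio 1.0 devices use
 * 0x1040-0x107f with revision 1; for the latter, the virtio device
 * type is the product ID minus 0x1040.
 */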
static int
virtio_pci_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa;

	pa = (struct pci_attach_args *)aux;
	switch (PCI_VENDOR(pa->pa_id)) {
	case PCI_VENDOR_QUMRANET:
		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
		      PCI_PRODUCT(pa->pa_id)) &&
		     (PCI_PRODUCT(pa->pa_id) <=
		      PCI_PRODUCT_QUMRANET_VIRTIO_103F)) &&
		      PCI_REVISION(pa->pa_class) == 0)
			return 1;
		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1040 <=
		      PCI_PRODUCT(pa->pa_id)) &&
		     (PCI_PRODUCT(pa->pa_id) <=
		      PCI_PRODUCT_QUMRANET_VIRTIO_107F)) &&
		      PCI_REVISION(pa->pa_class) == 1)
			return 1;
		break;
	}

	return 0;
}

static void
virtio_pci_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	int ret;
	pcireg_t id;
	pcireg_t csr;

	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* subsystem ID shows what I am */
		id = PCI_SUBSYS_ID(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		/* pci product number shows what I am */
		id = PCI_PRODUCT(pa->pa_id) - PCI_PRODUCT_QUMRANET_VIRTIO_1040;
		break;
	default:
		aprint_normal(": unknown revision 0x%02x; giving up\n",
			      revision);
		return;
	}

	aprint_normal("\n");
	aprint_naive("\n");
	virtio_print_device_type(self, id, revision);

	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE;
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);

	sc->sc_dev = self;
	psc->sc_pa = *pa;
	psc->sc_iot = pa->pa_iot;

	sc->sc_dmat = pa->pa_dmat;
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;

	/* attach is dependent on revision */
	ret = 0;
	if (revision == 1) {
		/* try to attach 1.0 */
		ret = virtio_pci_attach_10(self, aux);
	}
	if (ret == 0 && revision == 0) {
		/* revision 0 means 0.9 only or both 0.9 and 1.0 */
		ret = virtio_pci_attach_09(self, aux);
	}
	if (ret) {
		aprint_error_dev(self, "cannot attach (%d)\n", ret);
		return;
	}
	KASSERT(sc->sc_ops);

	/* preset config region */
	psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	if (virtio_pci_adjust_config_region(psc))
		return;

	/* generic */
	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	sc->sc_childdevid = id;
	sc->sc_child = NULL;
	virtio_pci_rescan(self, "virtio", 0);
	return;
}

/* ARGSUSED */
static int
virtio_pci_rescan(device_t self, const char *attr, const int *scan_flags)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	struct virtio_attach_args va;

	if (sc->sc_child)	/* Child already attached? */
		return 0;

	memset(&va, 0, sizeof(va));
	va.sc_childdevid = sc->sc_childdevid;

	config_found_ia(self, attr, &va, NULL);

	if (virtio_attach_failed(sc))
		return 0;

	return 0;
}


static int
virtio_pci_detach(device_t self, int flags)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	int r;

	if (sc->sc_child != NULL) {
		r = config_detach(sc->sc_child, flags);
		if (r)
			return r;
	}

	/* Check that child detached properly */
	KASSERT(sc->sc_child == NULL);
	KASSERT(sc->sc_vqs == NULL);
	KASSERT(psc->sc_ihs_num == 0);

	if (psc->sc_iosize)
		bus_space_unmap(psc->sc_iot, psc->sc_ioh,
			psc->sc_mapped_iosize);
	psc->sc_iosize = 0;

	return 0;
}


static int
virtio_pci_attach_09(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct virtio_softc * const sc = &psc->sc_sc;

	/* complete IO region */
	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
			   &psc->sc_iot, &psc->sc_ioh, NULL, &psc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return EIO;
	}
	psc->sc_mapped_iosize = psc->sc_iosize;

	/* queue space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
			VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		return EIO;
	}
	psc->sc_notify_iosize = 2;
	psc->sc_notify_iot = psc->sc_iot;

	/* ISR space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
			VIRTIO_CONFIG_ISR_STATUS, 1, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		return EIO;
	}
	psc->sc_isr_iosize = 1;
	psc->sc_isr_iot = psc->sc_iot;

	/* set our version 0.9 ops */
	sc->sc_ops = &virtio_pci_ops_09;
	sc->sc_bus_endian    = READ_ENDIAN_09;
	sc->sc_struct_endian = STRUCT_ENDIAN_09;
	return 0;
}


static int
virtio_pci_attach_10(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct virtio_softc * const sc = &psc->sc_sc;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;

	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };
	int bars_idx[NMAPREG] = { 0 };
	struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
	int i, j, ret = 0;

	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_COMMON_CFG,
			&common, sizeof(common)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_NOTIFY_CFG,
			&notify, sizeof(notify)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_ISR_CFG,
			&isr, sizeof(isr)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_DEVICE_CFG,
			&device, sizeof(device)))
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;

	/* Figure out which bars we need to map */
	for (i = 0; i < __arraycount(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;
		if (caps[i]->length == 0)
			continue;
		if (bars[bar] < len)
			bars[bar] = len;
	}

	for (i = j = 0; i < __arraycount(bars); i++) {
		int reg;
		pcireg_t type;
		if (bars[i] == 0)
			continue;
		reg = PCI_MAPREG_START + i * 4;
		type = pci_mapreg_type(pc, tag, reg);
		if (pci_mapreg_map(pa, reg, type, 0,
				&psc->sc_bars_iot[j], &psc->sc_bars_ioh[j],
				NULL, &psc->sc_bars_iosize[j])) {
			aprint_error_dev(self, "can't map bar %d\n", i);
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "bar[%d]: iot %p, size 0x%" PRIxBUSSIZE "\n",
		    j, psc->sc_bars_iot[j], psc->sc_bars_iosize[j]);
		bars_idx[i] = j;
		j++;
	}

	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
			notify.cap.offset, notify.cap.length,
			&psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_notify_iosize = notify.cap.length;
	psc->sc_notify_iot = psc->sc_bars_iot[i];
	psc->sc_notify_off_multiplier = le32toh(notify.notify_off_multiplier);

	if (have_device_cfg) {
		i = bars_idx[device.bar];
		if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
				device.offset, device.length,
				&sc->sc_devcfg_ioh)) {
			aprint_error_dev(self, "can't map devcfg i/o space\n");
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
			"device.offset = 0x%x, device.length = 0x%x\n",
			device.offset, device.length);
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = psc->sc_bars_iot[i];
	}

	i = bars_idx[isr.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
			isr.offset, isr.length, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_isr_iosize = isr.length;
	psc->sc_isr_iot = psc->sc_bars_iot[i];

	i = bars_idx[common.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
			common.offset, common.length, &psc->sc_ioh)) {
		aprint_error_dev(self, "can't map common i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_iosize = common.length;
	psc->sc_iot = psc->sc_bars_iot[i];
	psc->sc_mapped_iosize = psc->sc_bars_iosize[i];

	psc->sc_sc.sc_version_1 = 1;

	/* set our version 1.0 ops */
	sc->sc_ops = &virtio_pci_ops_10;
	sc->sc_bus_endian    = READ_ENDIAN_10;
	sc->sc_struct_endian = STRUCT_ENDIAN_10;
	return 0;

err:
	/* undo our pci_mapreg_map()s */
	for (i = 0; i < __arraycount(bars); i++) {
		if (psc->sc_bars_iosize[i] == 0)
			continue;
		bus_space_unmap(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
				psc->sc_bars_iosize[i]);
	}
	return ret;
}

/* v1.0 attach helper */
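/*
 * Scan the vendor-specific PCI capability list for an entry of the
 * requested cfg_type. Per the virtio 1.0 spec, each such capability
 * starts with a struct virtio_pci_cap header (cap_vndr, cap_next,
 * cap_len, cfg_type, bar) followed by the 32-bit offset and length of
 * the structure within that BAR, both little endian.
 */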
static int
virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen)
{
	device_t self = psc->sc_sc.sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	unsigned int offset, i, len;
	union {
		pcireg_t reg[8];
		struct virtio_pci_cap vcap;
	} *v = buf;

	if (buflen < sizeof(struct virtio_pci_cap))
		return ERANGE;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v->reg[0]))
		return ENOENT;

	do {
		for (i = 0; i < 4; i++)
			v->reg[i] =
				le32toh(pci_conf_read(pc, tag, offset + i * 4));
		if (v->vcap.cfg_type == cfg_type)
			break;
		offset = v->vcap.cap_next;
	} while (offset != 0);

	if (offset == 0)
		return ENOENT;

	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
		if (len > buflen) {
			aprint_error_dev(self, "%s cap too large\n", __func__);
			return ERANGE;
		}
		for (i = 4; i < len / sizeof(pcireg_t); i++)
			v->reg[i] =
				le32toh(pci_conf_read(pc, tag, offset + i * 4));
	}

	/* endian fixup */
	v->vcap.offset = le32toh(v->vcap.offset);
	v->vcap.length = le32toh(v->vcap.length);
	return 0;
}


/* -------------------------------------
 * Version 0.9 support
 * -------------------------------------*/

static void
virtio_pci_kick_09(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, 0, idx);
}

/* only does real work for v0.9; also called for v1.0, where it is a no-op */
static int
virtio_pci_adjust_config_region(struct virtio_pci_softc *psc)
{
	struct virtio_softc * const sc = (struct virtio_softc *) psc;
	device_t self = psc->sc_sc.sc_dev;

	if (psc->sc_sc.sc_version_1)
		return 0;

	sc->sc_devcfg_iosize = psc->sc_iosize - psc->sc_devcfg_offset;
	sc->sc_devcfg_iot = psc->sc_iot;
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
			psc->sc_devcfg_offset, sc->sc_devcfg_iosize,
			&sc->sc_devcfg_ioh)) {
		aprint_error_dev(self, "can't map config i/o space\n");
		return EIO;
	}

	return 0;
}

static uint16_t
virtio_pci_read_queue_size_09(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	return bus_space_read_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SIZE);
}

static void
virtio_pci_setup_queue_09(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (sc->sc_child_mq)
			vec += idx;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh,
		    VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec);
	}
}

static void
virtio_pci_set_status_09(struct virtio_softc *sc, int status)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int old = 0;

	if (status != 0) {
		old = bus_space_read_1(psc->sc_iot, psc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
	}
	bus_space_write_1(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_STATUS, status | old);
}

static void
virtio_pci_negotiate_features_09(struct virtio_softc *sc, uint64_t guest_features)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	uint32_t r;

	r = bus_space_read_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_FEATURES);

	r &= guest_features;

	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_GUEST_FEATURES, r);

	sc->sc_active_features = r;
}

/* -------------------------------------
 * Version 1.0 support
 * -------------------------------------*/

static void
virtio_pci_kick_10(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	unsigned offset = sc->sc_vqs[idx].vq_notify_off *
		psc->sc_notify_off_multiplier;

	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, offset, idx);
}


static uint16_t
virtio_pci_read_queue_size_10(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t	   iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, idx);
	return bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SIZE);
}

/*
 * The v1.0 config space is little endian by definition, and an 8-byte
 * register may be written as two 4-byte writes.
 *
 * This is not a general purpose function that can be used in any
 * driver. Virtio specifically allows the 8-byte bus transaction
 * to be split into two 4-byte transactions. Do not copy/use it
 * in other device drivers unless you know that the device accepts it.
 */
static __inline void
virtio_pci_bus_space_write_8(bus_space_tag_t iot, bus_space_handle_t ioh,
     bus_size_t offset, uint64_t value)
{
#if defined(__HAVE_BUS_SPACE_8)
	bus_space_write_8(iot, ioh, offset, value);
#elif _QUAD_HIGHWORD
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_LO32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_HI32(value));
#else
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_HI32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_LO32(value));
#endif
}

static void
virtio_pci_setup_queue_10(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	struct virtqueue *vq = &sc->sc_vqs[idx];
	bus_space_tag_t	   iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	KASSERT(vq->vq_index == idx);

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, vq->vq_index);
	if (addr == 0) {
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_ENABLE, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC,   0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL,  0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED,   0);
	} else {
		virtio_pci_bus_space_write_8(iot, ioh,
			VIRTIO_CONFIG1_QUEUE_DESC, addr);
		virtio_pci_bus_space_write_8(iot, ioh,
			VIRTIO_CONFIG1_QUEUE_AVAIL, addr + vq->vq_availoffset);
		virtio_pci_bus_space_write_8(iot, ioh,
			VIRTIO_CONFIG1_QUEUE_USED, addr + vq->vq_usedoffset);
		bus_space_write_2(iot, ioh,
			VIRTIO_CONFIG1_QUEUE_ENABLE, 1);
		vq->vq_notify_off = bus_space_read_2(iot, ioh,
			VIRTIO_CONFIG1_QUEUE_NOTIFY_OFF);
	}

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (sc->sc_child_mq)
			vec += idx;
		bus_space_write_2(iot, ioh,
			VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, vec);
	}
}

static void
virtio_pci_set_status_10(struct virtio_softc *sc, int status)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t	   iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int old = 0;

	if (status)
		old = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
	bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS, status | old);
}

static void
virtio_pci_negotiate_features_10(struct virtio_softc *sc, uint64_t guest_features)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self          =  sc->sc_dev;
	bus_space_tag_t	   iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	uint64_t host, negotiated, device_status;

	guest_features |= VIRTIO_F_VERSION_1;
	/* notify on empty is 0.9 only */
	guest_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
	sc->sc_active_features = 0;

	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 0);
	host = bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 1);
	host |= (uint64_t)
		bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE) << 32;

	negotiated = host & guest_features;

	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 0);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
			negotiated & 0xffffffff);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 1);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
			negotiated >> 32);
	virtio_pci_set_status_10(sc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);

	device_status = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
	if ((device_status & VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
		aprint_error_dev(self, "feature negotiation failed\n");
		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
				VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
		aprint_error_dev(self, "host rejected version 1\n");
		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
				VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	sc->sc_active_features = negotiated;
	return;
}


/* -------------------------------------
 * Generic PCI interrupt code
 * -------------------------------------*/

static int
virtio_pci_setup_msix_vectors_10(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self          =  sc->sc_dev;
	bus_space_tag_t	   iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int vector, ret, qid;

	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	bus_space_write_2(iot, ioh,
		VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR, vector);
	ret = bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR);
	if (ret != vector) {
		aprint_error_dev(self, "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (sc->sc_child_mq)
			vector += qid;
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, qid);
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR,
			vector);
		ret = bus_space_read_2(iot, ioh,
			VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR);
		if (ret != vector) {
			aprint_error_dev(self, "can't set queue %d "
				"msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}

static int
virtio_pci_setup_msix_vectors_09(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	int offset, vector, ret, qid;

	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
	ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
	aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
	    vector, ret);
	if (ret != vector) {
		aprint_error_dev(self, "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		offset = VIRTIO_CONFIG_QUEUE_SELECT;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, qid);

		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (sc->sc_child_mq)
			vector += qid;

		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
		ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
		aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
		    vector, ret);
		if (ret != vector) {
			aprint_error_dev(self, "can't set queue %d "
				"msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}

static int
virtio_pci_setup_msix_interrupts(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	struct virtqueue *vq;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[INTRDEVNAMEBUF];
	char const *intrstr;
	int idx, qid, n;
	int ret;

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	snprintf(intr_xname, sizeof(intr_xname), "%s config",
	    device_xname(sc->sc_dev));

	psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
	    sc->sc_ipl, virtio_pci_msix_config_intr, sc, intr_xname);
	if (psc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self, "couldn't establish MSI-X for config\n");
		goto error;
	}

	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_child_mq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			vq = &sc->sc_vqs[qid];

			snprintf(intr_xname, sizeof(intr_xname), "%s vq#%d",
			    device_xname(sc->sc_dev), qid);

			if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
				pci_intr_setattr(pc, &psc->sc_ihp[n],
				    PCI_INTR_MPSAFE, true);
			}

			psc->sc_ihs[n] = pci_intr_establish_xname(pc, psc->sc_ihp[n],
			    sc->sc_ipl, vq->vq_intrhand, vq->vq_intrhand_arg, intr_xname);
			if (psc->sc_ihs[n] == NULL) {
				aprint_error_dev(self, "couldn't establish MSI-X for a vq\n");
				goto error;
			}
		}
	} else {
		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
			pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

		snprintf(intr_xname, sizeof(intr_xname), "%s queues",
		    device_xname(sc->sc_dev));
		psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
		    sc->sc_ipl, virtio_pci_msix_queue_intr, sc, intr_xname);
		if (psc->sc_ihs[idx] == NULL) {
			aprint_error_dev(self, "couldn't establish MSI-X for queues\n");
			goto error;
		}
	}

	if (sc->sc_version_1) {
		ret = virtio_pci_setup_msix_vectors_10(sc);
	} else {
		ret = virtio_pci_setup_msix_vectors_09(sc);
	}
	if (ret) {
		aprint_error_dev(self, "couldn't setup MSI-X vectors\n");
		goto error;
	}

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_child_mq) {
		kcpuset_t *affinity;
		int affinity_to, r;

		kcpuset_create(&affinity, false);

		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			affinity_to = (qid / 2) % ncpu;

			intrstr = pci_intr_string(pc, psc->sc_ihp[n],
			    intrbuf, sizeof(intrbuf));

			kcpuset_zero(affinity);
			kcpuset_set(affinity, affinity_to);
			r = interrupt_distribute(psc->sc_ihs[n], affinity, NULL);
			if (r == 0) {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s affinity to %u\n",
				    qid, intrstr, affinity_to);
			} else {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s\n",
				    qid, intrstr);
			}
		}

		kcpuset_destroy(affinity);
	} else {
		intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
		aprint_normal_dev(self, "queues interrupting at %s\n", intrstr);
	}

	return 0;

error:
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (psc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_child_mq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			if (psc->sc_ihs[n] == NULL)
				continue;
			pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[n]);
		}

	} else {
		if (psc->sc_ihs[idx] != NULL)
			pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	}

	return -1;
}

static int
virtio_pci_setup_intx_interrupt(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;

	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[0], PCI_INTR_MPSAFE, true);

	psc->sc_ihs[0] = pci_intr_establish_xname(pc, psc->sc_ihp[0],
	    sc->sc_ipl, virtio_pci_intr, sc, device_xname(sc->sc_dev));
	if (psc->sc_ihs[0] == NULL) {
		aprint_error_dev(self, "couldn't establish INTx\n");
		return -1;
	}

	intrstr = pci_intr_string(pc, psc->sc_ihp[0], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	return 0;
}

static int
virtio_pci_setup_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	int error;
	int nmsix;
	int off;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
	pcireg_t ctl;

	nmsix = pci_msix_count(psc->sc_pa.pa_pc, psc->sc_pa.pa_tag);
	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);

	/* We need at least two: one for config and the other for queues */
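	/* (per-queue vectors: 1 + nvqs; shared queue vector: 2) */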
	if ((sc->sc_flags & VIRTIO_F_INTR_MSIX) == 0 || nmsix < 2) {
		/* Try INTx only */
		max_type = PCI_INTR_TYPE_INTX;
		counts[PCI_INTR_TYPE_INTX] = 1;
	} else {
		/* Try MSI-X first and INTx second */
		if (sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX <= nmsix) {
			nmsix = sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		} else {
			sc->sc_child_mq = false;
		}

		if (sc->sc_child_mq == false) {
			nmsix = 2;
		}

		max_type = PCI_INTR_TYPE_MSIX;
		counts[PCI_INTR_TYPE_MSIX] = nmsix;
		counts[PCI_INTR_TYPE_MSI] = 0;
		counts[PCI_INTR_TYPE_INTX] = 1;
	}

retry:
	error = pci_intr_alloc(&psc->sc_pa, &psc->sc_ihp, counts, max_type);
	if (error != 0) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return -1;
	}

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * nmsix,
		    KM_SLEEP);

		error = virtio_pci_setup_msix_interrupts(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * nmsix);
			pci_intr_release(pc, psc->sc_ihp, nmsix);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		psc->sc_ihs_num = nmsix;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
		virtio_pci_adjust_config_region(psc);
	} else if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * 1,
		    KM_SLEEP);

		error = virtio_pci_setup_intx_interrupt(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * 1);
			pci_intr_release(pc, psc->sc_ihp, 1);
			return -1;
		}

		psc->sc_ihs_num = 1;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
		virtio_pci_adjust_config_region(psc);

		/*
		 * pci_get_capability() returns non-zero when the capability
		 * exists; if the device has MSI-X, make sure it stays
		 * disabled while we use INTx.
		 */
		error = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
		if (error != 0) {
			ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
			ctl &= ~PCI_MSIX_CTL_ENABLE;
			pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
		}
	}

	return 0;
}

static void
virtio_pci_free_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	for (int i = 0; i < psc->sc_ihs_num; i++) {
		if (psc->sc_ihs[i] == NULL)
			continue;
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[i]);
		psc->sc_ihs[i] = NULL;
	}

	if (psc->sc_ihs_num > 0)
		pci_intr_release(psc->sc_pa.pa_pc, psc->sc_ihp, psc->sc_ihs_num);

	if (psc->sc_ihs != NULL) {
		kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * psc->sc_ihs_num);
		psc->sc_ihs = NULL;
	}
	psc->sc_ihs_num = 0;
}

/*
 * Interrupt handler.
 */
static int
virtio_pci_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(psc->sc_isr_iot, psc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (sc->sc_config_change != NULL))
		r = (sc->sc_config_change)(sc);
	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_pci_msix_queue_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_pci_msix_config_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_config_change != NULL)
		r = (sc->sc_config_change)(sc);
	return r;
}

MODULE(MODULE_CLASS_DRIVER, virtio_pci, "pci,virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_pci_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio_pci,
		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio_pci,
		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}