xref: /openbsd-src/sys/dev/pci/virtio_pci.c (revision 824adb5411e4389b29bae28eba5c2c2bbd147f34)
1 /*	$OpenBSD: virtio_pci.c,v 1.30 2021/09/03 14:04:35 patrick Exp $	*/
2 /*	$NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $	*/
3 
4 /*
5  * Copyright (c) 2012 Stefan Fritsch.
6  * Copyright (c) 2010 Minoura Makoto.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/device.h>
33 #include <sys/mutex.h>
34 
35 #include <dev/pci/pcidevs.h>
36 #include <dev/pci/pcireg.h>
37 #include <dev/pci/pcivar.h>
38 #include <dev/pci/virtio_pcireg.h>
39 
40 #include <dev/pv/virtioreg.h>
41 #include <dev/pv/virtiovar.h>
42 #include <dev/pci/virtio_pcireg.h>
43 
44 #define DNPRINTF(n,x...)				\
45     do { if (VIRTIO_DEBUG >= n) printf(x); } while(0)
46 
47 
48 /*
49  * XXX: Before being used on big endian arches, the access to config registers
50  * XXX: needs to be reviewed/fixed. The non-device specific registers are
51  * XXX: PCI-endian while the device specific registers are native endian.
52  */
53 
54 #define MAX_MSIX_VECS	8
55 
56 struct virtio_pci_softc;
57 
58 int		virtio_pci_match(struct device *, void *, void *);
59 void		virtio_pci_attach(struct device *, struct device *, void *);
60 int		virtio_pci_attach_09(struct virtio_pci_softc *sc, struct pci_attach_args *pa);
61 int		virtio_pci_attach_10(struct virtio_pci_softc *sc, struct pci_attach_args *pa);
62 int		virtio_pci_detach(struct device *, int);
63 
64 void		virtio_pci_kick(struct virtio_softc *, uint16_t);
65 int		virtio_pci_adjust_config_region(struct virtio_pci_softc *);
66 uint8_t		virtio_pci_read_device_config_1(struct virtio_softc *, int);
67 uint16_t	virtio_pci_read_device_config_2(struct virtio_softc *, int);
68 uint32_t	virtio_pci_read_device_config_4(struct virtio_softc *, int);
69 uint64_t	virtio_pci_read_device_config_8(struct virtio_softc *, int);
70 void		virtio_pci_write_device_config_1(struct virtio_softc *, int, uint8_t);
71 void		virtio_pci_write_device_config_2(struct virtio_softc *, int, uint16_t);
72 void		virtio_pci_write_device_config_4(struct virtio_softc *, int, uint32_t);
73 void		virtio_pci_write_device_config_8(struct virtio_softc *, int, uint64_t);
74 uint16_t	virtio_pci_read_queue_size(struct virtio_softc *, uint16_t);
75 void		virtio_pci_setup_queue(struct virtio_softc *, struct virtqueue *, uint64_t);
76 void		virtio_pci_set_status(struct virtio_softc *, int);
77 int		virtio_pci_negotiate_features(struct virtio_softc *, const struct virtio_feature_name *);
78 int		virtio_pci_negotiate_features_10(struct virtio_softc *, const struct virtio_feature_name *);
79 void		virtio_pci_set_msix_queue_vector(struct virtio_pci_softc *, uint32_t, uint16_t);
80 void		virtio_pci_set_msix_config_vector(struct virtio_pci_softc *, uint16_t);
81 int		virtio_pci_msix_establish(struct virtio_pci_softc *, struct pci_attach_args *, int, int (*)(void *), void *);
82 int		virtio_pci_setup_msix(struct virtio_pci_softc *, struct pci_attach_args *, int);
83 void		virtio_pci_free_irqs(struct virtio_pci_softc *);
84 int		virtio_pci_poll_intr(void *);
85 int		virtio_pci_legacy_intr(void *);
86 int		virtio_pci_legacy_intr_mpsafe(void *);
87 int		virtio_pci_config_intr(void *);
88 int		virtio_pci_queue_intr(void *);
89 int		virtio_pci_shared_queue_intr(void *);
90 int		virtio_pci_find_cap(struct virtio_pci_softc *sc, int cfg_type, void *buf, int buflen);
91 #if VIRTIO_DEBUG
92 void virtio_pci_dump_caps(struct virtio_pci_softc *sc);
93 #endif
94 
/* Interrupt delivery modes, chosen at attach time. */
enum irq_type {
	IRQ_NO_MSIX,
	IRQ_MSIX_SHARED, /* vec 0: config irq, vec 1 shared by all vqs */
	IRQ_MSIX_PER_VQ, /* vec 0: config irq, vec n: irq of vq[n-1] */
};
100 
struct virtio_pci_softc {
	struct virtio_softc	sc_sc;	/* generic virtio state; must stay first
					 * (code casts virtio_softc * to
					 * virtio_pci_softc *) */
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_ptag;

	/* Common config registers (0.9: whole i/o bar; 1.0: common cfg cap) */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;

	/* Bars mapped for virtio 1.0, filled in order of mapping */
	bus_space_tag_t		sc_bars_iot[4];
	bus_space_handle_t	sc_bars_ioh[4];
	bus_size_t		sc_bars_iosize[4];

	/* Queue notification region (see virtio_pci_kick()) */
	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;
	unsigned int		sc_notify_off_multiplier;

	/* Device-specific configuration region */
	bus_space_tag_t		sc_devcfg_iot;
	bus_space_handle_t	sc_devcfg_ioh;
	bus_size_t		sc_devcfg_iosize;
	/*
	 * With 0.9, the offset of the devcfg region in the io bar changes
	 * depending on MSI-X being enabled or not.
	 * With 1.0, this field is still used to remember if MSI-X is enabled
	 * or not.
	 */
	unsigned int		sc_devcfg_offset;

	/* ISR status register; read also acks a legacy interrupt */
	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	void			*sc_ih[MAX_MSIX_VECS];	/* established handlers */

	enum irq_type		sc_irq_type;
};
138 
/* Autoconf glue: match/attach/detach entry points for virtio at pci. */
struct cfattach virtio_pci_ca = {
	sizeof(struct virtio_pci_softc),
	virtio_pci_match,
	virtio_pci_attach,
	virtio_pci_detach,
	NULL
};
146 
/* PCI transport implementation of the generic virtio operations. */
struct virtio_ops virtio_pci_ops = {
	virtio_pci_kick,
	virtio_pci_read_device_config_1,
	virtio_pci_read_device_config_2,
	virtio_pci_read_device_config_4,
	virtio_pci_read_device_config_8,
	virtio_pci_write_device_config_1,
	virtio_pci_write_device_config_2,
	virtio_pci_write_device_config_4,
	virtio_pci_write_device_config_8,
	virtio_pci_read_queue_size,
	virtio_pci_setup_queue,
	virtio_pci_set_status,
	virtio_pci_negotiate_features,
	virtio_pci_poll_intr,
};
163 
/*
 * Read a 1/2/4/8 byte field from the common config region (sc_ioh).
 * 8 byte fields are read as two 32-bit accesses, high word first,
 * since bus_space has no 64-bit read here; the access is not atomic.
 * Only called via CREAD(), so "size" is always a valid field size.
 */
static inline
uint64_t _cread(struct virtio_pci_softc *sc, unsigned off, unsigned size)
{
	uint64_t val;
	switch (size) {
	case 1:
		val = bus_space_read_1(sc->sc_iot, sc->sc_ioh, off);
		break;
	case 2:
		val = bus_space_read_2(sc->sc_iot, sc->sc_ioh, off);
		break;
	case 4:
		val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
		break;
	case 8:
		val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
		    off + sizeof(uint32_t));
		val <<= 32;
		val += bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
		break;
	}
	return val;
}
187 
/*
 * CREAD(sc, memb): read field "memb" of struct virtio_pci_common_cfg
 * from the mapped common config region, at the field's natural width.
 */
#define CREAD(sc, memb)  _cread(sc, offsetof(struct virtio_pci_common_cfg, memb), \
    sizeof(((struct virtio_pci_common_cfg *)0)->memb))

/*
 * CWRITE(sc, memb, val): write field "memb" of struct
 * virtio_pci_common_cfg. 64-bit fields are written as two 32-bit
 * accesses, low word first.
 */
#define CWRITE(sc, memb, val)							\
	do {									\
		struct virtio_pci_common_cfg c;					\
		size_t off = offsetof(struct virtio_pci_common_cfg, memb);	\
		size_t size = sizeof(c.memb);					\
										\
		DNPRINTF(2, "%s: %d: off %#zx size %#zx write %#llx\n",		\
		    __func__, __LINE__, off, size, (unsigned long long)val);	\
		switch (size) {							\
		case 1:								\
			bus_space_write_1(sc->sc_iot, sc->sc_ioh, off, val);	\
			break;							\
		case 2:								\
			bus_space_write_2(sc->sc_iot, sc->sc_ioh, off, val);	\
			break;							\
		case 4:								\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);	\
			break;							\
		case 8:								\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, off,		\
			    (val) & 0xffffffff);				\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh,		\
			    (off) + sizeof(uint32_t), (uint64_t)(val) >> 32);	\
			break;							\
		}								\
	} while (0)
217 
/*
 * Return the ring size of virtqueue "idx": select the queue, then read
 * its size register (1.0 common cfg or 0.9 legacy register layout).
 */
uint16_t
virtio_pci_read_queue_size(struct virtio_softc *vsc, uint16_t idx)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint16_t ret;
	if (sc->sc_sc.sc_version_1) {
		CWRITE(sc, queue_select, idx);
		ret = CREAD(sc, queue_size);
	} else {
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SELECT, idx);
		ret = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SIZE);
	}
	return ret;
}
234 
/*
 * Program the ring address(es) of virtqueue "vq" into the device.
 * addr == 0 disables the queue (1.0 also clears the enable bit and
 * all three ring addresses). For 1.0, the per-queue notify offset is
 * latched into vq->vq_notify_off for later use by virtio_pci_kick().
 * If MSI-X is already set up, the queue's vector is (re)programmed,
 * too.
 */
void
virtio_pci_setup_queue(struct virtio_softc *vsc, struct virtqueue *vq,
    uint64_t addr)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	if (sc->sc_sc.sc_version_1) {
		CWRITE(sc, queue_select, vq->vq_index);
		if (addr == 0) {
			CWRITE(sc, queue_enable, 0);
			CWRITE(sc, queue_desc, 0);
			CWRITE(sc, queue_avail, 0);
			CWRITE(sc, queue_used, 0);
		} else {
			CWRITE(sc, queue_desc, addr);
			CWRITE(sc, queue_avail, addr + vq->vq_availoffset);
			CWRITE(sc, queue_used, addr + vq->vq_usedoffset);
			CWRITE(sc, queue_enable, 1);
			vq->vq_notify_off = CREAD(sc, queue_notify_off);
		}
	} else {
		/* 0.9 takes a single page-frame number for the whole ring */
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SELECT, vq->vq_index);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);
	}

	/*
	 * This path is only executed if this function is called after
	 * the child's attach function has finished. In other cases,
	 * it's done in virtio_pci_setup_msix().
	 */
	if (sc->sc_irq_type != IRQ_NO_MSIX) {
		int vec = 1;
		if (sc->sc_irq_type == IRQ_MSIX_PER_VQ)
		       vec += vq->vq_index;
		if (sc->sc_sc.sc_version_1) {
			CWRITE(sc, queue_msix_vector, vec);
		} else {
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_MSI_QUEUE_VECTOR, vec);
		}
	}
}
278 
/*
 * Set device status bits. A nonzero "status" is OR-ed into the
 * current status; status == 0 writes 0 outright (device reset).
 */
void
virtio_pci_set_status(struct virtio_softc *vsc, int status)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	int old = 0;

	if (sc->sc_sc.sc_version_1) {
		if (status != 0)
			old = CREAD(sc, device_status);
		CWRITE(sc, device_status, status|old);
	} else {
		if (status != 0)
			old = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_DEVICE_STATUS);
		bus_space_write_1(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
	}
}
297 
298 int
299 virtio_pci_match(struct device *parent, void *match, void *aux)
300 {
301 	struct pci_attach_args *pa;
302 
303 	pa = (struct pci_attach_args *)aux;
304 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OPENBSD &&
305 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_OPENBSD_CONTROL)
306 		return 1;
307 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_QUMRANET)
308 		return 0;
309 	/* virtio 0.9 */
310 	if (PCI_PRODUCT(pa->pa_id) >= 0x1000 &&
311 	    PCI_PRODUCT(pa->pa_id) <= 0x103f &&
312 	    PCI_REVISION(pa->pa_class) == 0)
313 		return 1;
314 	/* virtio 1.0 */
315 	if (PCI_PRODUCT(pa->pa_id) >= 0x1040 &&
316 	    PCI_PRODUCT(pa->pa_id) <= 0x107f &&
317 	    PCI_REVISION(pa->pa_class) == 1)
318 		return 1;
319 	return 0;
320 }
321 
#if VIRTIO_DEBUG
/*
 * Debug helper: walk the vendor-specific PCI capability list and print
 * each virtio capability (type, bar, offset, length).
 */
void
virtio_pci_dump_caps(struct virtio_pci_softc *sc)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_ptag;
	int offset;
	union {
		pcireg_t reg[4];
		struct virtio_pci_cap vcap;
	} v;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v.reg[0]))
		return;

	printf("\n");
	do {
		/* read the 16-byte capability header in 4 config dwords */
		for (int i = 0; i < 4; i++)
			v.reg[i] = pci_conf_read(pc, tag, offset + i * 4);
		printf("%s: cfgoff %#x len %#x type %#x bar %#x: off %#x len %#x\n",
			__func__, offset, v.vcap.cap_len, v.vcap.cfg_type, v.vcap.bar,
			v.vcap.offset, v.vcap.length);
		offset = v.vcap.cap_next;
	} while (offset != 0);
}
#endif
348 
/*
 * Find the virtio vendor capability of type "cfg_type" and copy it
 * into "buf" (at least sizeof(struct virtio_pci_cap) bytes; larger
 * caps such as the notify cap are copied in full if buflen allows).
 * Returns 0 on success, ENOENT if not found, ERANGE if buf is too
 * small.
 */
int
virtio_pci_find_cap(struct virtio_pci_softc *sc, int cfg_type, void *buf, int buflen)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_ptag;
	unsigned int offset, i, len;
	union {
		pcireg_t reg[8];
		struct virtio_pci_cap vcap;
	} *v = buf;

	if (buflen < sizeof(struct virtio_pci_cap))
		return ERANGE;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v->reg[0]))
		return ENOENT;

	/* walk the capability chain until the wanted cfg_type is found */
	do {
		for (i = 0; i < 4; i++)
			v->reg[i] = pci_conf_read(pc, tag, offset + i * 4);
		if (v->vcap.cfg_type == cfg_type)
			break;
		offset = v->vcap.cap_next;
	} while (offset != 0);

	if (offset == 0)
		return ENOENT;

	/* copy any bytes of the capability beyond the generic header */
	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
		if (len > buflen) {
			printf("%s: cap too large\n", __func__);
			return ERANGE;
		}
		for (i = 4; i < len / sizeof(pcireg_t);  i++)
			v->reg[i] = pci_conf_read(pc, tag, offset + i * 4);
	}

	return 0;
}
389 
390 
391 #define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
392 				sizeof(pcireg_t))
393 
/*
 * Attach as a virtio 1.0 ("modern") device: locate the vendor
 * capabilities for the common, notify, isr and (optional) device
 * config regions, map each bar they reference once, then carve out a
 * bus_space subregion per register set. Returns 0 on success, ENODEV
 * if a mandatory capability is missing, EIO on mapping failure.
 */
int
virtio_pci_attach_10(struct virtio_pci_softc *sc, struct pci_attach_args *pa)
{
	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };
	int bars_idx[NMAPREG] = { 0 };
	struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
	int i, j = 0, ret = 0;

	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_COMMON_CFG, &common, sizeof(common)) != 0)
		return ENODEV;

	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_NOTIFY_CFG, &notify, sizeof(notify)) != 0)
		return ENODEV;
	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_ISR_CFG, &isr, sizeof(isr)) != 0)
		return ENODEV;
	/* the device config region is optional (some devices have none) */
	if (virtio_pci_find_cap(sc, VIRTIO_PCI_CAP_DEVICE_CFG, &device, sizeof(device)) != 0)
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;

	/*
	 * XXX Maybe there are devices that offer the pci caps but not the
	 * XXX VERSION_1 feature bit? Then we should check the feature bit
	 * XXX here and fall back to 0.9 out if not present.
	 */

	/* Figure out which bars we need to map */
	for (i = 0; i < nitems(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;
		if (caps[i]->length == 0)
			continue;
		if (bars[bar] < len)
			bars[bar] = len;
	}

	/* map each needed bar once; bars_idx[bar] -> sc_bars_* slot */
	for (i = 0; i < nitems(bars); i++) {
		int reg;
		pcireg_t type;
		if (bars[i] == 0)
			continue;
		reg = PCI_MAPREG_START + i * 4;
		type = pci_mapreg_type(sc->sc_pc, sc->sc_ptag, reg);
		if (pci_mapreg_map(pa, reg, type, 0, &sc->sc_bars_iot[j],
		    &sc->sc_bars_ioh[j], NULL, &sc->sc_bars_iosize[j],
		    bars[i])) {
			printf("%s: can't map bar %u \n",
			    sc->sc_sc.sc_dev.dv_xname, i);
			ret = EIO;
			goto err;
		}
		bars_idx[i] = j;
		j++;
	}

	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    notify.cap.offset, notify.cap.length, &sc->sc_notify_ioh) != 0) {
		printf("%s: can't map notify i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_notify_iosize = notify.cap.length;
	sc->sc_notify_iot = sc->sc_bars_iot[i];
	sc->sc_notify_off_multiplier = notify.notify_off_multiplier;

	if (have_device_cfg) {
		i = bars_idx[device.bar];
		if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
		    device.offset, device.length, &sc->sc_devcfg_ioh) != 0) {
			printf("%s: can't map devcfg i/o space\n",
			    sc->sc_sc.sc_dev.dv_xname);
			ret = EIO;
			goto err;
		}
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = sc->sc_bars_iot[i];
	}

	i = bars_idx[isr.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    isr.offset, isr.length, &sc->sc_isr_ioh) != 0) {
		printf("%s: can't map isr i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_isr_iosize = isr.length;
	sc->sc_isr_iot = sc->sc_bars_iot[i];

	i = bars_idx[common.bar];
	if (bus_space_subregion(sc->sc_bars_iot[i], sc->sc_bars_ioh[i],
	    common.offset, common.length, &sc->sc_ioh) != 0) {
		printf("%s: can't map common i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		ret = EIO;
		goto err;
	}
	sc->sc_iosize = common.length;
	sc->sc_iot = sc->sc_bars_iot[i];

	sc->sc_sc.sc_version_1 = 1;
	return 0;

err:
	/* there is no pci_mapreg_unmap() */
	return ret;
}
506 
/*
 * Attach as a virtio 0.9 ("legacy") device: all registers live in the
 * first i/o bar; the notify (2 bytes) and ISR (1 byte) registers are
 * subregions at fixed offsets within it. Returns 0 or EIO.
 */
int
virtio_pci_attach_09(struct virtio_pci_softc *sc, struct pci_attach_args *pa)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	pcireg_t type;

	type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
	if (pci_mapreg_map(pa, PCI_MAPREG_START, type, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_iosize, 0)) {
		printf("%s: can't map i/o space\n", vsc->sc_dev.dv_xname);
		return EIO;
	}

	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &sc->sc_notify_ioh) != 0) {
		printf("%s: can't map notify i/o space\n",
		    vsc->sc_dev.dv_xname);
		return EIO;
	}
	sc->sc_notify_iosize = 2;
	sc->sc_notify_iot = sc->sc_iot;

	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS, 1, &sc->sc_isr_ioh) != 0) {
		printf("%s: can't map isr i/o space\n",
		    vsc->sc_dev.dv_xname);
		return EIO;
	}
	sc->sc_isr_iosize = 1;
	sc->sc_isr_iot = sc->sc_iot;

	return 0;
}
540 
/*
 * Autoconf attach: derive the child device id from the PCI revision
 * (rev 0: subsystem id; rev 1: product id - 0x1040), map registers via
 * the 1.0 or 0.9 layout, reset and acknowledge the device, attach the
 * matching child driver, then establish interrupts - preferring MSI-X
 * per-VQ, then MSI-X shared, then a single legacy/MSI vector - and
 * finally set DRIVER_OK.
 */
void
virtio_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
	struct virtio_softc *vsc = &sc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision, ret = ENODEV;
	pcireg_t id;
	char const *intrstr;
	pci_intr_handle_t ih;

	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* subsystem ID shows what I am */
		id = PCI_PRODUCT(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		id = PCI_PRODUCT(pa->pa_id) - 0x1040;
		break;
	default:
		printf("unknown revision 0x%02x; giving up\n", revision);
		return;
	}

	sc->sc_pc = pc;
	sc->sc_ptag = pa->pa_tag;
	vsc->sc_dmat = pa->pa_dmat;

#if defined(__i386__) || defined(__amd64__)
	/*
	 * For virtio, ignore normal MSI black/white-listing depending on the
	 * PCI bridge but enable it unconditionally.
	 */
	pa->pa_flags |= PCI_FLAGS_MSI_ENABLED;
#endif

#if VIRTIO_DEBUG
	virtio_pci_dump_caps(sc);
#endif

	/*
	 * Try the 1.0 layout first unless disabled by cf_flags; fall
	 * back to 0.9 only for revision 0 devices (which may be
	 * transitional).
	 */
	vsc->sc_ops = &virtio_pci_ops;
	if ((vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_VERSION_1) == 0 &&
	    (revision == 1 ||
	     (vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_PREFER_VERSION_1))) {
		ret = virtio_pci_attach_10(sc, pa);
	}
	if (ret != 0 && revision == 0) {
		/* revision 0 means 0.9 only or both 0.9 and 1.0 */
		ret = virtio_pci_attach_09(sc, pa);
	}
	if (ret != 0) {
		printf(": Cannot attach (%d)\n", ret);
		return;
	}

	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	sc->sc_irq_type = IRQ_NO_MSIX;
	if (virtio_pci_adjust_config_region(sc) != 0)
		return;

	virtio_device_reset(vsc);
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	printf("\n");
	vsc->sc_childdevid = id;
	vsc->sc_child = NULL;
	config_found(self, sc, NULL);
	if (vsc->sc_child == NULL) {
		printf("%s: no matching child driver; not configured\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}
	if (vsc->sc_child == VIRTIO_CHILD_ERROR) {
		printf("%s: virtio configuration failed\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}

	if (virtio_pci_setup_msix(sc, pa, 0) == 0) {
		sc->sc_irq_type = IRQ_MSIX_PER_VQ;
		intrstr = "msix per-VQ";
	} else if (virtio_pci_setup_msix(sc, pa, 1) == 0) {
		sc->sc_irq_type = IRQ_MSIX_SHARED;
		intrstr = "msix shared";
	} else {
		int (*ih_func)(void *) = virtio_pci_legacy_intr;
		if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
			printf("%s: couldn't map interrupt\n", vsc->sc_dev.dv_xname);
			goto fail_2;
		}
		intrstr = pci_intr_string(pc, ih);
		/*
		 * We always set the IPL_MPSAFE flag in order to do the relatively
		 * expensive ISR read without lock, and then grab the kernel lock in
		 * the interrupt handler.
		 */
		if (vsc->sc_ipl & IPL_MPSAFE)
			ih_func = virtio_pci_legacy_intr_mpsafe;
		sc->sc_ih[0] = pci_intr_establish(pc, ih, vsc->sc_ipl | IPL_MPSAFE,
		    ih_func, sc, vsc->sc_dev.dv_xname);
		if (sc->sc_ih[0] == NULL) {
			printf("%s: couldn't establish interrupt", vsc->sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			goto fail_2;
		}
	}
	printf("%s: %s\n", vsc->sc_dev.dv_xname, intrstr);

	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	return;

fail_2:
	config_detach(vsc->sc_child, 0);
fail_1:
	/* no pci_mapreg_unmap() or pci_intr_unmap() */
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
}
664 
/*
 * Autoconf detach: detach the child first (it must have freed its
 * virtqueues), then release interrupts and unmap the register space.
 */
int
virtio_pci_detach(struct device *self, int flags)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
	struct virtio_softc *vsc = &sc->sc_sc;
	int r;

	if (vsc->sc_child != 0 && vsc->sc_child != VIRTIO_CHILD_ERROR) {
		r = config_detach(vsc->sc_child, flags);
		if (r)
			return r;
	}
	KASSERT(vsc->sc_child == 0 || vsc->sc_child == VIRTIO_CHILD_ERROR);
	KASSERT(vsc->sc_vqs == 0);
	virtio_pci_free_irqs(sc);
	if (sc->sc_iosize)
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_iosize);
	sc->sc_iosize = 0;

	return 0;
}
686 
/*
 * (Re)map the device config subregion for 0.9 devices, whose devcfg
 * offset (sc_devcfg_offset) moves when MSI-X is enabled/disabled.
 * For 1.0 the region was mapped in attach_10 and nothing is done.
 * Returns 0 on success, 1 on subregion failure.
 */
int
virtio_pci_adjust_config_region(struct virtio_pci_softc *sc)
{
	if (sc->sc_sc.sc_version_1)
		return 0;
	sc->sc_devcfg_iosize = sc->sc_iosize - sc->sc_devcfg_offset;
	sc->sc_devcfg_iot = sc->sc_iot;
	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, sc->sc_devcfg_offset,
	    sc->sc_devcfg_iosize, &sc->sc_devcfg_ioh) != 0) {
		printf("%s: can't map config i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		return 1;
	}
	return 0;
}
702 
703 /*
704  * Feature negotiation.
705  * Prints available / negotiated features if guest_feature_names != NULL and
706  * VIRTIO_DEBUG is 1
707  */
708 int
709 virtio_pci_negotiate_features(struct virtio_softc *vsc,
710     const struct virtio_feature_name *guest_feature_names)
711 {
712 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
713 	uint64_t host, negotiated;
714 
715 	vsc->sc_active_features = 0;
716 
717 	/*
718 	 * We enable indirect descriptors by default. They can be switched
719 	 * off by setting bit 1 in the driver flags, see config(8)
720 	 */
721 	if (!(vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT) &&
722 	    !(vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT)) {
723 		vsc->sc_driver_features |= VIRTIO_F_RING_INDIRECT_DESC;
724 	} else if (guest_feature_names != NULL) {
725 		printf(" RingIndirectDesc disabled by UKC");
726 	}
727 
728 	/*
729 	 * The driver must add VIRTIO_F_RING_EVENT_IDX if it supports it.
730 	 * If it did, check if it is disabled by bit 2 in the driver flags.
731 	 */
732 	if ((vsc->sc_driver_features & VIRTIO_F_RING_EVENT_IDX) &&
733 	    ((vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_EVENT_IDX) ||
734 	    (vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_EVENT_IDX))) {
735 		if (guest_feature_names != NULL)
736 			printf(" RingEventIdx disabled by UKC");
737 		vsc->sc_driver_features &= ~VIRTIO_F_RING_EVENT_IDX;
738 	}
739 
740 	if (vsc->sc_version_1) {
741 		return virtio_pci_negotiate_features_10(vsc,
742 		    guest_feature_names);
743 	}
744 
745 	/* virtio 0.9 only */
746 	host = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
747 				VIRTIO_CONFIG_DEVICE_FEATURES);
748 	negotiated = host & vsc->sc_driver_features;
749 #if VIRTIO_DEBUG
750 	if (guest_feature_names)
751 		virtio_log_features(host, negotiated, guest_feature_names);
752 #endif
753 	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
754 			  VIRTIO_CONFIG_GUEST_FEATURES, negotiated);
755 	vsc->sc_active_features = negotiated;
756 	if (negotiated & VIRTIO_F_RING_INDIRECT_DESC)
757 		vsc->sc_indirect = 1;
758 	else
759 		vsc->sc_indirect = 0;
760 	return 0;
761 }
762 
/*
 * Virtio 1.0 feature negotiation: read the 64-bit host feature set in
 * two 32-bit halves via the feature-select registers, write back the
 * intersection with our driver features, set FEATURES_OK and verify
 * the device accepted it. VERSION_1 must end up negotiated; otherwise
 * the device is marked FAILED. Returns 0, ENXIO or EINVAL.
 */
int
virtio_pci_negotiate_features_10(struct virtio_softc *vsc,
    const struct virtio_feature_name *guest_feature_names)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t host, negotiated;

	vsc->sc_driver_features |= VIRTIO_F_VERSION_1;
	/* notify on empty is 0.9 only */
	vsc->sc_driver_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
	CWRITE(sc, device_feature_select, 0);
	host = CREAD(sc, device_feature);
	CWRITE(sc, device_feature_select, 1);
	host |= (uint64_t)CREAD(sc, device_feature) << 32;

	negotiated = host & vsc->sc_driver_features;
#if VIRTIO_DEBUG
	if (guest_feature_names)
		virtio_log_features(host, negotiated, guest_feature_names);
#endif
	CWRITE(sc, driver_feature_select, 0);
	CWRITE(sc, driver_feature, negotiated & 0xffffffff);
	CWRITE(sc, driver_feature_select, 1);
	CWRITE(sc, driver_feature, negotiated >> 32);
	virtio_pci_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);

	if ((CREAD(sc, device_status) &
	    VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
		printf("%s: Feature negotiation failed\n",
		    vsc->sc_dev.dv_xname);
		CWRITE(sc, device_status, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return ENXIO;
	}
	vsc->sc_active_features = negotiated;

	if (negotiated & VIRTIO_F_RING_INDIRECT_DESC)
		vsc->sc_indirect = 1;
	else
		vsc->sc_indirect = 0;

	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
#if VIRTIO_DEBUG
		printf("%s: Host rejected Version_1\n", __func__);
#endif
		CWRITE(sc, device_status, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return EINVAL;
	}
	return 0;
}
812 
813 /*
814  * Device configuration registers.
815  */
816 uint8_t
817 virtio_pci_read_device_config_1(struct virtio_softc *vsc, int index)
818 {
819 	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
820 	return bus_space_read_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
821 }
822 
/* Read a 16-bit device config value. */
uint16_t
virtio_pci_read_device_config_2(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	return bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
}
829 
/* Read a 32-bit device config value. */
uint32_t
virtio_pci_read_device_config_4(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	return bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
}
836 
/*
 * Read a 64-bit device config value as two 32-bit reads (high word
 * first); the combined access is not atomic.
 */
uint64_t
virtio_pci_read_device_config_8(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t r;

	r = bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
	    index + sizeof(uint32_t));
	r <<= 32;
	r += bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
	return r;
}
849 
/* Write an 8-bit device config value. */
void
virtio_pci_write_device_config_1(struct virtio_softc *vsc, int index,
    uint8_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
}
857 
/* Write a 16-bit device config value. */
void
virtio_pci_write_device_config_2(struct virtio_softc *vsc, int index,
    uint16_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
}
865 
/* Write a 32-bit device config value. */
void
virtio_pci_write_device_config_4(struct virtio_softc *vsc,
			     int index, uint32_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
}
873 
/*
 * Write a 64-bit device config value as two 32-bit writes (low word
 * first); the combined access is not atomic.
 */
void
virtio_pci_write_device_config_8(struct virtio_softc *vsc,
			     int index, uint64_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
	    index, value & 0xffffffff);
	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
	    index + sizeof(uint32_t), value >> 32);
}
884 
/*
 * Map MSI-X vector "idx" and establish "handler" on it, recording the
 * cookie in sc_ih[idx]. Returns 0 on success, 1 on failure.
 */
int
virtio_pci_msix_establish(struct virtio_pci_softc *sc,
    struct pci_attach_args *pa, int idx, int (*handler)(void *), void *ih_arg)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	pci_intr_handle_t ih;

	if (pci_intr_map_msix(pa, idx, &ih) != 0) {
#if VIRTIO_DEBUG
		printf("%s[%d]: pci_intr_map_msix failed\n",
		    vsc->sc_dev.dv_xname, idx);
#endif
		return 1;
	}
	sc->sc_ih[idx] = pci_intr_establish(sc->sc_pc, ih, vsc->sc_ipl,
	    handler, ih_arg, vsc->sc_dev.dv_xname);
	if (sc->sc_ih[idx] == NULL) {
		printf("%s[%d]: couldn't establish msix interrupt\n",
		    vsc->sc_dev.dv_xname, idx);
		return 1;
	}
	return 0;
}
908 
/*
 * Bind virtqueue "idx" to MSI-X vector "vector"
 * (VIRTIO_MSI_NO_VECTOR detaches it).
 */
void
virtio_pci_set_msix_queue_vector(struct virtio_pci_softc *sc, uint32_t idx, uint16_t vector)
{
	if (sc->sc_sc.sc_version_1) {
		CWRITE(sc, queue_select, idx);
		CWRITE(sc, queue_msix_vector, vector);
	} else {
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SELECT, idx);
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_MSI_QUEUE_VECTOR, vector);
	}
}
922 
/* Bind the config-change interrupt to MSI-X vector "vector". */
void
virtio_pci_set_msix_config_vector(struct virtio_pci_softc *sc, uint16_t vector)
{
	if (sc->sc_sc.sc_version_1) {
		CWRITE(sc, config_msix_vector, vector);
	} else {
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_MSI_CONFIG_VECTOR, vector);
	}
}
933 
934 
/*
 * Tear down all interrupts: detach every virtqueue from its MSI-X
 * vector (if MSI-X was in use), disestablish all handlers, and switch
 * the 0.9 device config region back to the no-MSI layout.
 */
void
virtio_pci_free_irqs(struct virtio_pci_softc *sc)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	int i;

	if (sc->sc_devcfg_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI) {
		for (i = 0; i < vsc->sc_nvqs; i++) {
			virtio_pci_set_msix_queue_vector(sc, i,
			    VIRTIO_MSI_NO_VECTOR);
		}
	}

	for (i = 0; i < MAX_MSIX_VECS; i++) {
		if (sc->sc_ih[i]) {
			pci_intr_disestablish(sc->sc_pc, sc->sc_ih[i]);
			sc->sc_ih[i] = NULL;
		}
	}

	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	virtio_pci_adjust_config_region(sc);
}
958 
959 int
960 virtio_pci_setup_msix(struct virtio_pci_softc *sc, struct pci_attach_args *pa,
961     int shared)
962 {
963 	struct virtio_softc *vsc = &sc->sc_sc;
964 	int i;
965 
966 	if (virtio_pci_msix_establish(sc, pa, 0, virtio_pci_config_intr, vsc))
967 		return 1;
968 	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
969 	virtio_pci_adjust_config_region(sc);
970 	virtio_pci_set_msix_config_vector(sc, 0);
971 
972 	if (shared) {
973 		if (virtio_pci_msix_establish(sc, pa, 1,
974 		    virtio_pci_shared_queue_intr, vsc)) {
975 			goto fail;
976 		}
977 
978 		for (i = 0; i < vsc->sc_nvqs; i++)
979 			virtio_pci_set_msix_queue_vector(sc, i, 1);
980 	} else {
981 		for (i = 0; i <= vsc->sc_nvqs; i++) {
982 			if (virtio_pci_msix_establish(sc, pa, i + 1,
983 			    virtio_pci_queue_intr, &vsc->sc_vqs[i])) {
984 				goto fail;
985 			}
986 			virtio_pci_set_msix_queue_vector(sc, i, i + 1);
987 		}
988 	}
989 
990 	return 0;
991 fail:
992 	virtio_pci_free_irqs(sc);
993 	return 1;
994 }
995 
996 /*
997  * Interrupt handler.
998  */
999 
1000 /*
1001  * Only used without MSI-X
1002  */
/*
 * Legacy (non-MSI-X) interrupt handler. Reading the ISR register also
 * acks the interrupt; the config-change callback and virtqueue
 * processing then run under the kernel lock (this variant is
 * established with IPL_MPSAFE so the ISR read itself is lock-free).
 */
int
virtio_pci_legacy_intr(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_isr_iot, sc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	KERNEL_LOCK();
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (vsc->sc_config_change != NULL)) {
		r = (vsc->sc_config_change)(vsc);
	}
	r |= virtio_check_vqs(vsc);
	KERNEL_UNLOCK();

	return r;
}
1024 
/*
 * As virtio_pci_legacy_intr(), but for children whose sc_ipl has
 * IPL_MPSAFE set: no kernel lock is taken around the callbacks.
 */
int
virtio_pci_legacy_intr_mpsafe(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_isr_iot, sc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (vsc->sc_config_change != NULL)) {
		r = (vsc->sc_config_change)(vsc);
	}
	r |= virtio_check_vqs(vsc);
	return r;
}
1043 
1044 /*
1045  * Only used with MSI-X
1046  */
1047 int
1048 virtio_pci_config_intr(void *arg)
1049 {
1050 	struct virtio_softc *vsc = arg;
1051 
1052 	if (vsc->sc_config_change != NULL)
1053 		return vsc->sc_config_change(vsc);
1054 	return 0;
1055 }
1056 
1057 /*
1058  * Only used with MSI-X
1059  */
1060 int
1061 virtio_pci_queue_intr(void *arg)
1062 {
1063 	struct virtqueue *vq = arg;
1064 
1065 	if (vq->vq_done)
1066 		return (vq->vq_done)(vq);
1067 	return 0;
1068 }
1069 
/* Shared MSI-X queue vector handler: check all virtqueues. */
int
virtio_pci_shared_queue_intr(void *arg)
{
	struct virtio_softc *vsc = arg;

	return virtio_check_vqs(vsc);
}
1077 
1078 /*
1079  * Interrupt handler to be used when polling.
1080  * We cannot use isr here because it is not defined in MSI-X mode.
1081  */
1082 int
1083 virtio_pci_poll_intr(void *arg)
1084 {
1085 	struct virtio_pci_softc *sc = arg;
1086 	struct virtio_softc *vsc = &sc->sc_sc;
1087 	int r = 0;
1088 
1089 	if (vsc->sc_config_change != NULL)
1090 		r = (vsc->sc_config_change)(vsc);
1091 
1092 	r |= virtio_check_vqs(vsc);
1093 
1094 	return r;
1095 }
1096 
/*
 * Notify the device that virtqueue "idx" has new buffers. For 1.0,
 * the write goes to vq_notify_off * notify_off_multiplier within the
 * notify region; for 0.9, the notify region is the single 2-byte
 * QUEUE_NOTIFY register, so offset stays 0.
 */
void
virtio_pci_kick(struct virtio_softc *vsc, uint16_t idx)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	unsigned offset = 0;
	if (vsc->sc_version_1) {
		offset = vsc->sc_vqs[idx].vq_notify_off *
		    sc->sc_notify_off_multiplier;
	}
	bus_space_write_2(sc->sc_notify_iot, sc->sc_notify_ioh, offset, idx);
}
1108