1 /*	$NetBSD: if_vioif.c,v 1.41 2018/06/26 06:48:01 msaitoh Exp $	*/
2 
3 /*
4  * Copyright (c) 2010 Minoura Makoto.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.41 2018/06/26 06:48:01 msaitoh Exp $");
30 
31 #ifdef _KERNEL_OPT
32 #include "opt_net_mpsafe.h"
33 #endif
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/condvar.h>
40 #include <sys/device.h>
41 #include <sys/intr.h>
42 #include <sys/kmem.h>
43 #include <sys/mbuf.h>
44 #include <sys/mutex.h>
45 #include <sys/sockio.h>
46 #include <sys/cpu.h>
47 #include <sys/module.h>
48 
49 #include <dev/pci/virtioreg.h>
50 #include <dev/pci/virtiovar.h>
51 
52 #include <net/if.h>
53 #include <net/if_media.h>
54 #include <net/if_ether.h>
55 
56 #include <net/bpf.h>
57 
58 #include "ioconf.h"
59 
60 #ifdef NET_MPSAFE
61 #define VIOIF_MPSAFE	1
62 #endif
63 
64 #ifdef SOFTINT_INTR
65 #define VIOIF_SOFTINT_INTR	1
66 #endif
67 
68 /*
69  * if_vioifreg.h:
70  */
71 /* Configuration registers */
72 #define VIRTIO_NET_CONFIG_MAC		0 /* 8bit x 6byte */
73 #define VIRTIO_NET_CONFIG_STATUS	6 /* 16bit */
74 
75 /* Feature bits */
76 #define VIRTIO_NET_F_CSUM	(1<<0)
77 #define VIRTIO_NET_F_GUEST_CSUM	(1<<1)
78 #define VIRTIO_NET_F_MAC	(1<<5)
79 #define VIRTIO_NET_F_GSO	(1<<6)
80 #define VIRTIO_NET_F_GUEST_TSO4	(1<<7)
81 #define VIRTIO_NET_F_GUEST_TSO6	(1<<8)
82 #define VIRTIO_NET_F_GUEST_ECN	(1<<9)
83 #define VIRTIO_NET_F_GUEST_UFO	(1<<10)
84 #define VIRTIO_NET_F_HOST_TSO4	(1<<11)
85 #define VIRTIO_NET_F_HOST_TSO6	(1<<12)
86 #define VIRTIO_NET_F_HOST_ECN	(1<<13)
87 #define VIRTIO_NET_F_HOST_UFO	(1<<14)
88 #define VIRTIO_NET_F_MRG_RXBUF	(1<<15)
89 #define VIRTIO_NET_F_STATUS	(1<<16)
90 #define VIRTIO_NET_F_CTRL_VQ	(1<<17)
91 #define VIRTIO_NET_F_CTRL_RX	(1<<18)
92 #define VIRTIO_NET_F_CTRL_VLAN	(1<<19)
93 
94 #define VIRTIO_NET_FLAG_BITS \
95 	VIRTIO_COMMON_FLAG_BITS \
96 	"\x14""CTRL_VLAN" \
97 	"\x13""CTRL_RX" \
98 	"\x12""CTRL_VQ" \
99 	"\x11""STATUS" \
100 	"\x10""MRG_RXBUF" \
101 	"\x0f""HOST_UFO" \
102 	"\x0e""HOST_ECN" \
103 	"\x0d""HOST_TSO6" \
104 	"\x0c""HOST_TSO4" \
105 	"\x0b""GUEST_UFO" \
106 	"\x0a""GUEST_ECN" \
107 	"\x09""GUEST_TSO6" \
108 	"\x08""GUEST_TSO4" \
109 	"\x07""GSO" \
110 	"\x06""MAC" \
111 	"\x02""GUEST_CSUM" \
112 	"\x01""CSUM"
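/*
 * VIRTIO_NET_FLAG_BITS is an old-style snprintb(9) format string (the
 * number-base prefix is supplied by VIRTIO_COMMON_FLAG_BITS): each
 * "\xNN" byte is the 1-origin bit number of the flag named after it,
 * so "\x14" labels bit 20, i.e. VIRTIO_NET_F_CTRL_VLAN (1<<19).
 * A minimal usage sketch:
 *
 *	char buf[256];
 *	snprintb(buf, sizeof(buf), VIRTIO_NET_FLAG_BITS, features);
 *	(features == 0x30020 renders as "0x30020<CTRL_VQ,STATUS,MAC>")
 */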
113 
114 /* Status */
115 #define VIRTIO_NET_S_LINK_UP	1
116 
117 /* Packet header structure */
118 struct virtio_net_hdr {
119 	uint8_t		flags;
120 	uint8_t		gso_type;
121 	uint16_t	hdr_len;
122 	uint16_t	gso_size;
123 	uint16_t	csum_start;
124 	uint16_t	csum_offset;
125 #if 0
126 	uint16_t	num_buffers; /* if VIRTIO_NET_F_MRG_RXBUF enabled */
127 #endif
128 } __packed;
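/*
 * This header prefixes every frame on both the rx and tx virtqueues.
 * The num_buffers field is compiled out (#if 0) because it is only
 * present on the wire when VIRTIO_NET_F_MRG_RXBUF has been negotiated,
 * and this driver never requests that feature (see the feature mask
 * passed to virtio_child_attach_start() in vioif_attach()).
 */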
129 
130 #define VIRTIO_NET_HDR_F_NEEDS_CSUM	1 /* flags */
131 #define VIRTIO_NET_HDR_GSO_NONE		0 /* gso_type */
132 #define VIRTIO_NET_HDR_GSO_TCPV4	1 /* gso_type */
133 #define VIRTIO_NET_HDR_GSO_UDP		3 /* gso_type */
134 #define VIRTIO_NET_HDR_GSO_TCPV6	4 /* gso_type */
135 #define VIRTIO_NET_HDR_GSO_ECN		0x80 /* gso_type, |'ed */
136 
137 #define VIRTIO_NET_MAX_GSO_LEN		(65536+ETHER_HDR_LEN)
138 
139 /* Control virtqueue */
140 struct virtio_net_ctrl_cmd {
141 	uint8_t	class;
142 	uint8_t	command;
143 } __packed;
144 #define VIRTIO_NET_CTRL_RX		0
145 # define VIRTIO_NET_CTRL_RX_PROMISC	0
146 # define VIRTIO_NET_CTRL_RX_ALLMULTI	1
147 
148 #define VIRTIO_NET_CTRL_MAC		1
149 # define VIRTIO_NET_CTRL_MAC_TABLE_SET	0
150 
151 #define VIRTIO_NET_CTRL_VLAN		2
152 # define VIRTIO_NET_CTRL_VLAN_ADD	0
153 # define VIRTIO_NET_CTRL_VLAN_DEL	1
154 
155 struct virtio_net_ctrl_status {
156 	uint8_t	ack;
157 } __packed;
158 #define VIRTIO_NET_OK			0
159 #define VIRTIO_NET_ERR			1
160 
161 struct virtio_net_ctrl_rx {
162 	uint8_t	onoff;
163 } __packed;
164 
165 struct virtio_net_ctrl_mac_tbl {
166 	uint32_t nentries;
167 	uint8_t macs[][ETHER_ADDR_LEN];
168 } __packed;
169 
170 struct virtio_net_ctrl_vlan {
171 	uint16_t id;
172 } __packed;
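/*
 * A control-virtqueue transaction, as assembled by vioif_ctrl_rx() and
 * vioif_set_rx_filter() below, is a descriptor chain of three parts:
 * the { class, command } header (device-readable), a command-specific
 * payload such as virtio_net_ctrl_rx (device-readable), and the
 * one-byte ack (device-writable), which comes back as VIRTIO_NET_OK
 * or VIRTIO_NET_ERR.
 */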
173 
174 
175 /*
176  * if_vioifvar.h:
177  */
178 struct vioif_softc {
179 	device_t		sc_dev;
180 
181 	struct virtio_softc	*sc_virtio;
182 	struct virtqueue	sc_vq[3];
183 #define VQ_RX	0
184 #define VQ_TX	1
185 #define VQ_CTRL	2
186 
187 	uint8_t			sc_mac[ETHER_ADDR_LEN];
188 	struct ethercom		sc_ethercom;
189 	short			sc_deferred_init_done;
190 	bool			sc_link_active;
191 
192 	/* bus_dmamem */
193 	bus_dma_segment_t	sc_hdr_segs[1];
194 	struct virtio_net_hdr	*sc_hdrs;
195 #define sc_rx_hdrs	sc_hdrs
196 	struct virtio_net_hdr	*sc_tx_hdrs;
197 	struct virtio_net_ctrl_cmd *sc_ctrl_cmd;
198 	struct virtio_net_ctrl_status *sc_ctrl_status;
199 	struct virtio_net_ctrl_rx *sc_ctrl_rx;
200 	struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_uc;
201 	struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_mc;
202 
203 	/* kmem */
204 	bus_dmamap_t		*sc_arrays;
205 #define sc_rxhdr_dmamaps sc_arrays
206 	bus_dmamap_t		*sc_txhdr_dmamaps;
207 	bus_dmamap_t		*sc_rx_dmamaps;
208 	bus_dmamap_t		*sc_tx_dmamaps;
209 	struct mbuf		**sc_rx_mbufs;
210 	struct mbuf		**sc_tx_mbufs;
211 
212 	bus_dmamap_t		sc_ctrl_cmd_dmamap;
213 	bus_dmamap_t		sc_ctrl_status_dmamap;
214 	bus_dmamap_t		sc_ctrl_rx_dmamap;
215 	bus_dmamap_t		sc_ctrl_tbl_uc_dmamap;
216 	bus_dmamap_t		sc_ctrl_tbl_mc_dmamap;
217 
218 	void			*sc_rx_softint;
219 	void			*sc_ctl_softint;
220 
221 	enum {
222 		FREE, INUSE, DONE
223 	}			sc_ctrl_inuse;
224 	kcondvar_t		sc_ctrl_wait;
225 	kmutex_t		sc_ctrl_wait_lock;
226 	kmutex_t		sc_tx_lock;
227 	kmutex_t		sc_rx_lock;
228 	bool			sc_stopping;
229 
230 	bool			sc_has_ctrl;
231 };
232 #define VIRTIO_NET_TX_MAXNSEGS		(16) /* XXX */
233 #define VIRTIO_NET_CTRL_MAC_MAXENTRIES	(64) /* XXX */
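/*
 * VIRTIO_NET_TX_MAXNSEGS caps the DMA segments a single tx mbuf chain
 * may map to; vioif_start() falls back to m_defrag() when loading a
 * chain fails, typically because it is too fragmented.
 * VIRTIO_NET_CTRL_MAC_MAXENTRIES caps the multicast table built in
 * vioif_rx_filter(); larger multicast lists fall back to ALLMULTI.
 */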
234 
235 #define VIOIF_TX_LOCK(_sc)	mutex_enter(&(_sc)->sc_tx_lock)
236 #define VIOIF_TX_UNLOCK(_sc)	mutex_exit(&(_sc)->sc_tx_lock)
237 #define VIOIF_TX_LOCKED(_sc)	mutex_owned(&(_sc)->sc_tx_lock)
238 #define VIOIF_RX_LOCK(_sc)	mutex_enter(&(_sc)->sc_rx_lock)
239 #define VIOIF_RX_UNLOCK(_sc)	mutex_exit(&(_sc)->sc_rx_lock)
240 #define VIOIF_RX_LOCKED(_sc)	mutex_owned(&(_sc)->sc_rx_lock)
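/*
 * Lock ordering: when both locks are needed, sc_tx_lock is taken
 * before sc_rx_lock (see vioif_stop()).  sc_stopping is set with both
 * locks held and is re-checked under the corresponding lock on every
 * rx/tx path.
 */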
241 
242 /* cfattach interface functions */
243 static int	vioif_match(device_t, cfdata_t, void *);
244 static void	vioif_attach(device_t, device_t, void *);
245 static void	vioif_deferred_init(device_t);
246 
247 /* ifnet interface functions */
248 static int	vioif_init(struct ifnet *);
249 static void	vioif_stop(struct ifnet *, int);
250 static void	vioif_start(struct ifnet *);
251 static int	vioif_ioctl(struct ifnet *, u_long, void *);
252 static void	vioif_watchdog(struct ifnet *);
253 
254 /* rx */
255 static int	vioif_add_rx_mbuf(struct vioif_softc *, int);
256 static void	vioif_free_rx_mbuf(struct vioif_softc *, int);
257 static void	vioif_populate_rx_mbufs(struct vioif_softc *);
258 static void	vioif_populate_rx_mbufs_locked(struct vioif_softc *);
259 static int	vioif_rx_deq(struct vioif_softc *);
260 static int	vioif_rx_deq_locked(struct vioif_softc *);
261 static int	vioif_rx_vq_done(struct virtqueue *);
262 static void	vioif_rx_softint(void *);
263 static void	vioif_rx_drain(struct vioif_softc *);
264 
265 /* tx */
266 static int	vioif_tx_vq_done(struct virtqueue *);
267 static int	vioif_tx_vq_done_locked(struct virtqueue *);
268 static void	vioif_tx_drain(struct vioif_softc *);
269 
270 /* other control */
271 static bool	vioif_is_link_up(struct vioif_softc *);
272 static void	vioif_update_link_status(struct vioif_softc *);
273 static int	vioif_ctrl_rx(struct vioif_softc *, int, bool);
274 static int	vioif_set_promisc(struct vioif_softc *, bool);
275 static int	vioif_set_allmulti(struct vioif_softc *, bool);
276 static int	vioif_set_rx_filter(struct vioif_softc *);
277 static int	vioif_rx_filter(struct vioif_softc *);
278 static int	vioif_ctrl_vq_done(struct virtqueue *);
279 static int	vioif_config_change(struct virtio_softc *);
280 static void	vioif_ctl_softint(void *);
281 
282 CFATTACH_DECL_NEW(vioif, sizeof(struct vioif_softc),
283 		  vioif_match, vioif_attach, NULL, NULL);
284 
285 static int
286 vioif_match(device_t parent, cfdata_t match, void *aux)
287 {
288 	struct virtio_attach_args *va = aux;
289 
290 	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_NETWORK)
291 		return 1;
292 
293 	return 0;
294 }
295 
296 /* allocate memory */
297 /*
298  * dma memory is used for:
299  *   sc_rx_hdrs[slot]:	 metadata array for received frames (READ)
300  *   sc_tx_hdrs[slot]:	 metadata array for frames to be sent (WRITE)
301  *   sc_ctrl_cmd:	 command to be sent via ctrl vq (WRITE)
302  *   sc_ctrl_status:	 return value for a command via ctrl vq (READ)
303  *   sc_ctrl_rx:	 parameter for a VIRTIO_NET_CTRL_RX class command
304  *			 (WRITE)
305  *   sc_ctrl_mac_tbl_uc: unicast MAC address filter for a VIRTIO_NET_CTRL_MAC
306  *			 class command (WRITE)
307  *   sc_ctrl_mac_tbl_mc: multicast MAC address filter for a VIRTIO_NET_CTRL_MAC
308  *			 class command (WRITE)
309  * Only one instance of each sc_ctrl_* structure is allocated; access is
310  * serialized by the sc_ctrl_inuse variable and the sc_ctrl_wait condvar.
311  */
312 /*
313  * dynamically allocated memory is used for:
314  *   sc_rxhdr_dmamaps[slot]:	bus_dmamap_t array for sc_rx_hdrs[slot]
315  *   sc_txhdr_dmamaps[slot]:	bus_dmamap_t array for sc_tx_hdrs[slot]
316  *   sc_rx_dmamaps[slot]:	bus_dmamap_t array for received payload
317  *   sc_tx_dmamaps[slot]:	bus_dmamap_t array for sent payload
318  *   sc_rx_mbufs[slot]:		mbuf pointer array for received frames
319  *   sc_tx_mbufs[slot]:		mbuf pointer array for sent frames
320  */
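/*
 * The single kmem allocation made in vioif_alloc_mems() is carved up
 * in this order (a sketch of the layout, derived from the pointer
 * arithmetic below):
 *
 *	sc_arrays = sc_rxhdr_dmamaps[rxqsize]
 *	sc_txhdr_dmamaps[txqsize]
 *	sc_rx_dmamaps[rxqsize]
 *	sc_tx_dmamaps[txqsize]
 *	sc_rx_mbufs[rxqsize]
 *	sc_tx_mbufs[txqsize]
 */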
321 static int
322 vioif_alloc_mems(struct vioif_softc *sc)
323 {
324 	struct virtio_softc *vsc = sc->sc_virtio;
325 	int allocsize, allocsize2, r, rsegs, i;
326 	void *vaddr;
327 	intptr_t p;
328 	int rxqsize, txqsize;
329 
330 	rxqsize = sc->sc_vq[VQ_RX].vq_num;
331 	txqsize = sc->sc_vq[VQ_TX].vq_num;
332 
333 	allocsize = sizeof(struct virtio_net_hdr) * rxqsize;
334 	allocsize += sizeof(struct virtio_net_hdr) * txqsize;
335 	if (sc->sc_has_ctrl) {
336 		allocsize += sizeof(struct virtio_net_ctrl_cmd) * 1;
337 		allocsize += sizeof(struct virtio_net_ctrl_status) * 1;
338 		allocsize += sizeof(struct virtio_net_ctrl_rx) * 1;
339 		allocsize += sizeof(struct virtio_net_ctrl_mac_tbl)
340 			+ sizeof(struct virtio_net_ctrl_mac_tbl)
341 			+ ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES;
342 	}
343 	r = bus_dmamem_alloc(virtio_dmat(vsc), allocsize, 0, 0,
344 			     &sc->sc_hdr_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
345 	if (r != 0) {
346 		aprint_error_dev(sc->sc_dev,
347 				 "DMA memory allocation failed, size %d, "
348 				 "error code %d\n", allocsize, r);
349 		goto err_none;
350 	}
351 	r = bus_dmamem_map(virtio_dmat(vsc),
352 			   &sc->sc_hdr_segs[0], 1, allocsize,
353 			   &vaddr, BUS_DMA_NOWAIT);
354 	if (r != 0) {
355 		aprint_error_dev(sc->sc_dev,
356 				 "DMA memory map failed, "
357 				 "error code %d\n", r);
358 		goto err_dmamem_alloc;
359 	}
360 	sc->sc_hdrs = vaddr;
361 	memset(vaddr, 0, allocsize);
362 	p = (intptr_t) vaddr;
363 	p += sizeof(struct virtio_net_hdr) * rxqsize;
364 #define P(name,size)	do { sc->sc_ ##name = (void*) p;	\
365 			     p += size; } while (0)
366 	P(tx_hdrs, sizeof(struct virtio_net_hdr) * txqsize);
367 	if (sc->sc_has_ctrl) {
368 		P(ctrl_cmd, sizeof(struct virtio_net_ctrl_cmd));
369 		P(ctrl_status, sizeof(struct virtio_net_ctrl_status));
370 		P(ctrl_rx, sizeof(struct virtio_net_ctrl_rx));
371 		P(ctrl_mac_tbl_uc, sizeof(struct virtio_net_ctrl_mac_tbl));
372 		P(ctrl_mac_tbl_mc,
373 		  (sizeof(struct virtio_net_ctrl_mac_tbl)
374 		   + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES));
375 	}
376 #undef P
377 
378 	allocsize2 = sizeof(bus_dmamap_t) * (rxqsize + txqsize);
379 	allocsize2 += sizeof(bus_dmamap_t) * (rxqsize + txqsize);
380 	allocsize2 += sizeof(struct mbuf*) * (rxqsize + txqsize);
381 	sc->sc_arrays = kmem_zalloc(allocsize2, KM_SLEEP);
382 	sc->sc_txhdr_dmamaps = sc->sc_arrays + rxqsize;
383 	sc->sc_rx_dmamaps = sc->sc_txhdr_dmamaps + txqsize;
384 	sc->sc_tx_dmamaps = sc->sc_rx_dmamaps + rxqsize;
385 	sc->sc_rx_mbufs = (void*) (sc->sc_tx_dmamaps + txqsize);
386 	sc->sc_tx_mbufs = sc->sc_rx_mbufs + rxqsize;
387 
388 #define C(map, buf, size, nsegs, rw, usage)				\
389 	do {								\
390 		r = bus_dmamap_create(virtio_dmat(vsc), size, nsegs, size, 0, \
391 				      BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,	\
392 				      &sc->sc_ ##map);			\
393 		if (r != 0) {						\
394 			aprint_error_dev(sc->sc_dev,			\
395 					 usage " dmamap creation failed, " \
396 					 "error code %d\n", r);		\
397 			goto err_reqs;				\
398 		}							\
399 	} while (0)
400 #define C_L1(map, buf, size, nsegs, rw, usage)				\
401 	C(map, buf, size, nsegs, rw, usage);				\
402 	do {								\
403 		r = bus_dmamap_load(virtio_dmat(vsc), sc->sc_ ##map,	\
404 				    &sc->sc_ ##buf, size, NULL,		\
405 				    BUS_DMA_ ##rw | BUS_DMA_NOWAIT);	\
406 		if (r != 0) {						\
407 			aprint_error_dev(sc->sc_dev,			\
408 					 usage " dmamap load failed, "	\
409 					 "error code %d\n", r);		\
410 			goto err_reqs;					\
411 		}							\
412 	} while (0)
413 #define C_L2(map, buf, size, nsegs, rw, usage)				\
414 	C(map, buf, size, nsegs, rw, usage);				\
415 	do {								\
416 		r = bus_dmamap_load(virtio_dmat(vsc), sc->sc_ ##map,	\
417 				    sc->sc_ ##buf, size, NULL,		\
418 				    BUS_DMA_ ##rw | BUS_DMA_NOWAIT);	\
419 		if (r != 0) {						\
420 			aprint_error_dev(sc->sc_dev,			\
421 					 usage " dmamap load failed, "	\
422 					 "error code %d\n", r);		\
423 			goto err_reqs;					\
424 		}							\
425 	} while (0)
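	/*
	 * The three macros above differ only in how the dmamap is
	 * loaded: C() merely creates the map, C_L1() additionally loads
	 * it with the address of an in-memory array element (note the &
	 * on sc_##buf), and C_L2() loads it through a pointer member.
	 * For example, C_L1(rxhdr_dmamaps[i], rx_hdrs[i], ...) loads
	 * &sc->sc_rx_hdrs[i], while C_L2(ctrl_cmd_dmamap, ctrl_cmd, ...)
	 * loads sc->sc_ctrl_cmd.
	 */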
426 	for (i = 0; i < rxqsize; i++) {
427 		C_L1(rxhdr_dmamaps[i], rx_hdrs[i],
428 		    sizeof(struct virtio_net_hdr), 1,
429 		    READ, "rx header");
430 		C(rx_dmamaps[i], NULL, MCLBYTES, 1, 0, "rx payload");
431 	}
432 
433 	for (i = 0; i < txqsize; i++) {
434 		C_L1(txhdr_dmamaps[i], tx_hdrs[i],
435 		    sizeof(struct virtio_net_hdr), 1,
436 		    WRITE, "tx header");
437 		C(tx_dmamaps[i], NULL, ETHER_MAX_LEN, VIRTIO_NET_TX_MAXNSEGS, 0,
438 		  "tx payload");
439 	}
440 
441 	if (sc->sc_has_ctrl) {
442 		/* control vq class & command */
443 		C_L2(ctrl_cmd_dmamap, ctrl_cmd,
444 		    sizeof(struct virtio_net_ctrl_cmd), 1, WRITE,
445 		    "control command");
446 
447 		/* control vq status */
448 		C_L2(ctrl_status_dmamap, ctrl_status,
449 		    sizeof(struct virtio_net_ctrl_status), 1, READ,
450 		    "control status");
451 
452 		/* control vq rx mode command parameter */
453 		C_L2(ctrl_rx_dmamap, ctrl_rx,
454 		    sizeof(struct virtio_net_ctrl_rx), 1, WRITE,
455 		    "rx mode control command");
456 
457 		/* control vq MAC filter table for unicast */
458 		/* do not load now since its length is variable */
459 		C(ctrl_tbl_uc_dmamap, NULL,
460 		  sizeof(struct virtio_net_ctrl_mac_tbl) + 0, 1, WRITE,
461 		  "unicast MAC address filter command");
462 
463 		/* control vq MAC filter table for multicast */
464 		C(ctrl_tbl_mc_dmamap, NULL,
465 		  (sizeof(struct virtio_net_ctrl_mac_tbl)
466 		   + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES),
467 		  1, WRITE, "multicast MAC address filter command");
468 	}
469 #undef C_L2
470 #undef C_L1
471 #undef C
472 
473 	return 0;
474 
475 err_reqs:
476 #define D(map)								\
477 	do {								\
478 		if (sc->sc_ ##map) {					\
479 			bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_ ##map); \
480 			sc->sc_ ##map = NULL;				\
481 		}							\
482 	} while (0)
483 	D(ctrl_tbl_mc_dmamap);
484 	D(ctrl_tbl_uc_dmamap);
485 	D(ctrl_rx_dmamap);
486 	D(ctrl_status_dmamap);
487 	D(ctrl_cmd_dmamap);
488 	for (i = 0; i < txqsize; i++) {
489 		D(tx_dmamaps[i]);
490 		D(txhdr_dmamaps[i]);
491 	}
492 	for (i = 0; i < rxqsize; i++) {
493 		D(rx_dmamaps[i]);
494 		D(rxhdr_dmamaps[i]);
495 	}
496 #undef D
497 	if (sc->sc_arrays) {
498 		kmem_free(sc->sc_arrays, allocsize2);
499 		sc->sc_arrays = NULL;
500 	}
501 	bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_hdrs, allocsize);
502 err_dmamem_alloc:
503 	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_hdr_segs[0], 1);
504 err_none:
505 	return -1;
506 }
507 
508 static void
509 vioif_attach(device_t parent, device_t self, void *aux)
510 {
511 	struct vioif_softc *sc = device_private(self);
512 	struct virtio_softc *vsc = device_private(parent);
513 	uint32_t features;
514 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
515 	u_int flags;
516 	int r, nvqs=0, req_flags;
517 
518 	if (virtio_child(vsc) != NULL) {
519 		aprint_normal(": child already attached for %s; "
520 			      "something is wrong...\n",
521 			      device_xname(parent));
522 		return;
523 	}
524 
525 	sc->sc_dev = self;
526 	sc->sc_virtio = vsc;
527 	sc->sc_link_active = false;
528 
529 	req_flags = 0;
530 
531 #ifdef VIOIF_MPSAFE
532 	req_flags |= VIRTIO_F_PCI_INTR_MPSAFE;
533 #endif
534 #ifdef VIOIF_SOFTINT_INTR
535 	req_flags |= VIRTIO_F_PCI_INTR_SOFTINT;
536 #endif
537 	req_flags |= VIRTIO_F_PCI_INTR_MSIX;
538 
539 	virtio_child_attach_start(vsc, self, IPL_NET, sc->sc_vq,
540 	    vioif_config_change, virtio_vq_intr, req_flags,
541 	    (VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | VIRTIO_NET_F_CTRL_VQ |
542 	     VIRTIO_NET_F_CTRL_RX | VIRTIO_F_NOTIFY_ON_EMPTY),
543 	    VIRTIO_NET_FLAG_BITS);
544 
545 	features = virtio_features(vsc);
546 
547 	if (features & VIRTIO_NET_F_MAC) {
548 		sc->sc_mac[0] = virtio_read_device_config_1(vsc,
549 						    VIRTIO_NET_CONFIG_MAC+0);
550 		sc->sc_mac[1] = virtio_read_device_config_1(vsc,
551 						    VIRTIO_NET_CONFIG_MAC+1);
552 		sc->sc_mac[2] = virtio_read_device_config_1(vsc,
553 						    VIRTIO_NET_CONFIG_MAC+2);
554 		sc->sc_mac[3] = virtio_read_device_config_1(vsc,
555 						    VIRTIO_NET_CONFIG_MAC+3);
556 		sc->sc_mac[4] = virtio_read_device_config_1(vsc,
557 						    VIRTIO_NET_CONFIG_MAC+4);
558 		sc->sc_mac[5] = virtio_read_device_config_1(vsc,
559 						    VIRTIO_NET_CONFIG_MAC+5);
560 	} else {
561 		/* code stolen from sys/net/if_tap.c */
562 		struct timeval tv;
563 		uint32_t ui;
564 		getmicrouptime(&tv);
565 		ui = (tv.tv_sec ^ tv.tv_usec) & 0xffffff;
566 		memcpy(sc->sc_mac+3, (uint8_t *)&ui, 3);
567 		virtio_write_device_config_1(vsc,
568 					     VIRTIO_NET_CONFIG_MAC+0,
569 					     sc->sc_mac[0]);
570 		virtio_write_device_config_1(vsc,
571 					     VIRTIO_NET_CONFIG_MAC+1,
572 					     sc->sc_mac[1]);
573 		virtio_write_device_config_1(vsc,
574 					     VIRTIO_NET_CONFIG_MAC+2,
575 					     sc->sc_mac[2]);
576 		virtio_write_device_config_1(vsc,
577 					     VIRTIO_NET_CONFIG_MAC+3,
578 					     sc->sc_mac[3]);
579 		virtio_write_device_config_1(vsc,
580 					     VIRTIO_NET_CONFIG_MAC+4,
581 					     sc->sc_mac[4]);
582 		virtio_write_device_config_1(vsc,
583 					     VIRTIO_NET_CONFIG_MAC+5,
584 					     sc->sc_mac[5]);
585 	}
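	/*
	 * Note that when VIRTIO_NET_F_MAC is not offered, only the low
	 * three bytes of the address are randomized from the uptime
	 * clock above; bytes 0-2 stay zero (the softc is allocated
	 * zeroed), so the resulting address has an all-zero OUI.
	 */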
586 
587 	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(sc->sc_mac));
588 
589 	mutex_init(&sc->sc_tx_lock, MUTEX_DEFAULT, IPL_NET);
590 	mutex_init(&sc->sc_rx_lock, MUTEX_DEFAULT, IPL_NET);
591 	sc->sc_stopping = false;
592 
593 	/*
594 	 * Allocating a virtqueue for Rx
595 	 */
596 	r = virtio_alloc_vq(vsc, &sc->sc_vq[VQ_RX], VQ_RX,
597 	    MCLBYTES+sizeof(struct virtio_net_hdr), 2, "rx");
598 	if (r != 0)
599 		goto err;
600 	nvqs = 1;
601 	sc->sc_vq[VQ_RX].vq_done = vioif_rx_vq_done;
602 
603 	/*
604 	 * Allocating a virtqueue for Tx
605 	 */
606 	r = virtio_alloc_vq(vsc, &sc->sc_vq[VQ_TX], VQ_TX,
607 	    (sizeof(struct virtio_net_hdr) + (ETHER_MAX_LEN - ETHER_HDR_LEN)),
608 	    VIRTIO_NET_TX_MAXNSEGS + 1, "tx");
609 	if (r != 0)
610 		goto err;
611 	nvqs = 2;
612 	sc->sc_vq[VQ_TX].vq_done = vioif_tx_vq_done;
613 
614 	virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_RX]);
615 	virtio_stop_vq_intr(vsc, &sc->sc_vq[VQ_TX]); /* not urgent; do it later */
616 
617 	if ((features & VIRTIO_NET_F_CTRL_VQ) &&
618 	    (features & VIRTIO_NET_F_CTRL_RX)) {
619 		/*
620 		 * Allocating a virtqueue for control channel
621 		 */
622 		r = virtio_alloc_vq(vsc, &sc->sc_vq[VQ_CTRL], VQ_CTRL,
623 		    NBPG, 1, "control");
624 		if (r != 0) {
625 			aprint_error_dev(self, "failed to allocate "
626 			    "a virtqueue for control channel\n");
627 			goto skip;
628 		}
629 
630 		sc->sc_vq[VQ_CTRL].vq_done = vioif_ctrl_vq_done;
631 		cv_init(&sc->sc_ctrl_wait, "ctrl_vq");
632 		mutex_init(&sc->sc_ctrl_wait_lock, MUTEX_DEFAULT, IPL_NET);
633 		sc->sc_ctrl_inuse = FREE;
634 		virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_CTRL]);
635 		sc->sc_has_ctrl = true;
636 		nvqs = 3;
637 	}
638 skip:
639 
640 #ifdef VIOIF_MPSAFE
641 	flags = SOFTINT_NET | SOFTINT_MPSAFE;
642 #else
643 	flags = SOFTINT_NET;
644 #endif
645 	sc->sc_rx_softint = softint_establish(flags, vioif_rx_softint, sc);
646 	if (sc->sc_rx_softint == NULL) {
647 		aprint_error_dev(self, "cannot establish rx softint\n");
648 		goto err;
649 	}
650 
651 	sc->sc_ctl_softint = softint_establish(flags, vioif_ctl_softint, sc);
652 	if (sc->sc_ctl_softint == NULL) {
653 		aprint_error_dev(self, "cannot establish ctl softint\n");
654 		goto err;
655 	}
656 
657 	if (vioif_alloc_mems(sc) < 0)
658 		goto err;
659 
660 	if (virtio_child_attach_finish(vsc) != 0)
661 		goto err;
662 
663 	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
664 	ifp->if_softc = sc;
665 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
666 	ifp->if_start = vioif_start;
667 	ifp->if_ioctl = vioif_ioctl;
668 	ifp->if_init = vioif_init;
669 	ifp->if_stop = vioif_stop;
670 	ifp->if_capabilities = 0;
671 	ifp->if_watchdog = vioif_watchdog;
672 	IFQ_SET_MAXLEN(&ifp->if_snd, MAX(sc->sc_vq[VQ_TX].vq_num, IFQ_MAXLEN));
673 	IFQ_SET_READY(&ifp->if_snd);
674 
675 	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
676 
677 	if_attach(ifp);
678 	if_deferred_start_init(ifp, NULL);
679 	ether_ifattach(ifp, sc->sc_mac);
680 
681 	return;
682 
683 err:
684 	mutex_destroy(&sc->sc_tx_lock);
685 	mutex_destroy(&sc->sc_rx_lock);
686 
687 	if (sc->sc_has_ctrl) {
688 		cv_destroy(&sc->sc_ctrl_wait);
689 		mutex_destroy(&sc->sc_ctrl_wait_lock);
690 	}
691 
692 	while (nvqs > 0)
693 		virtio_free_vq(vsc, &sc->sc_vq[--nvqs]);
694 
695 	virtio_child_attach_failed(vsc);
696 	return;
697 }
698 
699 /* we need interrupts to turn promiscuous mode off */
700 static void
701 vioif_deferred_init(device_t self)
702 {
703 	struct vioif_softc *sc = device_private(self);
704 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
705 	int r;
706 
707 	if (ifp->if_flags & IFF_PROMISC)
708 		return;
709 
710 	r = vioif_set_promisc(sc, false);
711 	if (r != 0)
712 		aprint_error_dev(self, "resetting promisc mode failed, "
713 				 "error code %d\n", r);
714 }
715 
716 /*
717  * Interface functions for ifnet
718  */
719 static int
720 vioif_init(struct ifnet *ifp)
721 {
722 	struct vioif_softc *sc = ifp->if_softc;
723 	struct virtio_softc *vsc = sc->sc_virtio;
724 
725 	vioif_stop(ifp, 0);
726 
727 	virtio_reinit_start(vsc);
728 	virtio_negotiate_features(vsc, virtio_features(vsc));
729 	virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_RX]);
730 	virtio_stop_vq_intr(vsc, &sc->sc_vq[VQ_TX]);
731 	if (sc->sc_has_ctrl)
732 		virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_CTRL]);
733 	virtio_reinit_end(vsc);
734 
735 	if (!sc->sc_deferred_init_done) {
736 		sc->sc_deferred_init_done = 1;
737 		if (sc->sc_has_ctrl)
738 			vioif_deferred_init(sc->sc_dev);
739 	}
740 
741 	/* sc_stopping must be false before vioif_populate_rx_mbufs() */
742 	sc->sc_stopping = false;
743 
744 	vioif_populate_rx_mbufs(sc);
745 
746 	vioif_update_link_status(sc);
747 	ifp->if_flags |= IFF_RUNNING;
748 	ifp->if_flags &= ~IFF_OACTIVE;
749 	vioif_rx_filter(sc);
750 
751 	return 0;
752 }
753 
754 static void
755 vioif_stop(struct ifnet *ifp, int disable)
756 {
757 	struct vioif_softc *sc = ifp->if_softc;
758 	struct virtio_softc *vsc = sc->sc_virtio;
759 
760 	/* Take the locks to ensure that ongoing TX/RX finish */
761 	VIOIF_TX_LOCK(sc);
762 	VIOIF_RX_LOCK(sc);
763 	sc->sc_stopping = true;
764 	VIOIF_RX_UNLOCK(sc);
765 	VIOIF_TX_UNLOCK(sc);
766 
767 	/* disable interrupts */
768 	virtio_stop_vq_intr(vsc, &sc->sc_vq[VQ_RX]);
769 	virtio_stop_vq_intr(vsc, &sc->sc_vq[VQ_TX]);
770 	if (sc->sc_has_ctrl)
771 		virtio_stop_vq_intr(vsc, &sc->sc_vq[VQ_CTRL]);
772 
773 	/* the only way to stop I/O and DMA is to reset the device */
774 	virtio_reset(vsc);
775 	vioif_rx_deq(sc);
776 	vioif_tx_drain(sc);
777 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
778 	sc->sc_link_active = false;
779 
780 	if (disable)
781 		vioif_rx_drain(sc);
782 }
783 
784 static void
785 vioif_start(struct ifnet *ifp)
786 {
787 	struct vioif_softc *sc = ifp->if_softc;
788 	struct virtio_softc *vsc = sc->sc_virtio;
789 	struct virtqueue *vq = &sc->sc_vq[VQ_TX];
790 	struct mbuf *m;
791 	int queued = 0;
792 
793 	VIOIF_TX_LOCK(sc);
794 
795 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING ||
796 	    !sc->sc_link_active)
797 		goto out;
798 
799 	if (sc->sc_stopping)
800 		goto out;
801 
802 	for (;;) {
803 		int slot, r;
804 
805 		IFQ_DEQUEUE(&ifp->if_snd, m);
806 		if (m == NULL)
807 			break;
808 
809 		r = virtio_enqueue_prep(vsc, vq, &slot);
810 		if (r == EAGAIN) {
811 			ifp->if_flags |= IFF_OACTIVE;
812 			m_freem(m);
813 			break;
814 		}
815 		if (r != 0)
816 			panic("enqueue_prep for a tx buffer");
817 
818 		r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
819 					 sc->sc_tx_dmamaps[slot],
820 					 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
821 		if (r != 0) {
822 			/* maybe just too fragmented */
823 			struct mbuf *newm;
824 
825 			newm = m_defrag(m, M_NOWAIT);
826 			if (newm == NULL) {
827 				aprint_error_dev(sc->sc_dev,
828 				    "m_defrag() failed\n");
829 				goto skip;
830 			}
831 
832 			m = newm;
833 			r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
834 					 sc->sc_tx_dmamaps[slot],
835 					 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
836 			if (r != 0) {
837 				aprint_error_dev(sc->sc_dev,
838 				    "tx dmamap load failed, error code %d\n",
839 				    r);
840 skip:
841 				m_freem(m);
842 				virtio_enqueue_abort(vsc, vq, slot);
843 				continue;
844 			}
845 		}
846 
847 		/* This should actually never fail */
848 		r = virtio_enqueue_reserve(vsc, vq, slot,
849 					sc->sc_tx_dmamaps[slot]->dm_nsegs + 1);
850 		if (r != 0) {
851 			aprint_error_dev(sc->sc_dev,
852 			    "virtio_enqueue_reserve failed, error code %d\n",
853 			    r);
854 			bus_dmamap_unload(virtio_dmat(vsc),
855 					  sc->sc_tx_dmamaps[slot]);
856 			/* slot already freed by virtio_enqueue_reserve */
857 			m_freem(m);
858 			continue;
859 		}
860 
861 		sc->sc_tx_mbufs[slot] = m;
862 
863 		memset(&sc->sc_tx_hdrs[slot], 0, sizeof(struct virtio_net_hdr));
864 		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_tx_dmamaps[slot],
865 				0, sc->sc_tx_dmamaps[slot]->dm_mapsize,
866 				BUS_DMASYNC_PREWRITE);
867 		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_txhdr_dmamaps[slot],
868 				0, sc->sc_txhdr_dmamaps[slot]->dm_mapsize,
869 				BUS_DMASYNC_PREWRITE);
870 		virtio_enqueue(vsc, vq, slot, sc->sc_txhdr_dmamaps[slot], true);
871 		virtio_enqueue(vsc, vq, slot, sc->sc_tx_dmamaps[slot], true);
872 		virtio_enqueue_commit(vsc, vq, slot, false);
873 
874 		queued++;
875 		bpf_mtap(ifp, m, BPF_D_OUT);
876 	}
877 
878 	if (queued > 0) {
879 		virtio_enqueue_commit(vsc, vq, -1, true);
880 		ifp->if_timer = 5;
881 	}
882 
883 out:
884 	VIOIF_TX_UNLOCK(sc);
885 }
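/*
 * Each transmitted packet occupies one descriptor chain: the
 * virtio_net_hdr map (one segment) followed by the payload map (up to
 * VIRTIO_NET_TX_MAXNSEGS segments).  That is why enqueue_reserve above
 * asks for dm_nsegs + 1 and why the tx vq is allocated with maxnsegs
 * VIRTIO_NET_TX_MAXNSEGS + 1 in vioif_attach().
 */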
886 
887 static int
888 vioif_ioctl(struct ifnet *ifp, u_long cmd, void *data)
889 {
890 	int s, r;
891 
892 	s = splnet();
893 
894 	r = ether_ioctl(ifp, cmd, data);
895 	if ((r == 0 && cmd == SIOCSIFFLAGS) ||
896 	    (r == ENETRESET && (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI))) {
897 		if (ifp->if_flags & IFF_RUNNING)
898 			r = vioif_rx_filter(ifp->if_softc);
899 		else
900 			r = 0;
901 	}
902 
903 	splx(s);
904 
905 	return r;
906 }
907 
908 static void
909 vioif_watchdog(struct ifnet *ifp)
910 {
911 	struct vioif_softc *sc = ifp->if_softc;
912 
913 	if (ifp->if_flags & IFF_RUNNING)
914 		vioif_tx_vq_done(&sc->sc_vq[VQ_TX]);
915 }
916 
917 
918 /*
919  * Receive implementation
920  */
921 /* allocate and initialize an mbuf for receive */
922 static int
923 vioif_add_rx_mbuf(struct vioif_softc *sc, int i)
924 {
925 	struct mbuf *m;
926 	int r;
927 
928 	MGETHDR(m, M_DONTWAIT, MT_DATA);
929 	if (m == NULL)
930 		return ENOBUFS;
931 	MCLGET(m, M_DONTWAIT);
932 	if ((m->m_flags & M_EXT) == 0) {
933 		m_freem(m);
934 		return ENOBUFS;
935 	}
936 	sc->sc_rx_mbufs[i] = m;
937 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
938 	r = bus_dmamap_load_mbuf(virtio_dmat(sc->sc_virtio),
939 				 sc->sc_rx_dmamaps[i],
940 				 m, BUS_DMA_READ|BUS_DMA_NOWAIT);
941 	if (r) {
942 		m_freem(m);
943 		sc->sc_rx_mbufs[i] = NULL;
944 		return r;
945 	}
946 
947 	return 0;
948 }
949 
950 /* free an mbuf for receive */
951 static void
952 vioif_free_rx_mbuf(struct vioif_softc *sc, int i)
953 {
954 	bus_dmamap_unload(virtio_dmat(sc->sc_virtio), sc->sc_rx_dmamaps[i]);
955 	m_freem(sc->sc_rx_mbufs[i]);
956 	sc->sc_rx_mbufs[i] = NULL;
957 }
958 
959 /* add mbufs for all the empty receive slots */
960 static void
961 vioif_populate_rx_mbufs(struct vioif_softc *sc)
962 {
963 	VIOIF_RX_LOCK(sc);
964 	vioif_populate_rx_mbufs_locked(sc);
965 	VIOIF_RX_UNLOCK(sc);
966 }
967 
968 static void
969 vioif_populate_rx_mbufs_locked(struct vioif_softc *sc)
970 {
971 	struct virtio_softc *vsc = sc->sc_virtio;
972 	int i, r, ndone = 0;
973 	struct virtqueue *vq = &sc->sc_vq[VQ_RX];
974 
975 	KASSERT(VIOIF_RX_LOCKED(sc));
976 
977 	if (sc->sc_stopping)
978 		return;
979 
980 	for (i = 0; i < vq->vq_num; i++) {
981 		int slot;
982 		r = virtio_enqueue_prep(vsc, vq, &slot);
983 		if (r == EAGAIN)
984 			break;
985 		if (r != 0)
986 			panic("enqueue_prep for rx buffers");
987 		if (sc->sc_rx_mbufs[slot] == NULL) {
988 			r = vioif_add_rx_mbuf(sc, slot);
989 			if (r != 0) {
990 				printf("%s: rx mbuf allocation failed, "
991 				       "error code %d\n",
992 				       device_xname(sc->sc_dev), r);
993 				break;
994 			}
995 		}
996 		r = virtio_enqueue_reserve(vsc, vq, slot,
997 					sc->sc_rx_dmamaps[slot]->dm_nsegs + 1);
998 		if (r != 0) {
999 			vioif_free_rx_mbuf(sc, slot);
1000 			break;
1001 		}
1002 		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_rxhdr_dmamaps[slot],
1003 			0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_PREREAD);
1004 		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_rx_dmamaps[slot],
1005 			0, MCLBYTES, BUS_DMASYNC_PREREAD);
1006 		virtio_enqueue(vsc, vq, slot, sc->sc_rxhdr_dmamaps[slot], false);
1007 		virtio_enqueue(vsc, vq, slot, sc->sc_rx_dmamaps[slot], false);
1008 		virtio_enqueue_commit(vsc, vq, slot, false);
1009 		ndone++;
1010 	}
1011 	if (ndone > 0)
1012 		virtio_enqueue_commit(vsc, vq, -1, true);
1013 }
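/*
 * Each rx slot is a two-segment chain (virtio_net_hdr plus one
 * MCLBYTES cluster), matching the maxnsegs of 2 requested when the rx
 * vq is allocated in vioif_attach().  The device fills in both, hence
 * the PREREAD syncs and the false (device-writable) direction flag
 * passed to virtio_enqueue() above.
 */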
1014 
1015 /* dequeue received packets */
1016 static int
1017 vioif_rx_deq(struct vioif_softc *sc)
1018 {
1019 	int r;
1020 
1021 	KASSERT(sc->sc_stopping);
1022 
1023 	VIOIF_RX_LOCK(sc);
1024 	r = vioif_rx_deq_locked(sc);
1025 	VIOIF_RX_UNLOCK(sc);
1026 
1027 	return r;
1028 }
1029 
1030 /* dequeue received packets */
1031 static int
1032 vioif_rx_deq_locked(struct vioif_softc *sc)
1033 {
1034 	struct virtio_softc *vsc = sc->sc_virtio;
1035 	struct virtqueue *vq = &sc->sc_vq[VQ_RX];
1036 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1037 	struct mbuf *m;
1038 	int r = 0;
1039 	int slot, len;
1040 
1041 	KASSERT(VIOIF_RX_LOCKED(sc));
1042 
1043 	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
1044 		len -= sizeof(struct virtio_net_hdr);
1045 		r = 1;
1046 		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_rxhdr_dmamaps[slot],
1047 				0, sizeof(struct virtio_net_hdr),
1048 				BUS_DMASYNC_POSTREAD);
1049 		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_rx_dmamaps[slot],
1050 				0, MCLBYTES,
1051 				BUS_DMASYNC_POSTREAD);
1052 		m = sc->sc_rx_mbufs[slot];
1053 		KASSERT(m != NULL);
1054 		bus_dmamap_unload(virtio_dmat(vsc), sc->sc_rx_dmamaps[slot]);
1055 		sc->sc_rx_mbufs[slot] = NULL;
1056 		virtio_dequeue_commit(vsc, vq, slot);
1057 		m_set_rcvif(m, ifp);
1058 		m->m_len = m->m_pkthdr.len = len;
1059 
1060 		VIOIF_RX_UNLOCK(sc);
1061 		if_percpuq_enqueue(ifp->if_percpuq, m);
1062 		VIOIF_RX_LOCK(sc);
1063 
1064 		if (sc->sc_stopping)
1065 			break;
1066 	}
1067 
1068 	return r;
1069 }
1070 
1071 /* rx interrupt; call _dequeue above and schedule a softint */
1072 static int
1073 vioif_rx_vq_done(struct virtqueue *vq)
1074 {
1075 	struct virtio_softc *vsc = vq->vq_owner;
1076 	struct vioif_softc *sc = device_private(virtio_child(vsc));
1077 	int r = 0;
1078 
1079 #ifdef VIOIF_SOFTINT_INTR
1080 	KASSERT(!cpu_intr_p());
1081 #endif
1082 
1083 	VIOIF_RX_LOCK(sc);
1084 
1085 	if (sc->sc_stopping)
1086 		goto out;
1087 
1088 	r = vioif_rx_deq_locked(sc);
1089 	if (r)
1090 #ifdef VIOIF_SOFTINT_INTR
1091 		vioif_populate_rx_mbufs_locked(sc);
1092 #else
1093 		softint_schedule(sc->sc_rx_softint);
1094 #endif
1095 
1096 out:
1097 	VIOIF_RX_UNLOCK(sc);
1098 	return r;
1099 }
1100 
1101 /* softint: enqueue receive requests for new incoming packets */
1102 static void
1103 vioif_rx_softint(void *arg)
1104 {
1105 	struct vioif_softc *sc = arg;
1106 
1107 	vioif_populate_rx_mbufs(sc);
1108 }
1109 
1110 /* free all the mbufs; called from if_stop(disable) */
1111 static void
1112 vioif_rx_drain(struct vioif_softc *sc)
1113 {
1114 	struct virtqueue *vq = &sc->sc_vq[VQ_RX];
1115 	int i;
1116 
1117 	for (i = 0; i < vq->vq_num; i++) {
1118 		if (sc->sc_rx_mbufs[i] == NULL)
1119 			continue;
1120 		vioif_free_rx_mbuf(sc, i);
1121 	}
1122 }
1123 
1124 
1125 /*
1126  * Transmission implementation
1127  */
1128 /* actual transmission is done in if_start */
1129 /*
1130  * Actual transmission is done in if_start; the handlers below only
1131  * dequeue and free completed mbufs.  Since the tx interrupt is
1132  * normally kept disabled, vioif_tx_vq_done() is called when the
1133  * tx vq fills up and from the watchdog.
1134  */
1135 vioif_tx_vq_done(struct virtqueue *vq)
1136 {
1137 	struct virtio_softc *vsc = vq->vq_owner;
1138 	struct vioif_softc *sc = device_private(virtio_child(vsc));
1139 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1140 	int r = 0;
1141 
1142 	VIOIF_TX_LOCK(sc);
1143 
1144 	if (sc->sc_stopping)
1145 		goto out;
1146 
1147 	r = vioif_tx_vq_done_locked(vq);
1148 
1149 out:
1150 	VIOIF_TX_UNLOCK(sc);
1151 	if (r)
1152 		if_schedule_deferred_start(ifp);
1153 	return r;
1154 }
1155 
1156 static int
1157 vioif_tx_vq_done_locked(struct virtqueue *vq)
1158 {
1159 	struct virtio_softc *vsc = vq->vq_owner;
1160 	struct vioif_softc *sc = device_private(virtio_child(vsc));
1161 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1162 	struct mbuf *m;
1163 	int r = 0;
1164 	int slot, len;
1165 
1166 	KASSERT(VIOIF_TX_LOCKED(sc));
1167 
1168 	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
1169 		r++;
1170 		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_txhdr_dmamaps[slot],
1171 				0, sizeof(struct virtio_net_hdr),
1172 				BUS_DMASYNC_POSTWRITE);
1173 		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_tx_dmamaps[slot],
1174 				0, sc->sc_tx_dmamaps[slot]->dm_mapsize,
1175 				BUS_DMASYNC_POSTWRITE);
1176 		m = sc->sc_tx_mbufs[slot];
1177 		bus_dmamap_unload(virtio_dmat(vsc), sc->sc_tx_dmamaps[slot]);
1178 		sc->sc_tx_mbufs[slot] = NULL;
1179 		virtio_dequeue_commit(vsc, vq, slot);
1180 		ifp->if_opackets++;
1181 		m_freem(m);
1182 	}
1183 
1184 	if (r)
1185 		ifp->if_flags &= ~IFF_OACTIVE;
1186 	return r;
1187 }
1188 
1189 /* free all the mbufs already put on vq; called from if_stop(disable) */
1190 static void
1191 vioif_tx_drain(struct vioif_softc *sc)
1192 {
1193 	struct virtio_softc *vsc = sc->sc_virtio;
1194 	struct virtqueue *vq = &sc->sc_vq[VQ_TX];
1195 	int i;
1196 
1197 	KASSERT(sc->sc_stopping);
1198 
1199 	for (i = 0; i < vq->vq_num; i++) {
1200 		if (sc->sc_tx_mbufs[i] == NULL)
1201 			continue;
1202 		bus_dmamap_unload(virtio_dmat(vsc), sc->sc_tx_dmamaps[i]);
1203 		m_freem(sc->sc_tx_mbufs[i]);
1204 		sc->sc_tx_mbufs[i] = NULL;
1205 	}
1206 }
1207 
1208 /*
1209  * Control vq
1210  */
1211 /* issue a VIRTIO_NET_CTRL_RX class command and wait for completion */
1212 static int
1213 vioif_ctrl_rx(struct vioif_softc *sc, int cmd, bool onoff)
1214 {
1215 	struct virtio_softc *vsc = sc->sc_virtio;
1216 	struct virtqueue *vq = &sc->sc_vq[VQ_CTRL];
1217 	int r, slot;
1218 
1219 	if (!sc->sc_has_ctrl)
1220 		return ENOTSUP;
1221 
1222 	mutex_enter(&sc->sc_ctrl_wait_lock);
1223 	while (sc->sc_ctrl_inuse != FREE)
1224 		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
1225 	sc->sc_ctrl_inuse = INUSE;
1226 	mutex_exit(&sc->sc_ctrl_wait_lock);
1227 
1228 	sc->sc_ctrl_cmd->class = VIRTIO_NET_CTRL_RX;
1229 	sc->sc_ctrl_cmd->command = cmd;
1230 	sc->sc_ctrl_rx->onoff = onoff;
1231 
1232 	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_cmd_dmamap,
1233 			0, sizeof(struct virtio_net_ctrl_cmd),
1234 			BUS_DMASYNC_PREWRITE);
1235 	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_rx_dmamap,
1236 			0, sizeof(struct virtio_net_ctrl_rx),
1237 			BUS_DMASYNC_PREWRITE);
1238 	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_status_dmamap,
1239 			0, sizeof(struct virtio_net_ctrl_status),
1240 			BUS_DMASYNC_PREREAD);
1241 
1242 	r = virtio_enqueue_prep(vsc, vq, &slot);
1243 	if (r != 0)
1244 		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
1245 	r = virtio_enqueue_reserve(vsc, vq, slot, 3);
1246 	if (r != 0)
1247 		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
1248 	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_cmd_dmamap, true);
1249 	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_rx_dmamap, true);
1250 	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_status_dmamap, false);
1251 	virtio_enqueue_commit(vsc, vq, slot, true);
1252 
1253 	/* wait for done */
1254 	mutex_enter(&sc->sc_ctrl_wait_lock);
1255 	while (sc->sc_ctrl_inuse != DONE)
1256 		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
1257 	mutex_exit(&sc->sc_ctrl_wait_lock);
1258 	/* already dequeued */
1259 
1260 	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_cmd_dmamap, 0,
1261 			sizeof(struct virtio_net_ctrl_cmd),
1262 			BUS_DMASYNC_POSTWRITE);
1263 	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_rx_dmamap, 0,
1264 			sizeof(struct virtio_net_ctrl_rx),
1265 			BUS_DMASYNC_POSTWRITE);
1266 	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_status_dmamap, 0,
1267 			sizeof(struct virtio_net_ctrl_status),
1268 			BUS_DMASYNC_POSTREAD);
1269 
1270 	if (sc->sc_ctrl_status->ack == VIRTIO_NET_OK)
1271 		r = 0;
1272 	else {
1273 		printf("%s: failed setting rx mode\n",
1274 		       device_xname(sc->sc_dev));
1275 		r = EIO;
1276 	}
1277 
1278 	mutex_enter(&sc->sc_ctrl_wait_lock);
1279 	sc->sc_ctrl_inuse = FREE;
1280 	cv_signal(&sc->sc_ctrl_wait);
1281 	mutex_exit(&sc->sc_ctrl_wait_lock);
1282 
1283 	return r;
1284 }
1285 
1286 static int
1287 vioif_set_promisc(struct vioif_softc *sc, bool onoff)
1288 {
1289 	int r;
1290 
1291 	r = vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_PROMISC, onoff);
1292 
1293 	return r;
1294 }
1295 
1296 static int
1297 vioif_set_allmulti(struct vioif_softc *sc, bool onoff)
1298 {
1299 	int r;
1300 
1301 	r = vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, onoff);
1302 
1303 	return r;
1304 }
1305 
1306 /* issue VIRTIO_NET_CTRL_MAC_TABLE_SET command and wait for completion */
1307 static int
1308 vioif_set_rx_filter(struct vioif_softc *sc)
1309 {
1310 	/* filter already set in sc_ctrl_mac_tbl */
1311 	struct virtio_softc *vsc = sc->sc_virtio;
1312 	struct virtqueue *vq = &sc->sc_vq[VQ_CTRL];
1313 	int r, slot;
1314 
1315 	if (!sc->sc_has_ctrl)
1316 		return ENOTSUP;
1317 
1318 	mutex_enter(&sc->sc_ctrl_wait_lock);
1319 	while (sc->sc_ctrl_inuse != FREE)
1320 		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
1321 	sc->sc_ctrl_inuse = INUSE;
1322 	mutex_exit(&sc->sc_ctrl_wait_lock);
1323 
1324 	sc->sc_ctrl_cmd->class = VIRTIO_NET_CTRL_MAC;
1325 	sc->sc_ctrl_cmd->command = VIRTIO_NET_CTRL_MAC_TABLE_SET;
1326 
1327 	r = bus_dmamap_load(virtio_dmat(vsc), sc->sc_ctrl_tbl_uc_dmamap,
1328 			    sc->sc_ctrl_mac_tbl_uc,
1329 			    (sizeof(struct virtio_net_ctrl_mac_tbl)
1330 			  + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
1331 			    NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1332 	if (r) {
1333 		printf("%s: control command dmamap load failed, "
1334 		       "error code %d\n", device_xname(sc->sc_dev), r);
1335 		goto out;
1336 	}
1337 	r = bus_dmamap_load(virtio_dmat(vsc), sc->sc_ctrl_tbl_mc_dmamap,
1338 			    sc->sc_ctrl_mac_tbl_mc,
1339 			    (sizeof(struct virtio_net_ctrl_mac_tbl)
1340 			  + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
1341 			    NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1342 	if (r) {
1343 		printf("%s: control command dmamap load failed, "
1344 		       "error code %d\n", device_xname(sc->sc_dev), r);
1345 		bus_dmamap_unload(virtio_dmat(vsc), sc->sc_ctrl_tbl_uc_dmamap);
1346 		goto out;
1347 	}
1348 
1349 	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_cmd_dmamap,
1350 			0, sizeof(struct virtio_net_ctrl_cmd),
1351 			BUS_DMASYNC_PREWRITE);
1352 	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_tbl_uc_dmamap, 0,
1353 			(sizeof(struct virtio_net_ctrl_mac_tbl)
1354 			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
1355 			BUS_DMASYNC_PREWRITE);
1356 	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_tbl_mc_dmamap, 0,
1357 			(sizeof(struct virtio_net_ctrl_mac_tbl)
1358 			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
1359 			BUS_DMASYNC_PREWRITE);
1360 	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_status_dmamap,
1361 			0, sizeof(struct virtio_net_ctrl_status),
1362 			BUS_DMASYNC_PREREAD);
1363 
1364 	r = virtio_enqueue_prep(vsc, vq, &slot);
1365 	if (r != 0)
1366 		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
1367 	r = virtio_enqueue_reserve(vsc, vq, slot, 4);
1368 	if (r != 0)
1369 		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
1370 	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_cmd_dmamap, true);
1371 	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_tbl_uc_dmamap, true);
1372 	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_tbl_mc_dmamap, true);
1373 	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_status_dmamap, false);
1374 	virtio_enqueue_commit(vsc, vq, slot, true);
1375 
1376 	/* wait for done */
1377 	mutex_enter(&sc->sc_ctrl_wait_lock);
1378 	while (sc->sc_ctrl_inuse != DONE)
1379 		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
1380 	mutex_exit(&sc->sc_ctrl_wait_lock);
1381 	/* already dequeued */
1382 
1383 	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_cmd_dmamap, 0,
1384 			sizeof(struct virtio_net_ctrl_cmd),
1385 			BUS_DMASYNC_POSTWRITE);
1386 	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_tbl_uc_dmamap, 0,
1387 			(sizeof(struct virtio_net_ctrl_mac_tbl)
1388 			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
1389 			BUS_DMASYNC_POSTWRITE);
1390 	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_tbl_mc_dmamap, 0,
1391 			(sizeof(struct virtio_net_ctrl_mac_tbl)
1392 			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
1393 			BUS_DMASYNC_POSTWRITE);
1394 	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_status_dmamap, 0,
1395 			sizeof(struct virtio_net_ctrl_status),
1396 			BUS_DMASYNC_POSTREAD);
1397 	bus_dmamap_unload(virtio_dmat(vsc), sc->sc_ctrl_tbl_uc_dmamap);
1398 	bus_dmamap_unload(virtio_dmat(vsc), sc->sc_ctrl_tbl_mc_dmamap);
1399 
1400 	if (sc->sc_ctrl_status->ack == VIRTIO_NET_OK)
1401 		r = 0;
1402 	else {
1403 		printf("%s: failed setting rx filter\n",
1404 		       device_xname(sc->sc_dev));
1405 		r = EIO;
1406 	}
1407 
1408 out:
1409 	mutex_enter(&sc->sc_ctrl_wait_lock);
1410 	sc->sc_ctrl_inuse = FREE;
1411 	cv_signal(&sc->sc_ctrl_wait);
1412 	mutex_exit(&sc->sc_ctrl_wait_lock);
1413 
1414 	return r;
1415 }
1416 
1417 /* ctrl vq interrupt; wake up the command issuer */
1418 static int
1419 vioif_ctrl_vq_done(struct virtqueue *vq)
1420 {
1421 	struct virtio_softc *vsc = vq->vq_owner;
1422 	struct vioif_softc *sc = device_private(virtio_child(vsc));
1423 	int r, slot;
1424 
1425 	r = virtio_dequeue(vsc, vq, &slot, NULL);
1426 	if (r == ENOENT)
1427 		return 0;
1428 	virtio_dequeue_commit(vsc, vq, slot);
1429 
1430 	mutex_enter(&sc->sc_ctrl_wait_lock);
1431 	sc->sc_ctrl_inuse = DONE;
1432 	cv_signal(&sc->sc_ctrl_wait);
1433 	mutex_exit(&sc->sc_ctrl_wait_lock);
1434 
1435 	return 1;
1436 }
1437 
1438 /*
1439  * Rx filter policy:
1440  * If IFF_PROMISC is requested, set promiscuous mode.
1441  * If the multicast filter is small enough (<= MAXENTRIES), program
1442  * the rx filter; if the multicast list is too large, use ALLMULTI.
1443  *
1444  * If setting the rx filter fails, fall back to ALLMULTI.
1445  * If ALLMULTI fails, fall back to PROMISC.
1446  */
1447 static int
1448 vioif_rx_filter(struct vioif_softc *sc)
1449 {
1450 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1451 	struct ether_multi *enm;
1452 	struct ether_multistep step;
1453 	int nentries;
1454 	int promisc = 0, allmulti = 0, rxfilter = 0;
1455 	int r;
1456 
1457 	if (!sc->sc_has_ctrl) {	/* no ctrl vq; always promisc */
1458 		ifp->if_flags |= IFF_PROMISC;
1459 		return 0;
1460 	}
1461 
1462 	if (ifp->if_flags & IFF_PROMISC) {
1463 		promisc = 1;
1464 		goto set;
1465 	}
1466 
1467 	nentries = -1;
1468 	ETHER_LOCK(&sc->sc_ethercom);
1469 	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
1470 	while (nentries++, enm != NULL) {
1471 		if (nentries >= VIRTIO_NET_CTRL_MAC_MAXENTRIES) {
1472 			allmulti = 1;
1473 			goto set_unlock;
1474 		}
1475 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1476 			   ETHER_ADDR_LEN)) {
1477 			allmulti = 1;
1478 			goto set_unlock;
1479 		}
1480 		memcpy(sc->sc_ctrl_mac_tbl_mc->macs[nentries],
1481 		       enm->enm_addrlo, ETHER_ADDR_LEN);
1482 		ETHER_NEXT_MULTI(step, enm);
1483 	}
1484 	rxfilter = 1;
1485 
1486 set_unlock:
1487 	ETHER_UNLOCK(&sc->sc_ethercom);
1488 
1489 set:
1490 	if (rxfilter) {
1491 		sc->sc_ctrl_mac_tbl_uc->nentries = 0;
1492 		sc->sc_ctrl_mac_tbl_mc->nentries = nentries;
1493 		r = vioif_set_rx_filter(sc);
1494 		if (r != 0) {
1495 			rxfilter = 0;
1496 			allmulti = 1; /* fallback */
1497 		}
1498 	} else {
1499 		/* remove rx filter */
1500 		sc->sc_ctrl_mac_tbl_uc->nentries = 0;
1501 		sc->sc_ctrl_mac_tbl_mc->nentries = 0;
1502 		r = vioif_set_rx_filter(sc);
1503 		/* what to do on failure? */
1504 	}
1505 	if (allmulti) {
1506 		r = vioif_set_allmulti(sc, true);
1507 		if (r != 0) {
1508 			allmulti = 0;
1509 			promisc = 1; /* fallback */
1510 		}
1511 	} else {
1512 		r = vioif_set_allmulti(sc, false);
1513 		/* what to do on failure? */
1514 	}
1515 	if (promisc) {
1516 		r = vioif_set_promisc(sc, true);
1517 	} else {
1518 		r = vioif_set_promisc(sc, false);
1519 	}
1520 
1521 	return r;
1522 }
1523 
1524 static bool
1525 vioif_is_link_up(struct vioif_softc *sc)
1526 {
1527 	struct virtio_softc *vsc = sc->sc_virtio;
1528 	uint16_t status;
1529 
1530 	if (virtio_features(vsc) & VIRTIO_NET_F_STATUS)
1531 		status = virtio_read_device_config_2(vsc,
1532 		    VIRTIO_NET_CONFIG_STATUS);
1533 	else
1534 		status = VIRTIO_NET_S_LINK_UP;
1535 
1536 	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
1537 }
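/*
 * When VIRTIO_NET_F_STATUS was not negotiated the device cannot report
 * link state, so vioif_is_link_up() treats the link as always up.
 */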
1538 
1539 /* change link status */
1540 static void
1541 vioif_update_link_status(struct vioif_softc *sc)
1542 {
1543 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1544 	bool active, changed;
1545 	int link;
1546 
1547 	active = vioif_is_link_up(sc);
1548 	changed = false;
1549 
1550 	VIOIF_TX_LOCK(sc);
1551 	if (active) {
1552 		if (!sc->sc_link_active)
1553 			changed = true;
1554 
1555 		link = LINK_STATE_UP;
1556 		sc->sc_link_active = true;
1557 	} else {
1558 		if (sc->sc_link_active)
1559 			changed = true;
1560 
1561 		link = LINK_STATE_DOWN;
1562 		sc->sc_link_active = false;
1563 	}
1564 	VIOIF_TX_UNLOCK(sc);
1565 
1566 	if (changed)
1567 		if_link_state_change(ifp, link);
1568 }
1569 
1570 static int
1571 vioif_config_change(struct virtio_softc *vsc)
1572 {
1573 	struct vioif_softc *sc = device_private(virtio_child(vsc));
1574 
1575 #ifdef VIOIF_SOFTINT_INTR
1576 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1577 #endif
1578 
1579 #ifdef VIOIF_SOFTINT_INTR
1580 	KASSERT(!cpu_intr_p());
1581 	vioif_update_link_status(sc);
1582 	vioif_start(ifp);
1583 #else
1584 	softint_schedule(sc->sc_ctl_softint);
1585 #endif
1586 
1587 	return 0;
1588 }
1589 
1590 static void
1591 vioif_ctl_softint(void *arg)
1592 {
1593 	struct vioif_softc *sc = arg;
1594 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1595 
1596 	vioif_update_link_status(sc);
1597 	vioif_start(ifp);
1598 }
1599 
1600 MODULE(MODULE_CLASS_DRIVER, if_vioif, "virtio");
1601 
1602 #ifdef _MODULE
1603 #include "ioconf.c"
1604 #endif
1605 
1606 static int
1607 if_vioif_modcmd(modcmd_t cmd, void *opaque)
1608 {
1609 	int error = 0;
1610 
1611 #ifdef _MODULE
1612 	switch (cmd) {
1613 	case MODULE_CMD_INIT:
1614 		error = config_init_component(cfdriver_ioconf_if_vioif,
1615 		    cfattach_ioconf_if_vioif, cfdata_ioconf_if_vioif);
1616 		break;
1617 	case MODULE_CMD_FINI:
1618 		error = config_fini_component(cfdriver_ioconf_if_vioif,
1619 		    cfattach_ioconf_if_vioif, cfdata_ioconf_if_vioif);
1620 		break;
1621 	default:
1622 		error = ENOTTY;
1623 		break;
1624 	}
1625 #endif
1626 
1627 	return error;
1628 }
1629