/*	$NetBSD: if_vioif.c,v 1.38 2017/06/01 02:45:11 chs Exp $	*/

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.38 2017/06/01 02:45:11 chs Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/sockio.h>
#include <sys/cpu.h>
#include <sys/module.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/virtioreg.h>
#include <dev/pci/virtiovar.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include "ioconf.h"

#ifdef NET_MPSAFE
#define VIOIF_MPSAFE	1
#endif

#ifdef SOFTINT_INTR
#define VIOIF_SOFTINT_INTR	1
#endif

/*
 * if_vioifreg.h:
 */
/* Configuration registers */
#define VIRTIO_NET_CONFIG_MAC		0 /* 8bit x 6byte */
#define VIRTIO_NET_CONFIG_STATUS	6 /* 16bit */
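
/*
 * Both values are byte offsets into the device-specific configuration
 * space, accessed below with virtio_read_device_config_1()/_2() (and,
 * for the MAC, virtio_write_device_config_1()).
 */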

/* Feature bits */
#define VIRTIO_NET_F_CSUM	(1<<0)
#define VIRTIO_NET_F_GUEST_CSUM	(1<<1)
#define VIRTIO_NET_F_MAC	(1<<5)
#define VIRTIO_NET_F_GSO	(1<<6)
#define VIRTIO_NET_F_GUEST_TSO4	(1<<7)
#define VIRTIO_NET_F_GUEST_TSO6	(1<<8)
#define VIRTIO_NET_F_GUEST_ECN	(1<<9)
#define VIRTIO_NET_F_GUEST_UFO	(1<<10)
#define VIRTIO_NET_F_HOST_TSO4	(1<<11)
#define VIRTIO_NET_F_HOST_TSO6	(1<<12)
#define VIRTIO_NET_F_HOST_ECN	(1<<13)
#define VIRTIO_NET_F_HOST_UFO	(1<<14)
#define VIRTIO_NET_F_MRG_RXBUF	(1<<15)
#define VIRTIO_NET_F_STATUS	(1<<16)
#define VIRTIO_NET_F_CTRL_VQ	(1<<17)
#define VIRTIO_NET_F_CTRL_RX	(1<<18)
#define VIRTIO_NET_F_CTRL_VLAN	(1<<19)

#define VIRTIO_NET_FLAG_BITS \
	VIRTIO_COMMON_FLAG_BITS \
	"\x14""CTRL_VLAN" \
	"\x13""CTRL_RX" \
	"\x12""CTRL_VQ" \
	"\x11""STATUS" \
	"\x10""MRG_RXBUF" \
	"\x0f""HOST_UFO" \
	"\x0e""HOST_ECN" \
	"\x0d""HOST_TSO6" \
	"\x0c""HOST_TSO4" \
	"\x0b""GUEST_UFO" \
	"\x0a""GUEST_ECN" \
	"\x09""GUEST_TSO6" \
	"\x08""GUEST_TSO4" \
	"\x07""GSO" \
	"\x06""MAC" \
	"\x02""GUEST_CSUM" \
	"\x01""CSUM"

/* Status */
#define VIRTIO_NET_S_LINK_UP	1

/* Packet header structure */
struct virtio_net_hdr {
	uint8_t		flags;
	uint8_t		gso_type;
	uint16_t	hdr_len;
	uint16_t	gso_size;
	uint16_t	csum_start;
	uint16_t	csum_offset;
#if 0
	uint16_t	num_buffers; /* if VIRTIO_NET_F_MRG_RXBUF enabled */
#endif
} __packed;

#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1 /* flags */
#define VIRTIO_NET_HDR_GSO_NONE		0 /* gso_type */
#define VIRTIO_NET_HDR_GSO_TCPV4	1 /* gso_type */
#define VIRTIO_NET_HDR_GSO_UDP		3 /* gso_type */
#define VIRTIO_NET_HDR_GSO_TCPV6	4 /* gso_type */
#define VIRTIO_NET_HDR_GSO_ECN		0x80 /* gso_type, |'ed */

#define VIRTIO_NET_MAX_GSO_LEN		(65536+ETHER_HDR_LEN)
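
/*
 * Note: this driver negotiates neither checksum offload nor any GSO
 * feature, so every header it transmits is zero-filled (flags == 0,
 * gso_type == VIRTIO_NET_HDR_GSO_NONE; see the memset() in vioif_start()),
 * and on receive the header is synced but its fields are not inspected.
 */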

/* Control virtqueue */
struct virtio_net_ctrl_cmd {
	uint8_t	class;
	uint8_t	command;
} __packed;
#define VIRTIO_NET_CTRL_RX		0
# define VIRTIO_NET_CTRL_RX_PROMISC	0
# define VIRTIO_NET_CTRL_RX_ALLMULTI	1

#define VIRTIO_NET_CTRL_MAC		1
# define VIRTIO_NET_CTRL_MAC_TABLE_SET	0

#define VIRTIO_NET_CTRL_VLAN		2
# define VIRTIO_NET_CTRL_VLAN_ADD	0
# define VIRTIO_NET_CTRL_VLAN_DEL	1

struct virtio_net_ctrl_status {
	uint8_t	ack;
} __packed;
#define VIRTIO_NET_OK			0
#define VIRTIO_NET_ERR			1

struct virtio_net_ctrl_rx {
	uint8_t	onoff;
} __packed;

struct virtio_net_ctrl_mac_tbl {
	uint32_t nentries;
	uint8_t macs[][ETHER_ADDR_LEN];
} __packed;

struct virtio_net_ctrl_vlan {
	uint16_t id;
} __packed;


/*
 * if_vioifvar.h:
 */
struct vioif_softc {
	device_t		sc_dev;

	struct virtio_softc	*sc_virtio;
	struct virtqueue	sc_vq[3];
#define VQ_RX	0
#define VQ_TX	1
#define VQ_CTRL	2

	uint8_t			sc_mac[ETHER_ADDR_LEN];
	struct ethercom		sc_ethercom;
	short			sc_deferred_init_done;
	bool			sc_link_active;

	/* bus_dmamem */
	bus_dma_segment_t	sc_hdr_segs[1];
	struct virtio_net_hdr	*sc_hdrs;
#define sc_rx_hdrs	sc_hdrs
	struct virtio_net_hdr	*sc_tx_hdrs;
	struct virtio_net_ctrl_cmd *sc_ctrl_cmd;
	struct virtio_net_ctrl_status *sc_ctrl_status;
	struct virtio_net_ctrl_rx *sc_ctrl_rx;
	struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_uc;
	struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_mc;

	/* kmem */
	bus_dmamap_t		*sc_arrays;
#define sc_rxhdr_dmamaps sc_arrays
	bus_dmamap_t		*sc_txhdr_dmamaps;
	bus_dmamap_t		*sc_rx_dmamaps;
	bus_dmamap_t		*sc_tx_dmamaps;
	struct mbuf		**sc_rx_mbufs;
	struct mbuf		**sc_tx_mbufs;

	bus_dmamap_t		sc_ctrl_cmd_dmamap;
	bus_dmamap_t		sc_ctrl_status_dmamap;
	bus_dmamap_t		sc_ctrl_rx_dmamap;
	bus_dmamap_t		sc_ctrl_tbl_uc_dmamap;
	bus_dmamap_t		sc_ctrl_tbl_mc_dmamap;

	void			*sc_rx_softint;
	void			*sc_ctl_softint;

	enum {
		FREE, INUSE, DONE
	}			sc_ctrl_inuse;
	kcondvar_t		sc_ctrl_wait;
	kmutex_t		sc_ctrl_wait_lock;
	kmutex_t		sc_tx_lock;
	kmutex_t		sc_rx_lock;
	bool			sc_stopping;

	bool			sc_has_ctrl;
};
#define VIRTIO_NET_TX_MAXNSEGS		(16) /* XXX */
#define VIRTIO_NET_CTRL_MAC_MAXENTRIES	(64) /* XXX */

#define VIOIF_TX_LOCK(_sc)	mutex_enter(&(_sc)->sc_tx_lock)
#define VIOIF_TX_UNLOCK(_sc)	mutex_exit(&(_sc)->sc_tx_lock)
#define VIOIF_TX_LOCKED(_sc)	mutex_owned(&(_sc)->sc_tx_lock)
#define VIOIF_RX_LOCK(_sc)	mutex_enter(&(_sc)->sc_rx_lock)
#define VIOIF_RX_UNLOCK(_sc)	mutex_exit(&(_sc)->sc_rx_lock)
#define VIOIF_RX_LOCKED(_sc)	mutex_owned(&(_sc)->sc_rx_lock)
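
/*
 * Lock order: when both locks are held, sc_tx_lock is taken before
 * sc_rx_lock (see vioif_stop()).
 */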

/* cfattach interface functions */
static int	vioif_match(device_t, cfdata_t, void *);
static void	vioif_attach(device_t, device_t, void *);
static void	vioif_deferred_init(device_t);

/* ifnet interface functions */
static int	vioif_init(struct ifnet *);
static void	vioif_stop(struct ifnet *, int);
static void	vioif_start(struct ifnet *);
static int	vioif_ioctl(struct ifnet *, u_long, void *);
static void	vioif_watchdog(struct ifnet *);

/* rx */
static int	vioif_add_rx_mbuf(struct vioif_softc *, int);
static void	vioif_free_rx_mbuf(struct vioif_softc *, int);
static void	vioif_populate_rx_mbufs(struct vioif_softc *);
static void	vioif_populate_rx_mbufs_locked(struct vioif_softc *);
static int	vioif_rx_deq(struct vioif_softc *);
static int	vioif_rx_deq_locked(struct vioif_softc *);
static int	vioif_rx_vq_done(struct virtqueue *);
static void	vioif_rx_softint(void *);
static void	vioif_rx_drain(struct vioif_softc *);

/* tx */
static int	vioif_tx_vq_done(struct virtqueue *);
static int	vioif_tx_vq_done_locked(struct virtqueue *);
static void	vioif_tx_drain(struct vioif_softc *);

/* other control */
static bool	vioif_is_link_up(struct vioif_softc *);
static void	vioif_update_link_status(struct vioif_softc *);
static int	vioif_ctrl_rx(struct vioif_softc *, int, bool);
static int	vioif_set_promisc(struct vioif_softc *, bool);
static int	vioif_set_allmulti(struct vioif_softc *, bool);
static int	vioif_set_rx_filter(struct vioif_softc *);
static int	vioif_rx_filter(struct vioif_softc *);
static int	vioif_ctrl_vq_done(struct virtqueue *);
static int	vioif_config_change(struct virtio_softc *);
static void	vioif_ctl_softint(void *);

CFATTACH_DECL_NEW(vioif, sizeof(struct vioif_softc),
		  vioif_match, vioif_attach, NULL, NULL);

static int
vioif_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_attach_args *va = aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_NETWORK)
		return 1;

	return 0;
}

/* allocate memory */
/*
 * DMA memory is used for:
 *   sc_rx_hdrs[slot]:	 metadata array for received frames (READ)
 *   sc_tx_hdrs[slot]:	 metadata array for frames to be sent (WRITE)
 *   sc_ctrl_cmd:	 command to be sent via the ctrl vq (WRITE)
 *   sc_ctrl_status:	 return value for a command via the ctrl vq (READ)
 *   sc_ctrl_rx:	 parameter for a VIRTIO_NET_CTRL_RX class command
 *			 (WRITE)
 *   sc_ctrl_mac_tbl_uc: unicast MAC address filter for a VIRTIO_NET_CTRL_MAC
 *			 class command (WRITE)
 *   sc_ctrl_mac_tbl_mc: multicast MAC address filter for a VIRTIO_NET_CTRL_MAC
 *			 class command (WRITE)
 * Only one instance of each sc_ctrl_* structure is allocated; they are
 * protected by the sc_ctrl_inuse variable and the sc_ctrl_wait condvar.
 */
/*
 * Dynamically allocated memory is used for:
 *   sc_rxhdr_dmamaps[slot]:	bus_dmamap_t array for sc_rx_hdrs[slot]
 *   sc_txhdr_dmamaps[slot]:	bus_dmamap_t array for sc_tx_hdrs[slot]
 *   sc_rx_dmamaps[slot]:	bus_dmamap_t array for received payload
 *   sc_tx_dmamaps[slot]:	bus_dmamap_t array for sent payload
 *   sc_rx_mbufs[slot]:		mbuf pointer array for received frames
 *   sc_tx_mbufs[slot]:		mbuf pointer array for sent frames
 */
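
/*
 * Layout of the single bus_dmamem region carved up by vioif_alloc_mems(),
 * in increasing-offset order:
 *
 *	sc_rx_hdrs[0 .. rxqsize-1]
 *	sc_tx_hdrs[0 .. txqsize-1]
 *	(the rest only when sc_has_ctrl:)
 *	sc_ctrl_cmd
 *	sc_ctrl_status
 *	sc_ctrl_rx
 *	sc_ctrl_mac_tbl_uc	(header only; always sent with 0 entries)
 *	sc_ctrl_mac_tbl_mc	(header + VIRTIO_NET_CTRL_MAC_MAXENTRIES)
 */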
static int
vioif_alloc_mems(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	int allocsize, allocsize2, r, rsegs, i;
	void *vaddr;
	intptr_t p;
	int rxqsize, txqsize;

	rxqsize = sc->sc_vq[VQ_RX].vq_num;
	txqsize = sc->sc_vq[VQ_TX].vq_num;

	allocsize = sizeof(struct virtio_net_hdr) * rxqsize;
	allocsize += sizeof(struct virtio_net_hdr) * txqsize;
	if (sc->sc_has_ctrl) {
		allocsize += sizeof(struct virtio_net_ctrl_cmd) * 1;
		allocsize += sizeof(struct virtio_net_ctrl_status) * 1;
		allocsize += sizeof(struct virtio_net_ctrl_rx) * 1;
		allocsize += sizeof(struct virtio_net_ctrl_mac_tbl)
			+ sizeof(struct virtio_net_ctrl_mac_tbl)
			+ ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES;
	}
	r = bus_dmamem_alloc(virtio_dmat(vsc), allocsize, 0, 0,
			     &sc->sc_hdr_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "DMA memory allocation failed, size %d, "
				 "error code %d\n", allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(virtio_dmat(vsc),
			   &sc->sc_hdr_segs[0], 1, allocsize,
			   &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "DMA memory map failed, "
				 "error code %d\n", r);
		goto err_dmamem_alloc;
	}
	sc->sc_hdrs = vaddr;
	memset(vaddr, 0, allocsize);
	p = (intptr_t) vaddr;
	p += sizeof(struct virtio_net_hdr) * rxqsize;
#define P(name,size)	do { sc->sc_ ##name = (void*) p;	\
			     p += size; } while (0)
	P(tx_hdrs, sizeof(struct virtio_net_hdr) * txqsize);
	if (sc->sc_has_ctrl) {
		P(ctrl_cmd, sizeof(struct virtio_net_ctrl_cmd));
		P(ctrl_status, sizeof(struct virtio_net_ctrl_status));
		P(ctrl_rx, sizeof(struct virtio_net_ctrl_rx));
		P(ctrl_mac_tbl_uc, sizeof(struct virtio_net_ctrl_mac_tbl));
		P(ctrl_mac_tbl_mc,
		  (sizeof(struct virtio_net_ctrl_mac_tbl)
		   + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES));
	}
#undef P

	allocsize2 = sizeof(bus_dmamap_t) * (rxqsize + txqsize);
	allocsize2 += sizeof(bus_dmamap_t) * (rxqsize + txqsize);
	allocsize2 += sizeof(struct mbuf*) * (rxqsize + txqsize);
	sc->sc_arrays = kmem_zalloc(allocsize2, KM_SLEEP);
	sc->sc_txhdr_dmamaps = sc->sc_arrays + rxqsize;
	sc->sc_rx_dmamaps = sc->sc_txhdr_dmamaps + txqsize;
	sc->sc_tx_dmamaps = sc->sc_rx_dmamaps + rxqsize;
	sc->sc_rx_mbufs = (void*) (sc->sc_tx_dmamaps + txqsize);
	sc->sc_tx_mbufs = sc->sc_rx_mbufs + rxqsize;

#define C(map, buf, size, nsegs, rw, usage)				\
	do {								\
		r = bus_dmamap_create(virtio_dmat(vsc), size, nsegs, size, 0, \
				      BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,	\
				      &sc->sc_ ##map);			\
		if (r != 0) {						\
			aprint_error_dev(sc->sc_dev,			\
					 usage " dmamap creation failed, " \
					 "error code %d\n", r);		\
			goto err_reqs;					\
		}							\
	} while (0)
#define C_L1(map, buf, size, nsegs, rw, usage)				\
	C(map, buf, size, nsegs, rw, usage);				\
	do {								\
		r = bus_dmamap_load(virtio_dmat(vsc), sc->sc_ ##map,	\
				    &sc->sc_ ##buf, size, NULL,		\
				    BUS_DMA_ ##rw | BUS_DMA_NOWAIT);	\
		if (r != 0) {						\
			aprint_error_dev(sc->sc_dev,			\
					 usage " dmamap load failed, "	\
					 "error code %d\n", r);		\
			goto err_reqs;					\
		}							\
	} while (0)
#define C_L2(map, buf, size, nsegs, rw, usage)				\
	C(map, buf, size, nsegs, rw, usage);				\
	do {								\
		r = bus_dmamap_load(virtio_dmat(vsc), sc->sc_ ##map,	\
				    sc->sc_ ##buf, size, NULL,		\
				    BUS_DMA_ ##rw | BUS_DMA_NOWAIT);	\
		if (r != 0) {						\
			aprint_error_dev(sc->sc_dev,			\
					 usage " dmamap load failed, "	\
					 "error code %d\n", r);		\
			goto err_reqs;					\
		}							\
	} while (0)
	for (i = 0; i < rxqsize; i++) {
		C_L1(rxhdr_dmamaps[i], rx_hdrs[i],
		    sizeof(struct virtio_net_hdr), 1,
		    READ, "rx header");
		C(rx_dmamaps[i], NULL, MCLBYTES, 1, 0, "rx payload");
	}

	for (i = 0; i < txqsize; i++) {
		C_L1(txhdr_dmamaps[i], tx_hdrs[i],
		    sizeof(struct virtio_net_hdr), 1,
		    WRITE, "tx header");
		C(tx_dmamaps[i], NULL, ETHER_MAX_LEN, VIRTIO_NET_TX_MAXNSEGS, 0,
		  "tx payload");
	}

	if (sc->sc_has_ctrl) {
		/* control vq class & command */
		C_L2(ctrl_cmd_dmamap, ctrl_cmd,
		    sizeof(struct virtio_net_ctrl_cmd), 1, WRITE,
		    "control command");

		/* control vq status */
		C_L2(ctrl_status_dmamap, ctrl_status,
		    sizeof(struct virtio_net_ctrl_status), 1, READ,
		    "control status");

		/* control vq rx mode command parameter */
		C_L2(ctrl_rx_dmamap, ctrl_rx,
		    sizeof(struct virtio_net_ctrl_rx), 1, WRITE,
		    "rx mode control command");

		/* control vq MAC filter table for unicast */
		/* do not load now since its length is variable */
		C(ctrl_tbl_uc_dmamap, NULL,
		  sizeof(struct virtio_net_ctrl_mac_tbl) + 0, 1, WRITE,
		  "unicast MAC address filter command");

		/* control vq MAC filter table for multicast */
		C(ctrl_tbl_mc_dmamap, NULL,
		  (sizeof(struct virtio_net_ctrl_mac_tbl)
		   + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES),
		  1, WRITE, "multicast MAC address filter command");
	}
#undef C_L2
#undef C_L1
#undef C

	return 0;

err_reqs:
#define D(map)								\
	do {								\
		if (sc->sc_ ##map) {					\
			bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_ ##map); \
			sc->sc_ ##map = NULL;				\
		}							\
	} while (0)
	D(ctrl_tbl_mc_dmamap);
	D(ctrl_tbl_uc_dmamap);
	D(ctrl_rx_dmamap);
	D(ctrl_status_dmamap);
	D(ctrl_cmd_dmamap);
	for (i = 0; i < txqsize; i++) {
		D(tx_dmamaps[i]);
		D(txhdr_dmamaps[i]);
	}
	for (i = 0; i < rxqsize; i++) {
		D(rx_dmamaps[i]);
		D(rxhdr_dmamaps[i]);
	}
#undef D
	if (sc->sc_arrays) {
		kmem_free(sc->sc_arrays, allocsize2);
		sc->sc_arrays = NULL;
	}
	bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_hdrs, allocsize);
err_dmamem_alloc:
	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_hdr_segs[0], 1);
err_none:
	return -1;
}

static void
vioif_attach(device_t parent, device_t self, void *aux)
{
	struct vioif_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	uint32_t features;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int flags;
	int r, nvqs=0, req_flags;

	if (virtio_child(vsc) != NULL) {
		aprint_normal(": child already attached for %s; "
			      "something is wrong...\n",
			      device_xname(parent));
		return;
	}

	sc->sc_dev = self;
	sc->sc_virtio = vsc;
	sc->sc_link_active = false;

	req_flags = 0;

#ifdef VIOIF_MPSAFE
	req_flags |= VIRTIO_F_PCI_INTR_MPSAFE;
#endif
#ifdef VIOIF_SOFTINT_INTR
	req_flags |= VIRTIO_F_PCI_INTR_SOFTINT;
#endif
	req_flags |= VIRTIO_F_PCI_INTR_MSIX;

	virtio_child_attach_start(vsc, self, IPL_NET, sc->sc_vq,
	    vioif_config_change, virtio_vq_intr, req_flags,
	    (VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | VIRTIO_NET_F_CTRL_VQ |
	     VIRTIO_NET_F_CTRL_RX | VIRTIO_F_NOTIFY_ON_EMPTY),
	    VIRTIO_NET_FLAG_BITS);

	features = virtio_features(vsc);

	if (features & VIRTIO_NET_F_MAC) {
		sc->sc_mac[0] = virtio_read_device_config_1(vsc,
						    VIRTIO_NET_CONFIG_MAC+0);
		sc->sc_mac[1] = virtio_read_device_config_1(vsc,
						    VIRTIO_NET_CONFIG_MAC+1);
		sc->sc_mac[2] = virtio_read_device_config_1(vsc,
						    VIRTIO_NET_CONFIG_MAC+2);
		sc->sc_mac[3] = virtio_read_device_config_1(vsc,
						    VIRTIO_NET_CONFIG_MAC+3);
		sc->sc_mac[4] = virtio_read_device_config_1(vsc,
						    VIRTIO_NET_CONFIG_MAC+4);
		sc->sc_mac[5] = virtio_read_device_config_1(vsc,
						    VIRTIO_NET_CONFIG_MAC+5);
	} else {
		/* code stolen from sys/net/if_tap.c */
		struct timeval tv;
		uint32_t ui;
		getmicrouptime(&tv);
		ui = (tv.tv_sec ^ tv.tv_usec) & 0xffffff;
		memcpy(sc->sc_mac+3, (uint8_t *)&ui, 3);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+0,
					     sc->sc_mac[0]);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+1,
					     sc->sc_mac[1]);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+2,
					     sc->sc_mac[2]);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+3,
					     sc->sc_mac[3]);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+4,
					     sc->sc_mac[4]);
		virtio_write_device_config_1(vsc,
					     VIRTIO_NET_CONFIG_MAC+5,
					     sc->sc_mac[5]);
	}

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_mac));

	mutex_init(&sc->sc_tx_lock, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rx_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_stopping = false;

	/*
	 * Allocate a virtqueue for Rx.
	 */
	r = virtio_alloc_vq(vsc, &sc->sc_vq[VQ_RX], VQ_RX,
	    MCLBYTES+sizeof(struct virtio_net_hdr), 2, "rx");
	if (r != 0)
		goto err;
	nvqs = 1;
	sc->sc_vq[VQ_RX].vq_done = vioif_rx_vq_done;

	/*
	 * Allocate a virtqueue for Tx.
	 */
	r = virtio_alloc_vq(vsc, &sc->sc_vq[VQ_TX], VQ_TX,
	    (sizeof(struct virtio_net_hdr) + (ETHER_MAX_LEN - ETHER_HDR_LEN)),
	    VIRTIO_NET_TX_MAXNSEGS + 1, "tx");
	if (r != 0)
		goto err;
	nvqs = 2;
	sc->sc_vq[VQ_TX].vq_done = vioif_tx_vq_done;

	virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_RX]);
	virtio_stop_vq_intr(vsc, &sc->sc_vq[VQ_TX]); /* not urgent; do it later */

	if ((features & VIRTIO_NET_F_CTRL_VQ) &&
	    (features & VIRTIO_NET_F_CTRL_RX)) {
		/*
		 * Allocate a virtqueue for the control channel.
		 */
		r = virtio_alloc_vq(vsc, &sc->sc_vq[VQ_CTRL], VQ_CTRL,
		    NBPG, 1, "control");
		if (r != 0) {
			aprint_error_dev(self, "failed to allocate "
			    "a virtqueue for the control channel\n");
			goto skip;
		}

		sc->sc_vq[VQ_CTRL].vq_done = vioif_ctrl_vq_done;
		cv_init(&sc->sc_ctrl_wait, "ctrl_vq");
		mutex_init(&sc->sc_ctrl_wait_lock, MUTEX_DEFAULT, IPL_NET);
		sc->sc_ctrl_inuse = FREE;
		virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_CTRL]);
		sc->sc_has_ctrl = true;
		nvqs = 3;
	}
skip:

#ifdef VIOIF_MPSAFE
	flags = SOFTINT_NET | SOFTINT_MPSAFE;
#else
	flags = SOFTINT_NET;
#endif
	sc->sc_rx_softint = softint_establish(flags, vioif_rx_softint, sc);
	if (sc->sc_rx_softint == NULL) {
		aprint_error_dev(self, "cannot establish rx softint\n");
		goto err;
	}

	sc->sc_ctl_softint = softint_establish(flags, vioif_ctl_softint, sc);
	if (sc->sc_ctl_softint == NULL) {
		aprint_error_dev(self, "cannot establish ctl softint\n");
		goto err;
	}

	if (vioif_alloc_mems(sc) < 0)
		goto err;

	if (virtio_child_attach_finish(vsc) != 0)
		goto err;

	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = vioif_start;
	ifp->if_ioctl = vioif_ioctl;
	ifp->if_init = vioif_init;
	ifp->if_stop = vioif_stop;
	ifp->if_capabilities = 0;
	ifp->if_watchdog = vioif_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, MAX(sc->sc_vq[VQ_TX].vq_num, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->sc_mac);

	return;

err:
	mutex_destroy(&sc->sc_tx_lock);
	mutex_destroy(&sc->sc_rx_lock);

	if (sc->sc_has_ctrl) {
		cv_destroy(&sc->sc_ctrl_wait);
		mutex_destroy(&sc->sc_ctrl_wait_lock);
	}

	while (nvqs > 0)
		virtio_free_vq(vsc, &sc->sc_vq[--nvqs]);

	virtio_child_attach_failed(vsc);
	return;
}

/* interrupts are needed in order to turn promiscuous mode off */
static void
vioif_deferred_init(device_t self)
{
	struct vioif_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int r;

	if (ifp->if_flags & IFF_PROMISC)
		return;

	r = vioif_set_promisc(sc, false);
	if (r != 0)
		aprint_error_dev(self, "resetting promisc mode failed, "
				 "error code %d\n", r);
}

/*
 * Interface functions for ifnet
 */
static int
vioif_init(struct ifnet *ifp)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct virtio_softc *vsc = sc->sc_virtio;

	vioif_stop(ifp, 0);

	virtio_reinit_start(vsc);
	virtio_negotiate_features(vsc, virtio_features(vsc));
	virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_RX]);
	virtio_stop_vq_intr(vsc, &sc->sc_vq[VQ_TX]);
	if (sc->sc_has_ctrl)
		virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_CTRL]);
	virtio_reinit_end(vsc);

	if (!sc->sc_deferred_init_done) {
		sc->sc_deferred_init_done = 1;
		if (sc->sc_has_ctrl)
			vioif_deferred_init(sc->sc_dev);
	}

	/* sc_stopping must be cleared before vioif_populate_rx_mbufs() */
	sc->sc_stopping = false;

	vioif_populate_rx_mbufs(sc);

	vioif_update_link_status(sc);
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	vioif_rx_filter(sc);

	return 0;
}

static void
vioif_stop(struct ifnet *ifp, int disable)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct virtio_softc *vsc = sc->sc_virtio;

	/* Take the locks to ensure that ongoing TX/RX finish */
	VIOIF_TX_LOCK(sc);
	VIOIF_RX_LOCK(sc);
	sc->sc_stopping = true;
	VIOIF_RX_UNLOCK(sc);
	VIOIF_TX_UNLOCK(sc);

	/* disable interrupts */
	virtio_stop_vq_intr(vsc, &sc->sc_vq[VQ_RX]);
	virtio_stop_vq_intr(vsc, &sc->sc_vq[VQ_TX]);
	if (sc->sc_has_ctrl)
		virtio_stop_vq_intr(vsc, &sc->sc_vq[VQ_CTRL]);

	/* resetting the device is the only way to stop I/O and DMA */
	virtio_reset(vsc);
	vioif_rx_deq(sc);
	vioif_tx_drain(sc);
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sc_link_active = false;

	if (disable)
		vioif_rx_drain(sc);
}

static void
vioif_start(struct ifnet *ifp)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[VQ_TX];
	struct mbuf *m;
	int queued = 0;

	VIOIF_TX_LOCK(sc);

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING ||
	    !sc->sc_link_active)
		goto out;

	if (sc->sc_stopping)
		goto out;

	for (;;) {
		int slot, r;

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		r = virtio_enqueue_prep(vsc, vq, &slot);
		if (r == EAGAIN) {
			ifp->if_flags |= IFF_OACTIVE;
			m_freem(m);
			break;
		}
		if (r != 0)
			panic("enqueue_prep for a tx buffer");

		r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
					 sc->sc_tx_dmamaps[slot],
					 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (r != 0) {
			/* maybe just too fragmented */
			struct mbuf *newm;

			newm = m_defrag(m, M_NOWAIT);
			if (newm == NULL) {
				aprint_error_dev(sc->sc_dev,
				    "m_defrag() failed\n");
				goto skip;
			}

			m = newm;
			r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
					 sc->sc_tx_dmamaps[slot],
					 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (r != 0) {
				aprint_error_dev(sc->sc_dev,
				    "tx dmamap load failed, error code %d\n",
				    r);
skip:
				m_freem(m);
				virtio_enqueue_abort(vsc, vq, slot);
				continue;
			}
		}

		/* This should actually never fail */
		r = virtio_enqueue_reserve(vsc, vq, slot,
					sc->sc_tx_dmamaps[slot]->dm_nsegs + 1);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "virtio_enqueue_reserve failed, error code %d\n",
			    r);
			bus_dmamap_unload(virtio_dmat(vsc),
					  sc->sc_tx_dmamaps[slot]);
			/* slot already freed by virtio_enqueue_reserve */
			m_freem(m);
			continue;
		}

		sc->sc_tx_mbufs[slot] = m;

		memset(&sc->sc_tx_hdrs[slot], 0, sizeof(struct virtio_net_hdr));
		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_tx_dmamaps[slot],
				0, sc->sc_tx_dmamaps[slot]->dm_mapsize,
				BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_txhdr_dmamaps[slot],
				0, sc->sc_txhdr_dmamaps[slot]->dm_mapsize,
				BUS_DMASYNC_PREWRITE);
		virtio_enqueue(vsc, vq, slot, sc->sc_txhdr_dmamaps[slot], true);
		virtio_enqueue(vsc, vq, slot, sc->sc_tx_dmamaps[slot], true);
		virtio_enqueue_commit(vsc, vq, slot, false);

		queued++;
		bpf_mtap(ifp, m);
	}

	if (queued > 0) {
		virtio_enqueue_commit(vsc, vq, -1, true);
		ifp->if_timer = 5;
	}

out:
	VIOIF_TX_UNLOCK(sc);
}

static int
vioif_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, r;

	s = splnet();

	r = ether_ioctl(ifp, cmd, data);
	if ((r == 0 && cmd == SIOCSIFFLAGS) ||
	    (r == ENETRESET && (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI))) {
		if (ifp->if_flags & IFF_RUNNING)
			r = vioif_rx_filter(ifp->if_softc);
		else
			r = 0;
	}

	splx(s);

	return r;
}

static void
vioif_watchdog(struct ifnet *ifp)
{
	struct vioif_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_RUNNING)
		vioif_tx_vq_done(&sc->sc_vq[VQ_TX]);
}


/*
 * Receive implementation
 */
/* allocate and initialize an mbuf for receive */
static int
vioif_add_rx_mbuf(struct vioif_softc *sc, int i)
{
	struct mbuf *m;
	int r;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}
	sc->sc_rx_mbufs[i] = m;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	r = bus_dmamap_load_mbuf(virtio_dmat(sc->sc_virtio),
				 sc->sc_rx_dmamaps[i],
				 m, BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (r) {
		m_freem(m);
		sc->sc_rx_mbufs[i] = NULL;
		return r;
	}

	return 0;
}

/* free an mbuf for receive */
static void
vioif_free_rx_mbuf(struct vioif_softc *sc, int i)
{
	bus_dmamap_unload(virtio_dmat(sc->sc_virtio), sc->sc_rx_dmamaps[i]);
	m_freem(sc->sc_rx_mbufs[i]);
	sc->sc_rx_mbufs[i] = NULL;
}

/* add mbufs for all the empty receive slots */
static void
vioif_populate_rx_mbufs(struct vioif_softc *sc)
{
	VIOIF_RX_LOCK(sc);
	vioif_populate_rx_mbufs_locked(sc);
	VIOIF_RX_UNLOCK(sc);
}

static void
vioif_populate_rx_mbufs_locked(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	int i, r, ndone = 0;
	struct virtqueue *vq = &sc->sc_vq[VQ_RX];

	KASSERT(VIOIF_RX_LOCKED(sc));

	if (sc->sc_stopping)
		return;

	for (i = 0; i < vq->vq_num; i++) {
		int slot;
		r = virtio_enqueue_prep(vsc, vq, &slot);
		if (r == EAGAIN)
			break;
		if (r != 0)
			panic("enqueue_prep for rx buffers");
		if (sc->sc_rx_mbufs[slot] == NULL) {
			r = vioif_add_rx_mbuf(sc, slot);
			if (r != 0) {
				printf("%s: rx mbuf allocation failed, "
				       "error code %d\n",
				       device_xname(sc->sc_dev), r);
				break;
			}
		}
		r = virtio_enqueue_reserve(vsc, vq, slot,
					sc->sc_rx_dmamaps[slot]->dm_nsegs + 1);
		if (r != 0) {
			vioif_free_rx_mbuf(sc, slot);
			break;
		}
		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_rxhdr_dmamaps[slot],
			0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_PREREAD);
		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_rx_dmamaps[slot],
			0, MCLBYTES, BUS_DMASYNC_PREREAD);
		virtio_enqueue(vsc, vq, slot, sc->sc_rxhdr_dmamaps[slot], false);
		virtio_enqueue(vsc, vq, slot, sc->sc_rx_dmamaps[slot], false);
		virtio_enqueue_commit(vsc, vq, slot, false);
		ndone++;
	}
	if (ndone > 0)
		virtio_enqueue_commit(vsc, vq, -1, true);
}

/* dequeue received packets */
static int
vioif_rx_deq(struct vioif_softc *sc)
{
	int r;

	KASSERT(sc->sc_stopping);

	VIOIF_RX_LOCK(sc);
	r = vioif_rx_deq_locked(sc);
	VIOIF_RX_UNLOCK(sc);

	return r;
}

/* dequeue received packets */
static int
vioif_rx_deq_locked(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[VQ_RX];
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int r = 0;
	int slot, len;

	KASSERT(VIOIF_RX_LOCKED(sc));

	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
		len -= sizeof(struct virtio_net_hdr);
		r = 1;
		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_rxhdr_dmamaps[slot],
				0, sizeof(struct virtio_net_hdr),
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_rx_dmamaps[slot],
				0, MCLBYTES,
				BUS_DMASYNC_POSTREAD);
		m = sc->sc_rx_mbufs[slot];
		KASSERT(m != NULL);
		bus_dmamap_unload(virtio_dmat(vsc), sc->sc_rx_dmamaps[slot]);
		sc->sc_rx_mbufs[slot] = NULL;
		virtio_dequeue_commit(vsc, vq, slot);
		m_set_rcvif(m, ifp);
		m->m_len = m->m_pkthdr.len = len;

		VIOIF_RX_UNLOCK(sc);
		if_percpuq_enqueue(ifp->if_percpuq, m);
		VIOIF_RX_LOCK(sc);

		if (sc->sc_stopping)
			break;
	}

	return r;
}

/* rx interrupt; call _dequeue above and schedule a softint */
static int
vioif_rx_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(virtio_child(vsc));
	int r = 0;

#ifdef VIOIF_SOFTINT_INTR
	KASSERT(!cpu_intr_p());
#endif

	VIOIF_RX_LOCK(sc);

	if (sc->sc_stopping)
		goto out;

	r = vioif_rx_deq_locked(sc);
	if (r)
#ifdef VIOIF_SOFTINT_INTR
		vioif_populate_rx_mbufs_locked(sc);
#else
		softint_schedule(sc->sc_rx_softint);
#endif

out:
	VIOIF_RX_UNLOCK(sc);
	return r;
}

/* softint: enqueue receive requests for new incoming packets */
static void
vioif_rx_softint(void *arg)
{
	struct vioif_softc *sc = arg;

	vioif_populate_rx_mbufs(sc);
}

/* free all the mbufs; called from if_stop(disable) */
static void
vioif_rx_drain(struct vioif_softc *sc)
{
	struct virtqueue *vq = &sc->sc_vq[VQ_RX];
	int i;

	for (i = 0; i < vq->vq_num; i++) {
		if (sc->sc_rx_mbufs[i] == NULL)
			continue;
		vioif_free_rx_mbuf(sc, i);
	}
}


/*
 * Transmission implementation
 */
/* actual transmission is done in if_start */
/* tx interrupt; dequeue and free mbufs */
/*
 * tx interrupts are actually disabled; this is expected to be called
 * when the tx vq fills up and from the watchdog
 */
static int
vioif_tx_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(virtio_child(vsc));
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int r = 0;

	VIOIF_TX_LOCK(sc);

	if (sc->sc_stopping)
		goto out;

	r = vioif_tx_vq_done_locked(vq);

out:
	VIOIF_TX_UNLOCK(sc);
	if (r)
		if_schedule_deferred_start(ifp);
	return r;
}

static int
vioif_tx_vq_done_locked(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(virtio_child(vsc));
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int r = 0;
	int slot, len;

	KASSERT(VIOIF_TX_LOCKED(sc));

	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
		r++;
		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_txhdr_dmamaps[slot],
				0, sizeof(struct virtio_net_hdr),
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_tx_dmamaps[slot],
				0, sc->sc_tx_dmamaps[slot]->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		m = sc->sc_tx_mbufs[slot];
		bus_dmamap_unload(virtio_dmat(vsc), sc->sc_tx_dmamaps[slot]);
		sc->sc_tx_mbufs[slot] = NULL;
		virtio_dequeue_commit(vsc, vq, slot);
		ifp->if_opackets++;
		m_freem(m);
	}

	if (r)
		ifp->if_flags &= ~IFF_OACTIVE;
	return r;
}

/* free all the mbufs already put on vq; called from if_stop(disable) */
static void
vioif_tx_drain(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[VQ_TX];
	int i;

	KASSERT(sc->sc_stopping);

	for (i = 0; i < vq->vq_num; i++) {
		if (sc->sc_tx_mbufs[i] == NULL)
			continue;
		bus_dmamap_unload(virtio_dmat(vsc), sc->sc_tx_dmamaps[i]);
		m_freem(sc->sc_tx_mbufs[i]);
		sc->sc_tx_mbufs[i] = NULL;
	}
}

/*
 * Control vq
 */
/* issue a VIRTIO_NET_CTRL_RX class command and wait for completion */
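/*
 * The command travels as a three-descriptor chain: ctrl_cmd and ctrl_rx
 * are device-readable, ctrl_status is device-writable.  The issuer then
 * sleeps on sc_ctrl_wait until vioif_ctrl_vq_done() marks the slot DONE.
 */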
static int
vioif_ctrl_rx(struct vioif_softc *sc, int cmd, bool onoff)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[VQ_CTRL];
	int r, slot;

	if (!sc->sc_has_ctrl)
		return ENOTSUP;

	mutex_enter(&sc->sc_ctrl_wait_lock);
	while (sc->sc_ctrl_inuse != FREE)
		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = INUSE;
	mutex_exit(&sc->sc_ctrl_wait_lock);

	sc->sc_ctrl_cmd->class = VIRTIO_NET_CTRL_RX;
	sc->sc_ctrl_cmd->command = cmd;
	sc->sc_ctrl_rx->onoff = onoff;

	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_cmd_dmamap,
			0, sizeof(struct virtio_net_ctrl_cmd),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_rx_dmamap,
			0, sizeof(struct virtio_net_ctrl_rx),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_status_dmamap,
			0, sizeof(struct virtio_net_ctrl_status),
			BUS_DMASYNC_PREREAD);

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	r = virtio_enqueue_reserve(vsc, vq, slot, 3);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_cmd_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_rx_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_status_dmamap, false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	/* wait for completion */
	mutex_enter(&sc->sc_ctrl_wait_lock);
	while (sc->sc_ctrl_inuse != DONE)
		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
	mutex_exit(&sc->sc_ctrl_wait_lock);
	/* already dequeued */

	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_cmd_dmamap, 0,
			sizeof(struct virtio_net_ctrl_cmd),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_rx_dmamap, 0,
			sizeof(struct virtio_net_ctrl_rx),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_status_dmamap, 0,
			sizeof(struct virtio_net_ctrl_status),
			BUS_DMASYNC_POSTREAD);

	if (sc->sc_ctrl_status->ack == VIRTIO_NET_OK)
		r = 0;
	else {
		printf("%s: failed setting rx mode\n",
		       device_xname(sc->sc_dev));
		r = EIO;
	}

	mutex_enter(&sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = FREE;
	cv_signal(&sc->sc_ctrl_wait);
	mutex_exit(&sc->sc_ctrl_wait_lock);

	return r;
}

static int
vioif_set_promisc(struct vioif_softc *sc, bool onoff)
{
	int r;

	r = vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_PROMISC, onoff);

	return r;
}

static int
vioif_set_allmulti(struct vioif_softc *sc, bool onoff)
{
	int r;

	r = vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, onoff);

	return r;
}

/* issue a VIRTIO_NET_CTRL_MAC_TABLE_SET command and wait for completion */
static int
vioif_set_rx_filter(struct vioif_softc *sc)
{
	/* filter already set in sc_ctrl_mac_tbl */
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[VQ_CTRL];
	int r, slot;

	if (!sc->sc_has_ctrl)
		return ENOTSUP;

	mutex_enter(&sc->sc_ctrl_wait_lock);
	while (sc->sc_ctrl_inuse != FREE)
		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = INUSE;
	mutex_exit(&sc->sc_ctrl_wait_lock);

	sc->sc_ctrl_cmd->class = VIRTIO_NET_CTRL_MAC;
	sc->sc_ctrl_cmd->command = VIRTIO_NET_CTRL_MAC_TABLE_SET;

	r = bus_dmamap_load(virtio_dmat(vsc), sc->sc_ctrl_tbl_uc_dmamap,
			    sc->sc_ctrl_mac_tbl_uc,
			    (sizeof(struct virtio_net_ctrl_mac_tbl)
			  + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
			    NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (r) {
		printf("%s: control command dmamap load failed, "
		       "error code %d\n", device_xname(sc->sc_dev), r);
		goto out;
	}
	r = bus_dmamap_load(virtio_dmat(vsc), sc->sc_ctrl_tbl_mc_dmamap,
			    sc->sc_ctrl_mac_tbl_mc,
			    (sizeof(struct virtio_net_ctrl_mac_tbl)
			  + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
			    NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (r) {
		printf("%s: control command dmamap load failed, "
		       "error code %d\n", device_xname(sc->sc_dev), r);
		bus_dmamap_unload(virtio_dmat(vsc), sc->sc_ctrl_tbl_uc_dmamap);
		goto out;
	}

	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_cmd_dmamap,
			0, sizeof(struct virtio_net_ctrl_cmd),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_tbl_uc_dmamap, 0,
			(sizeof(struct virtio_net_ctrl_mac_tbl)
			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_tbl_mc_dmamap, 0,
			(sizeof(struct virtio_net_ctrl_mac_tbl)
			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_status_dmamap,
			0, sizeof(struct virtio_net_ctrl_status),
			BUS_DMASYNC_PREREAD);

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	r = virtio_enqueue_reserve(vsc, vq, slot, 4);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_cmd_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_tbl_uc_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_tbl_mc_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_status_dmamap, false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	/* wait for completion */
	mutex_enter(&sc->sc_ctrl_wait_lock);
	while (sc->sc_ctrl_inuse != DONE)
		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
	mutex_exit(&sc->sc_ctrl_wait_lock);
	/* already dequeued */

	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_cmd_dmamap, 0,
			sizeof(struct virtio_net_ctrl_cmd),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_tbl_uc_dmamap, 0,
			(sizeof(struct virtio_net_ctrl_mac_tbl)
			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_tbl_mc_dmamap, 0,
			(sizeof(struct virtio_net_ctrl_mac_tbl)
			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_status_dmamap, 0,
			sizeof(struct virtio_net_ctrl_status),
			BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(virtio_dmat(vsc), sc->sc_ctrl_tbl_uc_dmamap);
	bus_dmamap_unload(virtio_dmat(vsc), sc->sc_ctrl_tbl_mc_dmamap);

	if (sc->sc_ctrl_status->ack == VIRTIO_NET_OK)
		r = 0;
	else {
		printf("%s: failed setting rx filter\n",
		       device_xname(sc->sc_dev));
		r = EIO;
	}

out:
	mutex_enter(&sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = FREE;
	cv_signal(&sc->sc_ctrl_wait);
	mutex_exit(&sc->sc_ctrl_wait_lock);

	return r;
}

/* ctrl vq interrupt; wake up the command issuer */
static int
vioif_ctrl_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(virtio_child(vsc));
	int r, slot;

	r = virtio_dequeue(vsc, vq, &slot, NULL);
	if (r == ENOENT)
		return 0;
	virtio_dequeue_commit(vsc, vq, slot);

	mutex_enter(&sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = DONE;
	cv_signal(&sc->sc_ctrl_wait);
	mutex_exit(&sc->sc_ctrl_wait_lock);

	return 1;
}

/*
 * If IFF_PROMISC is requested, enable promiscuous mode.
 * If the multicast filter is small enough (<= MAXENTRIES), program the
 * rx filter; if a larger multicast list exists, use ALLMULTI instead.
 */
/*
 * If programming the rx filter fails, fall back to ALLMULTI;
 * if ALLMULTI fails, fall back to PROMISC.
 */
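/*
 * For example, with three multicast groups joined the driver sends
 * { nentries = 3, macs = { g0, g1, g2 } } as the multicast table; with
 * more than VIRTIO_NET_CTRL_MAC_MAXENTRIES (64) groups, or with any
 * address-range entry, it gives up on exact filtering and requests
 * ALLMULTI instead.
 */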
static int
vioif_rx_filter(struct vioif_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int nentries;
	int promisc = 0, allmulti = 0, rxfilter = 0;
	int r;

	if (!sc->sc_has_ctrl) {	/* no ctrl vq; always promisc */
		ifp->if_flags |= IFF_PROMISC;
		return 0;
	}

	if (ifp->if_flags & IFF_PROMISC) {
		promisc = 1;
		goto set;
	}

	nentries = -1;
	ETHER_LOCK(&sc->sc_ethercom);
	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	while (nentries++, enm != NULL) {
		if (nentries >= VIRTIO_NET_CTRL_MAC_MAXENTRIES) {
			allmulti = 1;
			goto set_unlock;
		}
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			   ETHER_ADDR_LEN)) {
			allmulti = 1;
			goto set_unlock;
		}
		memcpy(sc->sc_ctrl_mac_tbl_mc->macs[nentries],
		       enm->enm_addrlo, ETHER_ADDR_LEN);
		ETHER_NEXT_MULTI(step, enm);
	}
	rxfilter = 1;

set_unlock:
	ETHER_UNLOCK(&sc->sc_ethercom);

set:
	if (rxfilter) {
		sc->sc_ctrl_mac_tbl_uc->nentries = 0;
		sc->sc_ctrl_mac_tbl_mc->nentries = nentries;
		r = vioif_set_rx_filter(sc);
		if (r != 0) {
			rxfilter = 0;
			allmulti = 1; /* fallback */
		}
	} else {
		/* remove rx filter */
		sc->sc_ctrl_mac_tbl_uc->nentries = 0;
		sc->sc_ctrl_mac_tbl_mc->nentries = 0;
		r = vioif_set_rx_filter(sc);
		/* what to do on failure? */
	}
	if (allmulti) {
		r = vioif_set_allmulti(sc, true);
		if (r != 0) {
			allmulti = 0;
			promisc = 1; /* fallback */
		}
	} else {
		r = vioif_set_allmulti(sc, false);
		/* what to do on failure? */
	}
	if (promisc) {
		r = vioif_set_promisc(sc, true);
	} else {
		r = vioif_set_promisc(sc, false);
	}

	return r;
}

static bool
vioif_is_link_up(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	uint16_t status;

	if (virtio_features(vsc) & VIRTIO_NET_F_STATUS)
		status = virtio_read_device_config_2(vsc,
		    VIRTIO_NET_CONFIG_STATUS);
	else
		status = VIRTIO_NET_S_LINK_UP;

	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
}

/* change link status */
static void
vioif_update_link_status(struct vioif_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bool active, changed;
	int link;

	active = vioif_is_link_up(sc);
	changed = false;

	VIOIF_TX_LOCK(sc);
	if (active) {
		if (!sc->sc_link_active)
			changed = true;

		link = LINK_STATE_UP;
		sc->sc_link_active = true;
	} else {
		if (sc->sc_link_active)
			changed = true;

		link = LINK_STATE_DOWN;
		sc->sc_link_active = false;
	}
	VIOIF_TX_UNLOCK(sc);

	if (changed)
		if_link_state_change(ifp, link);
}

static int
vioif_config_change(struct virtio_softc *vsc)
{
	struct vioif_softc *sc = device_private(virtio_child(vsc));

#ifdef VIOIF_SOFTINT_INTR
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
#endif

#ifdef VIOIF_SOFTINT_INTR
	KASSERT(!cpu_intr_p());
	vioif_update_link_status(sc);
	vioif_start(ifp);
#else
	softint_schedule(sc->sc_ctl_softint);
#endif

	return 0;
}

static void
vioif_ctl_softint(void *arg)
{
	struct vioif_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	vioif_update_link_status(sc);
	vioif_start(ifp);
}

MODULE(MODULE_CLASS_DRIVER, if_vioif, "virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
if_vioif_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_if_vioif,
		    cfattach_ioconf_if_vioif, cfdata_ioconf_if_vioif);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_if_vioif,
		    cfattach_ioconf_if_vioif, cfdata_ioconf_if_vioif);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}