/*	$OpenBSD: if_xnf.c,v 1.39 2016/09/13 10:16:22 mikeb Exp $	*/

/*
 * Copyright (c) 2015, 2016 Mike Belopuhov
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "vlan.h"
#include "xen.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/task.h>
#include <sys/timeout.h>

#include <machine/bus.h>

#include <dev/pv/xenreg.h>
#include <dev/pv/xenvar.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif


/*
 * Rx ring
 */

struct xnf_rx_req {
	uint16_t		 rxq_id;
	uint16_t		 rxq_pad;
	uint32_t		 rxq_ref;
} __packed;

struct xnf_rx_rsp {
	uint16_t		 rxp_id;
	uint16_t		 rxp_offset;
	uint16_t		 rxp_flags;
#define  XNF_RXF_CSUM_VALID	  0x0001
#define  XNF_RXF_CSUM_BLANK	  0x0002
#define  XNF_RXF_CHUNK		  0x0004
#define  XNF_RXF_MGMT		  0x0008
	int16_t			 rxp_status;
} __packed;
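/*
 * A note on the response flags above: they line up bit-for-bit with the
 * NETRXF_* flags in Xen's canonical netif.h (data_validated, csum_blank,
 * more_data and extra_info respectively); only the names are local.
 */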

union xnf_rx_desc {
	struct xnf_rx_req	 rxd_req;
	struct xnf_rx_rsp	 rxd_rsp;
} __packed;

#define XNF_RX_DESC		256
#define XNF_MCLEN		PAGE_SIZE
#define XNF_RX_MIN		32

struct xnf_rx_ring {
	volatile uint32_t	 rxr_prod;
	volatile uint32_t	 rxr_prod_event;
	volatile uint32_t	 rxr_cons;
	volatile uint32_t	 rxr_cons_event;
	uint32_t		 rxr_reserved[12];
	union xnf_rx_desc	 rxr_desc[XNF_RX_DESC];
} __packed;
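/*
 * Layout note: the ring header is 16 32-bit words (64 bytes) followed by
 * 256 packed 8-byte Rx descriptors, i.e. 64 + 256 * 8 = 2112 bytes; the
 * Tx ring below has the same header and 12-byte descriptors (3136 bytes).
 * Either way the whole ring fits in the single 4KB grant page it is
 * allocated from, and the power-of-two descriptor count is what permits
 * the "idx & (XNF_*_DESC - 1)" masking used throughout the code.
 */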


/*
 * Tx ring
 */

struct xnf_tx_req {
	uint32_t		 txq_ref;
	uint16_t		 txq_offset;
	uint16_t		 txq_flags;
#define  XNF_TXF_CSUM_BLANK	  0x0001
#define  XNF_TXF_CSUM_VALID	  0x0002
#define  XNF_TXF_CHUNK		  0x0004
#define  XNF_TXF_ETXRA		  0x0008
	uint16_t		 txq_id;
	uint16_t		 txq_size;
} __packed;

struct xnf_tx_rsp {
	uint16_t		 txp_id;
	int16_t			 txp_status;
} __packed;

union xnf_tx_desc {
	struct xnf_tx_req	 txd_req;
	struct xnf_tx_rsp	 txd_rsp;
} __packed;

#define XNF_TX_DESC		256
#define XNF_TX_FRAG		18
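/*
 * XNF_TX_FRAG appears to mirror the historical Xen netif limit of 18
 * slots per packet (enough for a 64KB frame plus page-crossing slop);
 * backends commonly reject packets that use more.  This is background
 * knowledge, not something the backend advertises here.
 */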

struct xnf_tx_ring {
	volatile uint32_t	 txr_prod;
	volatile uint32_t	 txr_prod_event;
	volatile uint32_t	 txr_cons;
	volatile uint32_t	 txr_cons_event;
	uint32_t		 txr_reserved[12];
	union xnf_tx_desc	 txr_desc[XNF_TX_DESC];
} __packed;


/* Management frame, "extra info" in Xen parlance */
struct xnf_mgmt {
	uint8_t			 mg_type;
#define  XNF_MGMT_MCAST_ADD	2
#define  XNF_MGMT_MCAST_DEL	3
	uint8_t			 mg_flags;
	union {
		uint8_t		 mgu_mcaddr[ETHER_ADDR_LEN];
		uint16_t	 mgu_pad[3];
	} u;
#define mg_mcaddr		 u.mgu_mcaddr
} __packed;
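/*
 * The type values match XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL} from the
 * canonical netif.h; an extra-info segment rides in a request slot when
 * the multicast control capability has been negotiated.
 */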


struct xnf_softc {
	struct device		 sc_dev;
	struct xen_attach_args	 sc_xa;
	struct xen_softc	*sc_xen;
	bus_dma_tag_t		 sc_dmat;
	int			 sc_domid;

	struct arpcom		 sc_ac;
	struct ifmedia		 sc_media;

	xen_intr_handle_t	 sc_xih;

	int			 sc_caps;
#define  XNF_CAP_SG		  0x0001
#define  XNF_CAP_CSUM4		  0x0002
#define  XNF_CAP_CSUM6		  0x0004
#define  XNF_CAP_MCAST		  0x0008
#define  XNF_CAP_SPLIT		  0x0010
#define  XNF_CAP_MULTIQ		  0x0020

	/* Rx ring */
	struct xnf_rx_ring	*sc_rx_ring;
	int			 sc_rx_cons;
	bus_dmamap_t		 sc_rx_rmap;		  /* map for the ring */
	bus_dma_segment_t	 sc_rx_seg;
	uint32_t		 sc_rx_ref;		  /* grant table ref */
	struct mbuf		*sc_rx_buf[XNF_RX_DESC];
	bus_dmamap_t		 sc_rx_dmap[XNF_RX_DESC]; /* maps for packets */
	struct mbuf		*sc_rx_cbuf[2];	  	  /* chain handling */

	/* Tx ring */
	struct xnf_tx_ring	*sc_tx_ring;
	int			 sc_tx_cons;
	bus_dmamap_t		 sc_tx_rmap;		  /* map for the ring */
	bus_dma_segment_t	 sc_tx_seg;
	uint32_t		 sc_tx_ref;		  /* grant table ref */
	int			 sc_tx_frags;
	struct mbuf		*sc_tx_buf[XNF_TX_DESC];
	bus_dmamap_t		 sc_tx_dmap[XNF_TX_DESC]; /* maps for packets */
};

int	xnf_match(struct device *, void *, void *);
void	xnf_attach(struct device *, struct device *, void *);
int	xnf_lladdr(struct xnf_softc *);
int	xnf_ioctl(struct ifnet *, u_long, caddr_t);
int	xnf_media_change(struct ifnet *);
void	xnf_media_status(struct ifnet *, struct ifmediareq *);
int	xnf_iff(struct xnf_softc *);
void	xnf_init(struct xnf_softc *);
void	xnf_stop(struct xnf_softc *);
void	xnf_start(struct ifnet *);
int	xnf_encap(struct xnf_softc *, struct mbuf *, uint32_t *);
void	xnf_intr(void *);
void	xnf_watchdog(struct ifnet *);
void	xnf_txeof(struct xnf_softc *);
void	xnf_rxeof(struct xnf_softc *);
int	xnf_rx_ring_fill(struct xnf_softc *);
int	xnf_rx_ring_create(struct xnf_softc *);
void	xnf_rx_ring_drain(struct xnf_softc *);
void	xnf_rx_ring_destroy(struct xnf_softc *);
int	xnf_tx_ring_create(struct xnf_softc *);
void	xnf_tx_ring_drain(struct xnf_softc *);
void	xnf_tx_ring_destroy(struct xnf_softc *);
int	xnf_capabilities(struct xnf_softc *sc);
int	xnf_init_backend(struct xnf_softc *);

struct cfdriver xnf_cd = {
	NULL, "xnf", DV_IFNET
};

const struct cfattach xnf_ca = {
	sizeof(struct xnf_softc), xnf_match, xnf_attach
};

int
xnf_match(struct device *parent, void *match, void *aux)
{
	struct xen_attach_args *xa = aux;

	if (strcmp("vif", xa->xa_name))
		return (0);

	return (1);
}

void
xnf_attach(struct device *parent, struct device *self, void *aux)
{
	struct xen_attach_args *xa = aux;
	struct xnf_softc *sc = (struct xnf_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	sc->sc_xa = *xa;
	sc->sc_xen = xa->xa_parent;
	sc->sc_dmat = xa->xa_dmat;
	sc->sc_domid = xa->xa_domid;

	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	if (xnf_lladdr(sc)) {
		printf(": failed to obtain MAC address\n");
		return;
	}

	if (xen_intr_establish(0, &sc->sc_xih, sc->sc_domid, xnf_intr, sc,
	    ifp->if_xname)) {
		printf(": failed to establish an interrupt\n");
		return;
	}
	xen_intr_mask(sc->sc_xih);

	printf(": backend %d, event channel %u, address %s\n", sc->sc_domid,
	    sc->sc_xih, ether_sprintf(sc->sc_ac.ac_enaddr));

	if (xnf_capabilities(sc)) {
		xen_intr_disestablish(sc->sc_xih);
		return;
	}

	if (sc->sc_caps & XNF_CAP_SG)
		ifp->if_hardmtu = 9000;

	if (xnf_rx_ring_create(sc)) {
		xen_intr_disestablish(sc->sc_xih);
		return;
	}
	if (xnf_tx_ring_create(sc)) {
		xen_intr_disestablish(sc->sc_xih);
		xnf_rx_ring_destroy(sc);
		return;
	}
	if (xnf_init_backend(sc)) {
		xen_intr_disestablish(sc->sc_xih);
		xnf_rx_ring_destroy(sc);
		xnf_tx_ring_destroy(sc);
		return;
	}

	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = xnf_ioctl;
	ifp->if_start = xnf_start;
	ifp->if_watchdog = xnf_watchdog;
	ifp->if_softc = sc;

	ifp->if_capabilities = IFCAP_VLAN_MTU;
	if (sc->sc_caps & XNF_CAP_CSUM4)
		ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
	if (sc->sc_caps & XNF_CAP_CSUM6)
		ifp->if_capabilities |= IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;

	IFQ_SET_MAXLEN(&ifp->if_snd, XNF_TX_DESC - 1);

	ifmedia_init(&sc->sc_media, IFM_IMASK, xnf_media_change,
	    xnf_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);

	if_attach(ifp);
	ether_ifattach(ifp);

	/* Kick out emulated em's and re's */
	sc->sc_xen->sc_flags |= XSF_UNPLUG_NIC;
}

static int
nibble(int ch)
{
	if (ch >= '0' && ch <= '9')
		return (ch - '0');
	if (ch >= 'A' && ch <= 'F')
		return (10 + ch - 'A');
	if (ch >= 'a' && ch <= 'f')
		return (10 + ch - 'a');
	return (-1);
}

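/*
 * Fetch the MAC address from the backend's "mac" xenstore property.
 * The property is a colon-separated hex string, e.g. "00:16:3e:1a:2b:3c";
 * each iteration of the loop below consumes two hex digits plus the
 * separator, which is why the index advances by 3.
 */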
int
xnf_lladdr(struct xnf_softc *sc)
{
	char enaddr[ETHER_ADDR_LEN];
	char mac[32];
	int i, j, lo, hi;

	if (xs_getprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_backend, "mac",
	    mac, sizeof(mac)))
		return (-1);

	for (i = 0, j = 0; j < ETHER_ADDR_LEN; i += 3) {
		if ((hi = nibble(mac[i])) == -1 ||
		    (lo = nibble(mac[i+1])) == -1)
			return (-1);
		enaddr[j++] = hi << 4 | lo;
	}

	memcpy(sc->sc_ac.ac_enaddr, enaddr, ETHER_ADDR_LEN);
	return (0);
}

int
xnf_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct xnf_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			xnf_init(sc);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				xnf_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				xnf_stop(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_ac, command, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			xnf_iff(sc);
		error = 0;
	}

	splx(s);

	return (error);
}

int
xnf_media_change(struct ifnet *ifp)
{
	return (0);
}

void
xnf_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
}

int
xnf_iff(struct xnf_softc *sc)
{
	return (0);
}

void
xnf_init(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	xnf_stop(sc);

	xnf_iff(sc);

	if (xen_intr_unmask(sc->sc_xih)) {
		printf("%s: failed to enable interrupts\n", ifp->if_xname);
		xnf_stop(sc);
		return;
	}

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}

void
xnf_stop(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	ifp->if_flags &= ~IFF_RUNNING;

	xen_intr_mask(sc->sc_xih);

	ifp->if_timer = 0;

	ifq_barrier(&ifp->if_snd);
	intr_barrier(&sc->sc_xih);

	ifq_clr_oactive(&ifp->if_snd);

	if (sc->sc_tx_ring)
		xnf_tx_ring_drain(sc);
	if (sc->sc_rx_ring)
		xnf_rx_ring_drain(sc);
}

void
xnf_start(struct ifnet *ifp)
{
	struct xnf_softc *sc = ifp->if_softc;
	struct xnf_tx_ring *txr = sc->sc_tx_ring;
	struct mbuf *m;
	int pkts = 0;
	uint32_t prod, oprod;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
	    BUS_DMASYNC_POSTREAD);

	prod = oprod = txr->txr_prod;

	for (;;) {
		if ((XNF_TX_DESC - (prod - sc->sc_tx_cons)) <
		    sc->sc_tx_frags) {
			/* transient */
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		if (xnf_encap(sc, m, &prod)) {
			/* the chain is too large */
			ifp->if_oerrors++;
			m_freem(m);
			continue;
		}
		ifp->if_opackets++;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		pkts++;
	}
	if (pkts > 0) {
		txr->txr_prod = prod;
		if (txr->txr_cons_event < txr->txr_cons)
			txr->txr_cons_event = txr->txr_cons +
			    ((txr->txr_prod - txr->txr_cons) >> 1) + 1;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
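		/*
		 * Event-channel moderation: the backend stores in
		 * txr_prod_event the producer index for which it wants
		 * the next notification.  With unsigned wraparound
		 * arithmetic, "prod - txr_prod_event < prod - oprod"
		 * is true iff that index falls within the batch we
		 * just queued, (oprod, prod], so we only signal when
		 * the backend actually asked to hear about it.
		 */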
		if (prod - txr->txr_prod_event < prod - oprod)
			xen_intr_signal(sc->sc_xih);
		ifp->if_timer = 5;
	}
}

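/*
 * Count the number of ring slots a chain would need: one per page
 * crossed by each mbuf in the chain, since a grant reference covers a
 * single page.  E.g. a 2-byte buffer straddling a page boundary costs
 * two slots, while a page-aligned PAGE_SIZE buffer costs one.
 */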
static inline int
xnf_fragcount(struct mbuf *m_head)
{
	struct mbuf *m;
	vaddr_t va, va0;
	int n = 0;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		     /* start of the buffer */
		for (va0 = va = mtod(m, vaddr_t);
		     /* does the buffer end on this page? */
		     va + (PAGE_SIZE - (va & PAGE_MASK)) < va0 + m->m_len;
		     /* move on to the next page */
		     va += PAGE_SIZE - (va & PAGE_MASK))
			n++;
		n++;
	}
	return (n);
}

int
xnf_encap(struct xnf_softc *sc, struct mbuf *m_head, uint32_t *prod)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct xnf_tx_ring *txr = sc->sc_tx_ring;
	union xnf_tx_desc *txd;
	struct mbuf *m;
	bus_dmamap_t dmap;
	uint32_t oprod = *prod;
	int i, id, flags, n;

	if ((xnf_fragcount(m_head) > sc->sc_tx_frags) &&
	    m_defrag(m_head, M_DONTWAIT))
		goto errout;

	for (m = m_head; m != NULL && m->m_len > 0; m = m->m_next) {
		i = *prod & (XNF_TX_DESC - 1);
		dmap = sc->sc_tx_dmap[i];
		txd = &txr->txr_desc[i];
		if (sc->sc_tx_buf[i])
			panic("%s: cons %u(%u) prod %u next %u seg %d/%d\n",
			    ifp->if_xname, txr->txr_cons, sc->sc_tx_cons,
			    txr->txr_prod, *prod, *prod - oprod,
			    xnf_fragcount(m_head));

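		/*
		 * As implemented by the xen(4) DMA tag, the upper 16
		 * bits of the flags word carry the peer domain id, and
		 * loads return grant-table references rather than
		 * physical addresses in ds_addr; see the "grant table
		 * ref" fields in the softc above.
		 */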
		flags = (sc->sc_domid << 16) | BUS_DMA_WRITE | BUS_DMA_WAITOK;
		if (bus_dmamap_load(sc->sc_dmat, dmap, m->m_data, m->m_len,
		    NULL, flags)) {
			DPRINTF("%s: failed to load %d bytes @%lu\n",
			    sc->sc_dev.dv_xname, m->m_len,
			    mtod(m, vaddr_t) & PAGE_MASK);
			goto unroll;
		}

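		/*
		 * Checksum offload on Tx: CSUM_BLANK tells the backend
		 * that the checksum field has not been filled in yet,
		 * CSUM_VALID that the payload can be trusted; together
		 * they ask the backend to compute the checksum
		 * (NETTXF_csum_blank and NETTXF_data_validated in the
		 * canonical headers).
		 */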
		if (m == m_head) {
			if (m->m_pkthdr.csum_flags &
			    (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
				txd->txd_req.txq_flags = XNF_TXF_CSUM_BLANK |
				    XNF_TXF_CSUM_VALID;
			txd->txd_req.txq_size = m->m_pkthdr.len;
		}
		for (n = 0; n < dmap->dm_nsegs; n++) {
			i = *prod & (XNF_TX_DESC - 1);
			txd = &txr->txr_desc[i];
			if (sc->sc_tx_buf[i])
				panic("%s: cons %u(%u) prod %u next %u "
				    "seg %d/%d\n", ifp->if_xname,
				    txr->txr_cons, sc->sc_tx_cons,
				    txr->txr_prod, *prod, *prod - oprod,
				    xnf_fragcount(m_head));

			/* Don't overwrite the length of the very first one */
			if (!(m == m_head && n == 0))
				txd->txd_req.txq_size = dmap->dm_segs[n].ds_len;
			/* The chunk flag will be removed from the last one */
			txd->txd_req.txq_flags |= XNF_TXF_CHUNK;
			txd->txd_req.txq_ref = dmap->dm_segs[n].ds_addr;
			txd->txd_req.txq_offset = dmap->dm_segs[n].ds_offset;
			(*prod)++;
		}
	}
	/* Clear the chunk flag from the last segment */
	txd->txd_req.txq_flags &= ~XNF_TXF_CHUNK;
	sc->sc_tx_buf[i] = m_head;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
	    BUS_DMASYNC_PREWRITE);

	return (0);

 unroll:
	for (; *prod != oprod; (*prod)--) {
		i = (*prod - 1) & (XNF_TX_DESC - 1);
		dmap = sc->sc_tx_dmap[i];
		txd = &txr->txr_desc[i];

		id = txd->txd_rsp.txp_id;
		memset(txd, 0, sizeof(*txd));
		txd->txd_req.txq_id = id;

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, 0,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, dmap);

		if (sc->sc_tx_buf[i])
			sc->sc_tx_buf[i] = NULL;
	}

 errout:
	return (ENOBUFS);
}

void
xnf_intr(void *arg)
{
	struct xnf_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	if (ifp->if_flags & IFF_RUNNING) {
		xnf_txeof(sc);
		xnf_rxeof(sc);
	}
}

void
xnf_watchdog(struct ifnet *ifp)
{
	struct xnf_softc *sc = ifp->if_softc;
	struct xnf_tx_ring *txr = sc->sc_tx_ring;

	printf("%s: tx stuck: prod %u cons %u,%u evt %u,%u\n",
	    ifp->if_xname, txr->txr_prod, txr->txr_cons, sc->sc_tx_cons,
	    txr->txr_prod_event, txr->txr_cons_event);
}

void
xnf_txeof(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct xnf_tx_ring *txr = sc->sc_tx_ring;
	union xnf_tx_desc *txd;
	bus_dmamap_t dmap;
	uint32_t cons;
	int i, id;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
	    BUS_DMASYNC_POSTWRITE);

	for (cons = sc->sc_tx_cons; cons != txr->txr_cons; cons++) {
		i = cons & (XNF_TX_DESC - 1);
		txd = &txr->txr_desc[i];
		dmap = sc->sc_tx_dmap[i];

		id = txd->txd_rsp.txp_id;
		memset(txd, 0, sizeof(*txd));
		txd->txd_req.txq_id = id;

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, 0,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, dmap);

		if (sc->sc_tx_buf[i] != NULL) {
			m_freem(sc->sc_tx_buf[i]);
			sc->sc_tx_buf[i] = NULL;
		}
	}

	sc->sc_tx_cons = cons;
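	/*
	 * Ask the backend for the next completion event once roughly
	 * half of the currently outstanding descriptors have been
	 * consumed, batching Tx completions instead of taking an
	 * interrupt per packet.
	 */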
	txr->txr_cons_event = sc->sc_tx_cons +
	    ((txr->txr_prod - sc->sc_tx_cons) >> 1) + 1;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (txr->txr_cons == txr->txr_prod)
		ifp->if_timer = 0;
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}

void
xnf_rxeof(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct xnf_rx_ring *rxr = sc->sc_rx_ring;
	union xnf_rx_desc *rxd;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *fmp = sc->sc_rx_cbuf[0];
	struct mbuf *lmp = sc->sc_rx_cbuf[1];
	struct mbuf *m;
	bus_dmamap_t dmap;
	uint32_t cons;
	int i, id, flags, len, offset;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
	    BUS_DMASYNC_POSTREAD);

	for (cons = sc->sc_rx_cons; cons != rxr->rxr_cons; cons++) {
		i = cons & (XNF_RX_DESC - 1);
		rxd = &rxr->rxr_desc[i];
		dmap = sc->sc_rx_dmap[i];

		len = rxd->rxd_rsp.rxp_status;
		flags = rxd->rxd_rsp.rxp_flags;
		offset = rxd->rxd_rsp.rxp_offset;
		id = rxd->rxd_rsp.rxp_id;
		memset(rxd, 0, sizeof(*rxd));
		rxd->rxd_req.rxq_id = id;

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, 0,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, dmap);

		m = sc->sc_rx_buf[i];
		KASSERT(m != NULL);
		sc->sc_rx_buf[i] = NULL;

		if (flags & XNF_RXF_MGMT) {
			printf("%s: management data present\n",
			    ifp->if_xname);
			m_freem(m);
			continue;
		}

		if (flags & XNF_RXF_CSUM_VALID)
			m->m_pkthdr.csum_flags = M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;

		if (len < 0 || (len + offset > PAGE_SIZE)) {
			ifp->if_ierrors++;
			m_freem(m);
			continue;
		}

		m->m_len = len;
		m->m_data += offset;

		if (fmp == NULL) {
			m->m_pkthdr.len = len;
			fmp = m;
		} else {
			m->m_flags &= ~M_PKTHDR;
			lmp->m_next = m;
			fmp->m_pkthdr.len += m->m_len;
		}
		lmp = m;

		if (flags & XNF_RXF_CHUNK) {
			sc->sc_rx_cbuf[0] = fmp;
			sc->sc_rx_cbuf[1] = lmp;
			continue;
		}

		m = fmp;

		ml_enqueue(&ml, m);
		sc->sc_rx_cbuf[0] = sc->sc_rx_cbuf[1] = fmp = lmp = NULL;
	}

	sc->sc_rx_cons = cons;
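	/*
	 * Unlike Tx, request an event for the very next Rx response;
	 * presumably latency matters more than batching on receive.
	 */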
	rxr->rxr_cons_event = sc->sc_rx_cons + 1;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (!ml_empty(&ml))
		if_input(ifp, &ml);

	if (xnf_rx_ring_fill(sc) || (sc->sc_rx_cons != rxr->rxr_cons))
		xen_intr_schedule(sc->sc_xih);
}

int
xnf_rx_ring_fill(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct xnf_rx_ring *rxr = sc->sc_rx_ring;
	bus_dmamap_t dmap;
	struct mbuf *m;
	uint32_t cons, prod, oprod;
	int i, flags, resched = 0;

	cons = rxr->rxr_cons;
	prod = oprod = rxr->rxr_prod;

	while (prod - cons < XNF_RX_DESC) {
		i = prod & (XNF_RX_DESC - 1);
		if (sc->sc_rx_buf[i])
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, XNF_MCLEN);
		if (m == NULL)
			break;
		m->m_len = m->m_pkthdr.len = XNF_MCLEN;
		dmap = sc->sc_rx_dmap[i];
		flags = (sc->sc_domid << 16) | BUS_DMA_READ | BUS_DMA_NOWAIT;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m, flags)) {
			m_freem(m);
			break;
		}
		sc->sc_rx_buf[i] = m;
		rxr->rxr_desc[i].rxd_req.rxq_ref = dmap->dm_segs[0].ds_addr;
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, 0, BUS_DMASYNC_PREWRITE);
		prod++;
	}

	rxr->rxr_prod = prod;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if ((prod - cons < XNF_RX_MIN) && (ifp->if_flags & IFF_RUNNING))
		resched = 1;
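	/*
	 * Same notify test as on the Tx path: signal the backend only
	 * if rxr_prod_event falls within the batch just posted.
	 */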
	if (prod - rxr->rxr_prod_event < prod - oprod)
		xen_intr_signal(sc->sc_xih);

	return (resched);
}

int
xnf_rx_ring_create(struct xnf_softc *sc)
{
	int i, flags, rsegs;

	/* Allocate a page of memory for the ring */
	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &sc->sc_rx_seg, 1, &rsegs, BUS_DMA_ZERO | BUS_DMA_WAITOK)) {
		printf("%s: failed to allocate memory for the rx ring\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}
	/* Map in the allocated memory into the ring structure */
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_rx_seg, 1, PAGE_SIZE,
	    (caddr_t *)(&sc->sc_rx_ring), BUS_DMA_WAITOK)) {
		printf("%s: failed to map memory for the rx ring\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	/* Create a map to load the ring memory into */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_WAITOK, &sc->sc_rx_rmap)) {
		printf("%s: failed to create a memory map for the rx ring\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	/* Load the ring into the ring map to extract the PA */
	flags = (sc->sc_domid << 16) | BUS_DMA_WAITOK;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_rmap, sc->sc_rx_ring,
	    PAGE_SIZE, NULL, flags)) {
		printf("%s: failed to load the rx ring map\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	sc->sc_rx_ref = sc->sc_rx_rmap->dm_segs[0].ds_addr;

	sc->sc_rx_ring->rxr_prod_event = sc->sc_rx_ring->rxr_cons_event = 1;

	for (i = 0; i < XNF_RX_DESC; i++) {
		if (bus_dmamap_create(sc->sc_dmat, XNF_MCLEN, 1, XNF_MCLEN,
		    PAGE_SIZE, BUS_DMA_WAITOK, &sc->sc_rx_dmap[i])) {
			printf("%s: failed to create a memory map for the"
			    " rx slot %d\n", sc->sc_dev.dv_xname, i);
			goto errout;
		}
		sc->sc_rx_ring->rxr_desc[i].rxd_req.rxq_id = i;
	}

	xnf_rx_ring_fill(sc);

	return (0);

 errout:
	xnf_rx_ring_destroy(sc);
	return (-1);
}

void
xnf_rx_ring_drain(struct xnf_softc *sc)
{
	struct xnf_rx_ring *rxr = sc->sc_rx_ring;

	if (sc->sc_rx_cons != rxr->rxr_cons)
		xnf_rxeof(sc);
}

void
xnf_rx_ring_destroy(struct xnf_softc *sc)
{
	int i, slots = 0;

	for (i = 0; i < XNF_RX_DESC; i++) {
		if (sc->sc_rx_buf[i] == NULL)
			continue;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmap[i], 0, 0,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_dmap[i]);
		m_freem(sc->sc_rx_buf[i]);
		sc->sc_rx_buf[i] = NULL;
		slots++;
	}

	for (i = 0; i < XNF_RX_DESC; i++) {
		if (sc->sc_rx_dmap[i] == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmap[i]);
		sc->sc_rx_dmap[i] = NULL;
	}
	if (sc->sc_rx_rmap) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_rmap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_rmap);
	}
	if (sc->sc_rx_ring) {
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_rx_ring,
		    PAGE_SIZE);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_rx_seg, 1);
	}
	sc->sc_rx_ring = NULL;
	sc->sc_rx_rmap = NULL;
	sc->sc_rx_cons = 0;
}

int
xnf_tx_ring_create(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i, flags, nsegs, rsegs;
	bus_size_t segsz;

	sc->sc_tx_frags = sc->sc_caps & XNF_CAP_SG ? XNF_TX_FRAG : 1;

	/* Allocate a page of memory for the ring */
	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &sc->sc_tx_seg, 1, &rsegs, BUS_DMA_ZERO | BUS_DMA_WAITOK)) {
		printf("%s: failed to allocate memory for the tx ring\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}
	/* Map in the allocated memory into the ring structure */
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_tx_seg, 1, PAGE_SIZE,
	    (caddr_t *)&sc->sc_tx_ring, BUS_DMA_WAITOK)) {
		printf("%s: failed to map memory for the tx ring\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	/* Create a map to load the ring memory into */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_WAITOK, &sc->sc_tx_rmap)) {
		printf("%s: failed to create a memory map for the tx ring\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	/* Load the ring into the ring map to extract the PA */
	flags = (sc->sc_domid << 16) | BUS_DMA_WAITOK;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_tx_rmap, sc->sc_tx_ring,
	    PAGE_SIZE, NULL, flags)) {
		printf("%s: failed to load the tx ring map\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	sc->sc_tx_ref = sc->sc_tx_rmap->dm_segs[0].ds_addr;

	sc->sc_tx_ring->txr_prod_event = sc->sc_tx_ring->txr_cons_event = 1;

	if (sc->sc_caps & XNF_CAP_SG) {
		nsegs = roundup(ifp->if_hardmtu, XNF_MCLEN) / XNF_MCLEN + 1;
		segsz = nsegs * XNF_MCLEN;
	} else {
		nsegs = 1;
		segsz = XNF_MCLEN;
	}
	for (i = 0; i < XNF_TX_DESC; i++) {
		if (bus_dmamap_create(sc->sc_dmat, segsz, nsegs, XNF_MCLEN,
		    PAGE_SIZE, BUS_DMA_WAITOK, &sc->sc_tx_dmap[i])) {
			printf("%s: failed to create a memory map for the"
			    " tx slot %d\n", sc->sc_dev.dv_xname, i);
			goto errout;
		}
		sc->sc_tx_ring->txr_desc[i].txd_req.txq_id = i;
	}

	return (0);

 errout:
	xnf_tx_ring_destroy(sc);
	return (-1);
}

void
xnf_tx_ring_drain(struct xnf_softc *sc)
{
	struct xnf_tx_ring *txr = sc->sc_tx_ring;

	if (sc->sc_tx_cons != txr->txr_cons)
		xnf_txeof(sc);
}

void
xnf_tx_ring_destroy(struct xnf_softc *sc)
{
	int i;

	for (i = 0; i < XNF_TX_DESC; i++) {
		if (sc->sc_tx_dmap[i] == NULL)
			continue;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_dmap[i], 0, 0,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_dmap[i]);
		if (sc->sc_tx_buf[i] == NULL)
			continue;
		m_free(sc->sc_tx_buf[i]);
		sc->sc_tx_buf[i] = NULL;
	}
	for (i = 0; i < XNF_TX_DESC; i++) {
		if (sc->sc_tx_dmap[i] == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmap[i]);
		sc->sc_tx_dmap[i] = NULL;
	}
	if (sc->sc_tx_rmap) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_rmap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_rmap);
	}
	if (sc->sc_tx_ring) {
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_tx_ring,
		    PAGE_SIZE);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_tx_seg, 1);
	}
	sc->sc_tx_ring = NULL;
	sc->sc_tx_rmap = NULL;
}

int
xnf_capabilities(struct xnf_softc *sc)
{
	const char *prop;
	char val[32];
	int error;

	/* Query scatter-gather capability */
	prop = "feature-sg";
	if ((error = xs_getprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_backend,
	    prop, val, sizeof(val))) == 0) {
		if (val[0] == '1')
			sc->sc_caps |= XNF_CAP_SG;
	} else if (error != ENOENT)
		goto errout;

	/* Query IPv4 checksum offloading capability, enabled by default */
	sc->sc_caps |= XNF_CAP_CSUM4;
	prop = "feature-no-csum-offload";
	if ((error = xs_getprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_backend,
	    prop, val, sizeof(val))) == 0) {
		if (val[0] == '1')
			sc->sc_caps &= ~XNF_CAP_CSUM4;
	} else if (error != ENOENT)
		goto errout;

	/* Query IPv6 checksum offloading capability */
	prop = "feature-ipv6-csum-offload";
	if ((error = xs_getprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_backend,
	    prop, val, sizeof(val))) == 0) {
		if (val[0] == '1')
			sc->sc_caps |= XNF_CAP_CSUM6;
	} else if (error != ENOENT)
		goto errout;

	/* Query multicast traffic control capability */
	prop = "feature-multicast-control";
	if ((error = xs_getprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_backend,
	    prop, val, sizeof(val))) == 0) {
		if (val[0] == '1')
			sc->sc_caps |= XNF_CAP_MCAST;
	} else if (error != ENOENT)
		goto errout;

	/* Query split Rx/Tx event channel capability */
	prop = "feature-split-event-channels";
	if ((error = xs_getprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_backend,
	    prop, val, sizeof(val))) == 0) {
		if (val[0] == '1')
			sc->sc_caps |= XNF_CAP_SPLIT;
	} else if (error != ENOENT)
		goto errout;

	/* Query multiqueue capability */
	prop = "multi-queue-max-queues";
	if ((error = xs_getprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_backend,
	    prop, val, sizeof(val))) == 0)
		sc->sc_caps |= XNF_CAP_MULTIQ;
	else if (error != ENOENT)
		goto errout;

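	/*
	 * The "%b" conversion is the kernel printf(9) bit-field format:
	 * the leading "\20" selects hexadecimal output and each
	 * "\<bit>NAME" pair names a bit, numbered from 1 at the LSB.
	 */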
	DPRINTF("%s: capabilities %b\n", sc->sc_dev.dv_xname, sc->sc_caps,
	    "\20\006MULTIQ\005SPLIT\004MCAST\003CSUM6\002CSUM4\001SG");
	return (0);

 errout:
	printf("%s: failed to read \"%s\" property\n", sc->sc_dev.dv_xname,
	    prop);
	return (-1);
}

int
xnf_init_backend(struct xnf_softc *sc)
{
	const char *prop;
	char val[32];

	/* Plumb the Rx ring */
	prop = "rx-ring-ref";
	snprintf(val, sizeof(val), "%u", sc->sc_rx_ref);
	if (xs_setprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_node, prop, val,
	    strlen(val)))
		goto errout;
	/* Enable "copy" mode */
	prop = "request-rx-copy";
	snprintf(val, sizeof(val), "%u", 1);
	if (xs_setprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_node, prop, val,
	    strlen(val)))
		goto errout;
	/* Enable notify mode */
	prop = "feature-rx-notify";
	snprintf(val, sizeof(val), "%u", 1);
	if (xs_setprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_node, prop, val,
	    strlen(val)))
		goto errout;

	/* Plumb the Tx ring */
	prop = "tx-ring-ref";
	snprintf(val, sizeof(val), "%u", sc->sc_tx_ref);
	if (xs_setprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_node, prop, val,
	    strlen(val)))
		goto errout;
	/* Enable scatter-gather mode */
	if (sc->sc_tx_frags > 1) {
		prop = "feature-sg";
		snprintf(val, sizeof(val), "%u", 1);
		if (xs_setprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_node, prop,
		    val, strlen(val)))
			goto errout;
	}

	/* Enable IPv6 checksum offloading */
	if (sc->sc_caps & XNF_CAP_CSUM6) {
		prop = "feature-ipv6-csum-offload";
		snprintf(val, sizeof(val), "%u", 1);
		if (xs_setprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_node, prop,
		    val, strlen(val)))
			goto errout;
	}

	/* Plumb the event channel port */
	prop = "event-channel";
	snprintf(val, sizeof(val), "%u", sc->sc_xih);
	if (xs_setprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_node, prop, val,
	    strlen(val)))
		goto errout;

	/* Connect the device: state 4 is XenbusStateConnected */
	prop = "state";
	snprintf(val, sizeof(val), "%u", 4);
	if (xs_setprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_node, prop, val,
	    strlen(val)))
		goto errout;

	return (0);

 errout:
	printf("%s: failed to set \"%s\" property to \"%s\"\n",
	    sc->sc_dev.dv_xname, prop, val);
	return (-1);
}