/*	$OpenBSD: if_xnf.c,v 1.64 2020/07/10 13:26:40 patrick Exp $	*/

/*
 * Copyright (c) 2015, 2016 Mike Belopuhov
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "vlan.h"
#include "xen.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/task.h>
#include <sys/timeout.h>

#include <machine/bus.h>

#include <dev/pv/xenreg.h>
#include <dev/pv/xenvar.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

/* #define XNF_DEBUG */

#ifdef XNF_DEBUG
#define DPRINTF(x...)		printf(x)
#else
#define DPRINTF(x...)
#endif

/*
 * Rx ring
 */

struct xnf_rx_req {
	uint16_t		 rxq_id;
	uint16_t		 rxq_pad;
	uint32_t		 rxq_ref;
} __packed;

struct xnf_rx_rsp {
	uint16_t		 rxp_id;
	uint16_t		 rxp_offset;
	uint16_t		 rxp_flags;
#define  XNF_RXF_CSUM_VALID	  0x0001
#define  XNF_RXF_CSUM_BLANK	  0x0002
#define  XNF_RXF_CHUNK		  0x0004
#define  XNF_RXF_MGMT		  0x0008
	int16_t			 rxp_status;
} __packed;

union xnf_rx_desc {
	struct xnf_rx_req	 rxd_req;
	struct xnf_rx_rsp	 rxd_rsp;
} __packed;

#define XNF_RX_DESC		256
#define XNF_MCLEN		PAGE_SIZE
#define XNF_RX_MIN		32

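/*
 * The Rx ring follows the layout of a Xen netif shared ring: the
 * producer/consumer indices and their "event" counterparts (used for
 * lazy interrupt delivery) are followed by 12 reserved words padding
 * the header to 64 bytes, then by the descriptor array.  Requests and
 * responses overlay the same slots, hence the union.  With 256 8-byte
 * descriptors the whole structure fits in a single 4k page, which is
 * granted to the backend.
 */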
struct xnf_rx_ring {
	volatile uint32_t	 rxr_prod;
	volatile uint32_t	 rxr_prod_event;
	volatile uint32_t	 rxr_cons;
	volatile uint32_t	 rxr_cons_event;
	uint32_t		 rxr_reserved[12];
	union xnf_rx_desc	 rxr_desc[XNF_RX_DESC];
} __packed;


/*
 * Tx ring
 */

struct xnf_tx_req {
	uint32_t		 txq_ref;
	uint16_t		 txq_offset;
	uint16_t		 txq_flags;
#define  XNF_TXF_CSUM_BLANK	  0x0001
#define  XNF_TXF_CSUM_VALID	  0x0002
#define  XNF_TXF_CHUNK		  0x0004
#define  XNF_TXF_ETXRA		  0x0008
	uint16_t		 txq_id;
	uint16_t		 txq_size;
} __packed;

struct xnf_tx_rsp {
	uint16_t		 txp_id;
	int16_t			 txp_status;
} __packed;

union xnf_tx_desc {
	struct xnf_tx_req	 txd_req;
	struct xnf_tx_rsp	 txd_rsp;
} __packed;

#define XNF_TX_DESC		256
#define XNF_TX_FRAG		18

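/*
 * The Tx ring shares the geometry of the Rx ring.  XNF_TX_FRAG is the
 * maximum number of scatter-gather chunks we hand the backend per
 * packet; a chain that would map to more fragments is defragmented
 * before encapsulation (see xnf_encap()).
 */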
struct xnf_tx_ring {
	volatile uint32_t	 txr_prod;
	volatile uint32_t	 txr_prod_event;
	volatile uint32_t	 txr_cons;
	volatile uint32_t	 txr_cons_event;
	uint32_t		 txr_reserved[12];
	union xnf_tx_desc	 txr_desc[XNF_TX_DESC];
} __packed;

struct xnf_tx_buf {
	uint32_t		 txb_ndesc;
	bus_dmamap_t		 txb_dmap;
	struct mbuf		*txb_mbuf;
};

/* Management frame, "extra info" in Xen parlance */
struct xnf_mgmt {
	uint8_t			 mg_type;
#define  XNF_MGMT_MCAST_ADD	2
#define  XNF_MGMT_MCAST_DEL	3
	uint8_t			 mg_flags;
	union {
		uint8_t		 mgu_mcaddr[ETHER_ADDR_LEN];
		uint16_t	 mgu_pad[3];
	} u;
#define mg_mcaddr		 u.mgu_mcaddr
} __packed;


struct xnf_softc {
	struct device		 sc_dev;
	struct device		*sc_parent;
	char			 sc_node[XEN_MAX_NODE_LEN];
	char			 sc_backend[XEN_MAX_BACKEND_LEN];
	bus_dma_tag_t		 sc_dmat;
	int			 sc_domid;

	struct arpcom		 sc_ac;
	struct ifmedia		 sc_media;

	xen_intr_handle_t	 sc_xih;

	int			 sc_caps;
#define  XNF_CAP_SG		  0x0001
#define  XNF_CAP_CSUM4		  0x0002
#define  XNF_CAP_CSUM6		  0x0004
#define  XNF_CAP_MCAST		  0x0008
#define  XNF_CAP_SPLIT		  0x0010
#define  XNF_CAP_MULTIQ		  0x0020

	/* Rx ring */
	struct xnf_rx_ring	*sc_rx_ring;
	bus_dmamap_t		 sc_rx_rmap;		  /* map for the ring */
	bus_dma_segment_t	 sc_rx_seg;
	uint32_t		 sc_rx_ref;		  /* grant table ref */
	uint32_t		 sc_rx_cons;
	struct mbuf		*sc_rx_buf[XNF_RX_DESC];
	bus_dmamap_t		 sc_rx_dmap[XNF_RX_DESC]; /* maps for packets */
	struct mbuf		*sc_rx_cbuf[2];		  /* chain handling */

	/* Tx ring */
	struct xnf_tx_ring	*sc_tx_ring;
	bus_dmamap_t		 sc_tx_rmap;		  /* map for the ring */
	bus_dma_segment_t	 sc_tx_seg;
	uint32_t		 sc_tx_ref;		  /* grant table ref */
	uint32_t		 sc_tx_cons;
	int			 sc_tx_frags;
	uint32_t		 sc_tx_next;		  /* next buffer */
	volatile unsigned int	 sc_tx_avail;
	struct xnf_tx_buf	 sc_tx_buf[XNF_TX_DESC];
};

int	xnf_match(struct device *, void *, void *);
void	xnf_attach(struct device *, struct device *, void *);
int	xnf_detach(struct device *, int);
int	xnf_lladdr(struct xnf_softc *);
int	xnf_ioctl(struct ifnet *, u_long, caddr_t);
int	xnf_media_change(struct ifnet *);
void	xnf_media_status(struct ifnet *, struct ifmediareq *);
int	xnf_iff(struct xnf_softc *);
void	xnf_init(struct xnf_softc *);
void	xnf_stop(struct xnf_softc *);
void	xnf_start(struct ifqueue *);
int	xnf_encap(struct xnf_softc *, struct mbuf *, uint32_t *);
void	xnf_intr(void *);
void	xnf_watchdog(struct ifnet *);
void	xnf_txeof(struct xnf_softc *);
void	xnf_rxeof(struct xnf_softc *);
int	xnf_rx_ring_fill(struct xnf_softc *);
int	xnf_rx_ring_create(struct xnf_softc *);
void	xnf_rx_ring_drain(struct xnf_softc *);
void	xnf_rx_ring_destroy(struct xnf_softc *);
int	xnf_tx_ring_create(struct xnf_softc *);
void	xnf_tx_ring_drain(struct xnf_softc *);
void	xnf_tx_ring_destroy(struct xnf_softc *);
int	xnf_capabilities(struct xnf_softc *);
int	xnf_init_backend(struct xnf_softc *);

struct cfdriver xnf_cd = {
	NULL, "xnf", DV_IFNET
};

const struct cfattach xnf_ca = {
	sizeof(struct xnf_softc), xnf_match, xnf_attach, xnf_detach
};

int
xnf_match(struct device *parent, void *match, void *aux)
{
	struct xen_attach_args *xa = aux;

	if (strcmp("vif", xa->xa_name))
		return (0);

	return (1);
}

void
xnf_attach(struct device *parent, struct device *self, void *aux)
{
	struct xen_attach_args *xa = aux;
	struct xnf_softc *sc = (struct xnf_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	sc->sc_parent = parent;
	sc->sc_dmat = xa->xa_dmat;
	sc->sc_domid = xa->xa_domid;

	memcpy(sc->sc_node, xa->xa_node, XEN_MAX_NODE_LEN);
	memcpy(sc->sc_backend, xa->xa_backend, XEN_MAX_BACKEND_LEN);

	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	if (xnf_lladdr(sc)) {
		printf(": failed to obtain MAC address\n");
		return;
	}

	if (xen_intr_establish(0, &sc->sc_xih, sc->sc_domid, xnf_intr, sc,
	    ifp->if_xname)) {
		printf(": failed to establish an interrupt\n");
		return;
	}
	xen_intr_mask(sc->sc_xih);

	printf(" backend %d channel %u: address %s\n", sc->sc_domid,
	    sc->sc_xih, ether_sprintf(sc->sc_ac.ac_enaddr));

	if (xnf_capabilities(sc)) {
		xen_intr_disestablish(sc->sc_xih);
		return;
	}

	if (sc->sc_caps & XNF_CAP_SG)
		ifp->if_hardmtu = 9000;

	if (xnf_rx_ring_create(sc)) {
		xen_intr_disestablish(sc->sc_xih);
		return;
	}
	if (xnf_tx_ring_create(sc)) {
		xen_intr_disestablish(sc->sc_xih);
		xnf_rx_ring_destroy(sc);
		return;
	}
	if (xnf_init_backend(sc)) {
		xen_intr_disestablish(sc->sc_xih);
		xnf_rx_ring_destroy(sc);
		xnf_tx_ring_destroy(sc);
		return;
	}

	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = xnf_ioctl;
	ifp->if_qstart = xnf_start;
	ifp->if_watchdog = xnf_watchdog;
	ifp->if_softc = sc;

	ifp->if_capabilities = IFCAP_VLAN_MTU;
	if (sc->sc_caps & XNF_CAP_CSUM4)
		ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
	if (sc->sc_caps & XNF_CAP_CSUM6)
		ifp->if_capabilities |= IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;

	ifq_set_maxlen(&ifp->if_snd, XNF_TX_DESC - 1);

	ifmedia_init(&sc->sc_media, IFM_IMASK, xnf_media_change,
	    xnf_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);

	if_attach(ifp);
	ether_ifattach(ifp);

	/* Kick out emulated em's and re's */
	xen_unplug_emulated(parent, XEN_UNPLUG_NIC);
}

int
xnf_detach(struct device *self, int flags)
{
	struct xnf_softc *sc = (struct xnf_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	xnf_stop(sc);

	ether_ifdetach(ifp);
	if_detach(ifp);

	xen_intr_disestablish(sc->sc_xih);

	if (sc->sc_tx_ring)
		xnf_tx_ring_destroy(sc);
	if (sc->sc_rx_ring)
		xnf_rx_ring_destroy(sc);

	return (0);
}

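/*
 * The backend publishes the MAC address as a colon-separated hex
 * string in the "mac" xenstore node, e.g. "00:16:3e:12:34:56".
 * nibble() converts a single hex digit; xnf_lladdr() below walks the
 * string three characters at a time to assemble the Ethernet address.
 */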
static int
nibble(int ch)
{
	if (ch >= '0' && ch <= '9')
		return (ch - '0');
	if (ch >= 'A' && ch <= 'F')
		return (10 + ch - 'A');
	if (ch >= 'a' && ch <= 'f')
		return (10 + ch - 'a');
	return (-1);
}

int
xnf_lladdr(struct xnf_softc *sc)
{
	char enaddr[ETHER_ADDR_LEN];
	char mac[32];
	int i, j, lo, hi;

	if (xs_getprop(sc->sc_parent, sc->sc_backend, "mac", mac, sizeof(mac)))
		return (-1);

	for (i = 0, j = 0; j < ETHER_ADDR_LEN; i += 3, j++) {
		if ((hi = nibble(mac[i])) == -1 ||
		    (lo = nibble(mac[i+1])) == -1)
			return (-1);
		enaddr[j] = hi << 4 | lo;
	}

	memcpy(sc->sc_ac.ac_enaddr, enaddr, ETHER_ADDR_LEN);
	return (0);
}

int
xnf_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct xnf_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			xnf_init(sc);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				xnf_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				xnf_stop(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_ac, command, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			xnf_iff(sc);
		error = 0;
	}

	splx(s);

	return (error);
}

int
xnf_media_change(struct ifnet *ifp)
{
	return (0);
}

void
xnf_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
}

int
xnf_iff(struct xnf_softc *sc)
{
	return (0);
}

void
xnf_init(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	xnf_stop(sc);

	xnf_iff(sc);

	xnf_rx_ring_fill(sc);

	if (xen_intr_unmask(sc->sc_xih)) {
		printf("%s: failed to enable interrupts\n", ifp->if_xname);
		xnf_stop(sc);
		return;
	}

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}

void
xnf_stop(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	ifp->if_flags &= ~IFF_RUNNING;

	xen_intr_mask(sc->sc_xih);

	ifp->if_timer = 0;

	ifq_barrier(&ifp->if_snd);
	xen_intr_barrier(sc->sc_xih);

	ifq_clr_oactive(&ifp->if_snd);

	if (sc->sc_tx_ring)
		xnf_tx_ring_drain(sc);
	if (sc->sc_rx_ring)
		xnf_rx_ring_drain(sc);
}

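/*
 * MP-safe transmit start routine.  The queue is marked oactive when
 * the ring cannot take a worst-case fragmented packet or no Tx
 * buffers are available; xnf_txeof() restarts it once completions
 * free up space.  The backend is signalled only when it is known to
 * be waiting for more requests, i.e. when txr_prod_event falls into
 * the window of descriptors produced by this call -- the same test
 * Xen's RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() performs:
 *
 *	notify = (prod - txr->txr_prod_event < prod - oprod);
 */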
void
xnf_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct xnf_softc *sc = ifp->if_softc;
	struct xnf_tx_ring *txr = sc->sc_tx_ring;
	struct mbuf *m;
	int pkts = 0;
	uint32_t prod, oprod;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
	    BUS_DMASYNC_POSTREAD);

	prod = oprod = txr->txr_prod;

	for (;;) {
		if (((XNF_TX_DESC - (prod - sc->sc_tx_cons)) <
		    sc->sc_tx_frags) || !sc->sc_tx_avail) {
			/* transient */
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		if (xnf_encap(sc, m, &prod)) {
			/* the chain is too large */
			ifp->if_oerrors++;
			m_freem(m);
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		pkts++;
	}
	if (pkts > 0) {
		txr->txr_prod = prod;
		if (txr->txr_cons_event <= txr->txr_cons)
			txr->txr_cons_event = txr->txr_cons +
			    ((txr->txr_prod - txr->txr_cons) >> 1) + 1;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		if (prod - txr->txr_prod_event < prod - oprod)
			xen_intr_signal(sc->sc_xih);
		ifp->if_timer = 5;
	}
}

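/*
 * Count the number of Tx descriptors a chain will occupy.  Every
 * descriptor covers at most one page, so an mbuf contributes one
 * descriptor per page boundary its buffer crosses plus one: e.g. a
 * 3000-byte buffer starting 200 bytes before a 4k page boundary
 * crosses one boundary and needs two descriptors.
 */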
static inline int
xnf_fragcount(struct mbuf *m_head)
{
	struct mbuf *m;
	vaddr_t va, va0;
	int n = 0;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		     /* start of the buffer */
		for (va0 = va = mtod(m, vaddr_t);
		     /* does the buffer end on this page? */
		     va + (PAGE_SIZE - (va & PAGE_MASK)) < va0 + m->m_len;
		     /* move on to the next page */
		     va += PAGE_SIZE - (va & PAGE_MASK))
			n++;
		n++;
	}
	return (n);
}

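/*
 * Map a packet onto free Tx descriptors starting at *prod.  Each mbuf
 * in the chain is assigned a Tx buffer (found by scanning sc_tx_buf
 * for a free slot) and its DMA segments become ring requests carrying
 * a reference, an intra-page offset and a size.  All descriptors of a
 * packet except the last one have XNF_TXF_CHUNK ("more data") set.
 * On failure the ring is unrolled back to its original producer
 * index.
 */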
int
xnf_encap(struct xnf_softc *sc, struct mbuf *m_head, uint32_t *prod)
{
	struct xnf_tx_ring *txr = sc->sc_tx_ring;
	struct xnf_tx_buf *txb = NULL;
	union xnf_tx_desc *txd = NULL;
	struct mbuf *m;
	uint32_t oprod = *prod;
	uint16_t id;
	int i, flags, n, used = 0;

	if ((xnf_fragcount(m_head) > sc->sc_tx_frags) &&
	    m_defrag(m_head, M_DONTWAIT))
		return (ENOBUFS);

	flags = (sc->sc_domid << 16) | BUS_DMA_WRITE | BUS_DMA_NOWAIT;

	for (m = m_head; m != NULL && m->m_len > 0; m = m->m_next) {
		i = *prod & (XNF_TX_DESC - 1);
		txd = &txr->txr_desc[i];

		/*
		 * Find an unused TX buffer.  We're guaranteed to find one
		 * because xnf_encap cannot be called with sc_tx_avail == 0.
		 */
		do {
			id = sc->sc_tx_next++ & (XNF_TX_DESC - 1);
			txb = &sc->sc_tx_buf[id];
		} while (txb->txb_mbuf);

		if (bus_dmamap_load(sc->sc_dmat, txb->txb_dmap, m->m_data,
		    m->m_len, NULL, flags)) {
			DPRINTF("%s: failed to load %u bytes @%lu\n",
			    sc->sc_dev.dv_xname, m->m_len,
			    mtod(m, vaddr_t) & PAGE_MASK);
			goto unroll;
		}

		for (n = 0; n < txb->txb_dmap->dm_nsegs; n++) {
			i = *prod & (XNF_TX_DESC - 1);
			txd = &txr->txr_desc[i];

			if (m == m_head && n == 0) {
				if (m->m_pkthdr.csum_flags &
				    (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
					txd->txd_req.txq_flags =
					    XNF_TXF_CSUM_BLANK |
					    XNF_TXF_CSUM_VALID;
				txd->txd_req.txq_size = m->m_pkthdr.len;
			} else {
				txd->txd_req.txq_size =
				    txb->txb_dmap->dm_segs[n].ds_len;
			}
			txd->txd_req.txq_ref =
			    txb->txb_dmap->dm_segs[n].ds_addr;
			if (n == 0)
				txd->txd_req.txq_offset =
				    mtod(m, vaddr_t) & PAGE_MASK;
			/* The chunk flag will be removed from the last one */
			txd->txd_req.txq_flags |= XNF_TXF_CHUNK;
			txd->txd_req.txq_id = id;

			txb->txb_ndesc++;
			(*prod)++;
		}

		txb->txb_mbuf = m;
		used++;
	}

	/* Clear the chunk flag from the last segment */
	txd->txd_req.txq_flags &= ~XNF_TXF_CHUNK;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
	    BUS_DMASYNC_PREWRITE);

	KASSERT(sc->sc_tx_avail > used);
	atomic_sub_int(&sc->sc_tx_avail, used);

	return (0);

 unroll:
	DPRINTF("%s: unrolling from %u to %u\n", sc->sc_dev.dv_xname,
	    *prod, oprod);
	for (; *prod != oprod; (*prod)--) {
		i = (*prod - 1) & (XNF_TX_DESC - 1);
		txd = &txr->txr_desc[i];
		id = txd->txd_req.txq_id;
		txb = &sc->sc_tx_buf[id];

		memset(txd, 0, sizeof(*txd));

		if (txb->txb_mbuf) {
			bus_dmamap_sync(sc->sc_dmat, txb->txb_dmap, 0, 0,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->txb_dmap);

			txb->txb_mbuf = NULL;
			txb->txb_ndesc = 0;
		}
	}
	return (ENOBUFS);
}

void
xnf_intr(void *arg)
{
	struct xnf_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	if (ifp->if_flags & IFF_RUNNING) {
		xnf_txeof(sc);
		xnf_rxeof(sc);
	}
}

void
xnf_watchdog(struct ifnet *ifp)
{
	struct xnf_softc *sc = ifp->if_softc;
	struct xnf_tx_ring *txr = sc->sc_tx_ring;

	printf("%s: tx stuck: prod %u cons %u,%u evt %u,%u\n",
	    ifp->if_xname, txr->txr_prod, txr->txr_cons, sc->sc_tx_cons,
	    txr->txr_prod_event, txr->txr_cons_event);
}

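/*
 * Harvest Tx completions.  A Tx buffer is released once all of its
 * descriptors have been consumed by the backend.  The consumer event
 * index is then set halfway between the current consumer and the
 * producer so that the backend interrupts us at most once per
 * half-ring of completions.
 */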
void
xnf_txeof(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct xnf_tx_ring *txr = sc->sc_tx_ring;
	struct xnf_tx_buf *txb;
	union xnf_tx_desc *txd;
	uint done = 0;
	uint32_t cons;
	uint16_t id;
	int i;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (cons = sc->sc_tx_cons; cons != txr->txr_cons; cons++) {
		i = cons & (XNF_TX_DESC - 1);
		txd = &txr->txr_desc[i];
		id = txd->txd_rsp.txp_id;
		txb = &sc->sc_tx_buf[id];

		KASSERT(txb->txb_ndesc > 0);
		if (--txb->txb_ndesc == 0) {
			bus_dmamap_sync(sc->sc_dmat, txb->txb_dmap, 0, 0,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->txb_dmap);

			m_free(txb->txb_mbuf);
			txb->txb_mbuf = NULL;
			done++;
		}

		memset(txd, 0, sizeof(*txd));
	}

	sc->sc_tx_cons = cons;
	txr->txr_cons_event = sc->sc_tx_cons +
	    ((txr->txr_prod - sc->sc_tx_cons) >> 1) + 1;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	atomic_add_int(&sc->sc_tx_avail, done);

	if (sc->sc_tx_cons == txr->txr_prod)
		ifp->if_timer = 0;
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}

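/*
 * Process received packets.  Large frames arrive as a sequence of
 * descriptors with XNF_RXF_CHUNK set on all but the last one; the
 * partially assembled chain is parked in sc_rx_cbuf[] so reassembly
 * can resume on the next invocation if the final chunk has not been
 * produced yet.
 */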
void
xnf_rxeof(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct xnf_rx_ring *rxr = sc->sc_rx_ring;
	union xnf_rx_desc *rxd;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *fmp = sc->sc_rx_cbuf[0];
	struct mbuf *lmp = sc->sc_rx_cbuf[1];
	struct mbuf *m;
	bus_dmamap_t dmap;
	uint32_t cons;
	uint16_t id;
	int i, flags, len, offset;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (cons = sc->sc_rx_cons; cons != rxr->rxr_cons; cons++) {
		i = cons & (XNF_RX_DESC - 1);
		rxd = &rxr->rxr_desc[i];

		id = rxd->rxd_rsp.rxp_id;
		len = rxd->rxd_rsp.rxp_status;
		flags = rxd->rxd_rsp.rxp_flags;
		offset = rxd->rxd_rsp.rxp_offset;

		dmap = sc->sc_rx_dmap[id];
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, 0,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, dmap);

		m = sc->sc_rx_buf[id];
		KASSERT(m != NULL);
		sc->sc_rx_buf[id] = NULL;

		if (flags & XNF_RXF_MGMT) {
			printf("%s: management data present\n",
			    ifp->if_xname);
			m_freem(m);
			continue;
		}

		if (flags & XNF_RXF_CSUM_VALID)
			m->m_pkthdr.csum_flags = M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;

		if (len < 0 || (len + offset > PAGE_SIZE)) {
			ifp->if_ierrors++;
			m_freem(m);
			continue;
		}

		m->m_len = len;
		m->m_data += offset;

		if (fmp == NULL) {
			m->m_pkthdr.len = len;
			fmp = m;
		} else {
			m->m_flags &= ~M_PKTHDR;
			lmp->m_next = m;
			fmp->m_pkthdr.len += m->m_len;
		}
		lmp = m;

		if (flags & XNF_RXF_CHUNK) {
			sc->sc_rx_cbuf[0] = fmp;
			sc->sc_rx_cbuf[1] = lmp;
			continue;
		}

		m = fmp;

		ml_enqueue(&ml, m);
		sc->sc_rx_cbuf[0] = sc->sc_rx_cbuf[1] = fmp = lmp = NULL;

		memset(rxd, 0, sizeof(*rxd));
		rxd->rxd_req.rxq_id = id;
	}

	sc->sc_rx_cons = cons;
	rxr->rxr_cons_event = sc->sc_rx_cons + 1;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if_input(ifp, &ml);

	if (xnf_rx_ring_fill(sc) || (sc->sc_rx_cons != rxr->rxr_cons))
		xen_intr_schedule(sc->sc_xih);
}

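/*
 * Replenish the Rx ring with page-sized clusters, one per free slot.
 * Returns 1 when the ring is still below the XNF_RX_MIN watermark so
 * that the caller can reschedule the interrupt task and retry.  As on
 * the Tx side, the backend is only signalled if it is waiting on
 * rxr_prod_event.
 */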
int
xnf_rx_ring_fill(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct xnf_rx_ring *rxr = sc->sc_rx_ring;
	union xnf_rx_desc *rxd;
	bus_dmamap_t dmap;
	struct mbuf *m;
	uint32_t cons, prod, oprod;
	uint16_t id;
	int i, flags, resched = 0;

	cons = rxr->rxr_cons;
	prod = oprod = rxr->rxr_prod;

	while (prod - cons < XNF_RX_DESC) {
		i = prod & (XNF_RX_DESC - 1);
		rxd = &rxr->rxr_desc[i];

		id = rxd->rxd_rsp.rxp_id;
		if (sc->sc_rx_buf[id])
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, XNF_MCLEN);
		if (m == NULL)
			break;
		m->m_len = m->m_pkthdr.len = XNF_MCLEN;
		dmap = sc->sc_rx_dmap[id];
		flags = (sc->sc_domid << 16) | BUS_DMA_READ | BUS_DMA_NOWAIT;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m, flags)) {
			m_freem(m);
			break;
		}
		sc->sc_rx_buf[id] = m;
		rxd->rxd_req.rxq_ref = dmap->dm_segs[0].ds_addr;
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, 0, BUS_DMASYNC_PREWRITE);
		prod++;
	}

	rxr->rxr_prod = prod;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if ((prod - cons < XNF_RX_MIN) && (ifp->if_flags & IFF_RUNNING))
		resched = 1;
	if (prod - rxr->rxr_prod_event < prod - oprod)
		xen_intr_signal(sc->sc_xih);

	return (resched);
}

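/*
 * Allocate and share the Rx ring.  Note the convention used for the
 * bus_dma flags throughout this driver: the backend domain ID is
 * stashed in the upper 16 bits so that the parent xen(4) DMA tag can
 * establish grant-table entries for the loaded memory, and the
 * resulting segment addresses are grant references rather than
 * physical addresses.  A minimal sketch of the pattern:
 *
 *	flags = (sc->sc_domid << 16) | BUS_DMA_NOWAIT;
 *	bus_dmamap_load(sc->sc_dmat, map, ring, PAGE_SIZE, NULL, flags);
 *	ref = map->dm_segs[0].ds_addr;		grant ref, not a PA
 */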
int
xnf_rx_ring_create(struct xnf_softc *sc)
{
	int i, flags, rsegs;

	/* Allocate a page of memory for the ring */
	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &sc->sc_rx_seg, 1, &rsegs, BUS_DMA_ZERO | BUS_DMA_NOWAIT)) {
		printf("%s: failed to allocate memory for the rx ring\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}
	/* Map in the allocated memory into the ring structure */
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_rx_seg, 1, PAGE_SIZE,
	    (caddr_t *)(&sc->sc_rx_ring), BUS_DMA_NOWAIT)) {
		printf("%s: failed to map memory for the rx ring\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	/* Create a map to load the ring memory into */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rx_rmap)) {
		printf("%s: failed to create a memory map for the rx ring\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	/* Load the ring into the ring map to extract the PA */
	flags = (sc->sc_domid << 16) | BUS_DMA_NOWAIT;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_rmap, sc->sc_rx_ring,
	    PAGE_SIZE, NULL, flags)) {
		printf("%s: failed to load the rx ring map\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	sc->sc_rx_ref = sc->sc_rx_rmap->dm_segs[0].ds_addr;

	sc->sc_rx_ring->rxr_prod_event = sc->sc_rx_ring->rxr_cons_event = 1;

	for (i = 0; i < XNF_RX_DESC; i++) {
		if (bus_dmamap_create(sc->sc_dmat, XNF_MCLEN, 1, XNF_MCLEN,
		    PAGE_SIZE, BUS_DMA_NOWAIT, &sc->sc_rx_dmap[i])) {
			printf("%s: failed to create a memory map for the"
			    " rx slot %d\n", sc->sc_dev.dv_xname, i);
			goto errout;
		}
		sc->sc_rx_ring->rxr_desc[i].rxd_req.rxq_id = i;
	}

	return (0);

 errout:
	xnf_rx_ring_destroy(sc);
	return (-1);
}

void
xnf_rx_ring_drain(struct xnf_softc *sc)
{
	struct xnf_rx_ring *rxr = sc->sc_rx_ring;

	if (sc->sc_rx_cons != rxr->rxr_cons)
		xnf_rxeof(sc);
}

void
xnf_rx_ring_destroy(struct xnf_softc *sc)
{
	int i;

	for (i = 0; i < XNF_RX_DESC; i++) {
		if (sc->sc_rx_buf[i] == NULL)
			continue;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmap[i], 0, 0,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_dmap[i]);
		m_freem(sc->sc_rx_buf[i]);
		sc->sc_rx_buf[i] = NULL;
	}

	for (i = 0; i < XNF_RX_DESC; i++) {
		if (sc->sc_rx_dmap[i] == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmap[i]);
		sc->sc_rx_dmap[i] = NULL;
	}
	if (sc->sc_rx_rmap) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_rmap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_rmap);
	}
	if (sc->sc_rx_ring) {
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_rx_ring,
		    PAGE_SIZE);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_rx_seg, 1);
	}
	sc->sc_rx_ring = NULL;
	sc->sc_rx_rmap = NULL;
	sc->sc_rx_cons = 0;
}

int
xnf_tx_ring_create(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i, flags, nsegs, rsegs;
	bus_size_t segsz;

	sc->sc_tx_frags = sc->sc_caps & XNF_CAP_SG ? XNF_TX_FRAG : 1;

	/* Allocate a page of memory for the ring */
	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &sc->sc_tx_seg, 1, &rsegs, BUS_DMA_ZERO | BUS_DMA_NOWAIT)) {
		printf("%s: failed to allocate memory for the tx ring\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}
	/* Map in the allocated memory into the ring structure */
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_tx_seg, 1, PAGE_SIZE,
	    (caddr_t *)&sc->sc_tx_ring, BUS_DMA_NOWAIT)) {
		printf("%s: failed to map memory for the tx ring\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	/* Create a map to load the ring memory into */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT, &sc->sc_tx_rmap)) {
		printf("%s: failed to create a memory map for the tx ring\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	/* Load the ring into the ring map to extract the PA */
	flags = (sc->sc_domid << 16) | BUS_DMA_NOWAIT;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_tx_rmap, sc->sc_tx_ring,
	    PAGE_SIZE, NULL, flags)) {
		printf("%s: failed to load the tx ring map\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	sc->sc_tx_ref = sc->sc_tx_rmap->dm_segs[0].ds_addr;

	sc->sc_tx_ring->txr_prod_event = sc->sc_tx_ring->txr_cons_event = 1;

	if (sc->sc_caps & XNF_CAP_SG) {
		nsegs = roundup(ifp->if_hardmtu, XNF_MCLEN) / XNF_MCLEN + 1;
		segsz = nsegs * XNF_MCLEN;
	} else {
		nsegs = 1;
		segsz = XNF_MCLEN;
	}
	for (i = 0; i < XNF_TX_DESC; i++) {
		if (bus_dmamap_create(sc->sc_dmat, segsz, nsegs, XNF_MCLEN,
		    PAGE_SIZE, BUS_DMA_NOWAIT, &sc->sc_tx_buf[i].txb_dmap)) {
			printf("%s: failed to create a memory map for the"
			    " tx slot %d\n", sc->sc_dev.dv_xname, i);
			goto errout;
		}
	}

	sc->sc_tx_avail = XNF_TX_DESC;
	sc->sc_tx_next = 0;

	return (0);

 errout:
	xnf_tx_ring_destroy(sc);
	return (-1);
}

void
xnf_tx_ring_drain(struct xnf_softc *sc)
{
	struct xnf_tx_ring *txr = sc->sc_tx_ring;

	if (sc->sc_tx_cons != txr->txr_cons)
		xnf_txeof(sc);
}

void
xnf_tx_ring_destroy(struct xnf_softc *sc)
{
	int i;

	for (i = 0; i < XNF_TX_DESC; i++) {
		if (sc->sc_tx_buf[i].txb_dmap == NULL)
			continue;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_buf[i].txb_dmap, 0, 0,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_buf[i].txb_dmap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_buf[i].txb_dmap);
		sc->sc_tx_buf[i].txb_dmap = NULL;
		if (sc->sc_tx_buf[i].txb_mbuf == NULL)
			continue;
		m_free(sc->sc_tx_buf[i].txb_mbuf);
		sc->sc_tx_buf[i].txb_mbuf = NULL;
		sc->sc_tx_buf[i].txb_ndesc = 0;
	}
	if (sc->sc_tx_rmap) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_rmap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_rmap);
	}
	if (sc->sc_tx_ring) {
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_tx_ring,
		    PAGE_SIZE);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_tx_seg, 1);
	}
	sc->sc_tx_ring = NULL;
	sc->sc_tx_rmap = NULL;
	sc->sc_tx_avail = XNF_TX_DESC;
	sc->sc_tx_next = 0;
}

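/*
 * Negotiate features with the backend.  Each "feature-*" xenstore
 * node is optional: ENOENT simply means the backend does not
 * advertise the feature, while any other xenstore error is fatal.
 */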
int
xnf_capabilities(struct xnf_softc *sc)
{
	unsigned long long res;
	const char *prop;
	int error;

	/* Query scatter-gather capability */
	prop = "feature-sg";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
	    && error != ENOENT)
		goto errout;
	if (error == 0 && res == 1)
		sc->sc_caps |= XNF_CAP_SG;

#if 0
	/* Query IPv4 checksum offloading capability, enabled by default */
	sc->sc_caps |= XNF_CAP_CSUM4;
	prop = "feature-no-csum-offload";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
	    && error != ENOENT)
		goto errout;
	if (error == 0 && res == 1)
		sc->sc_caps &= ~XNF_CAP_CSUM4;

	/* Query IPv6 checksum offloading capability */
	prop = "feature-ipv6-csum-offload";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
	    && error != ENOENT)
		goto errout;
	if (error == 0 && res == 1)
		sc->sc_caps |= XNF_CAP_CSUM6;
#endif

	/* Query multicast traffic control capability */
	prop = "feature-multicast-control";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
	    && error != ENOENT)
		goto errout;
	if (error == 0 && res == 1)
		sc->sc_caps |= XNF_CAP_MCAST;

	/* Query split Rx/Tx event channel capability */
	prop = "feature-split-event-channels";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
	    && error != ENOENT)
		goto errout;
	if (error == 0 && res == 1)
		sc->sc_caps |= XNF_CAP_SPLIT;

	/* Query multiqueue capability */
	prop = "multi-queue-max-queues";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
	    && error != ENOENT)
		goto errout;
	if (error == 0)
		sc->sc_caps |= XNF_CAP_MULTIQ;

	DPRINTF("%s: capabilities %b\n", sc->sc_dev.dv_xname, sc->sc_caps,
	    "\20\006MULTIQ\005SPLIT\004MCAST\003CSUM6\002CSUM4\001SG");
	return (0);

 errout:
	printf("%s: failed to read \"%s\" property\n", sc->sc_dev.dv_xname,
	    prop);
	return (-1);
}

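/*
 * Publish the frontend configuration (ring grant references, event
 * channel port and negotiated features) to xenstore and move the
 * device into the Connected state so that the backend starts
 * processing the rings.
 */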
int
xnf_init_backend(struct xnf_softc *sc)
{
	const char *prop;

	/* Plumb the Rx ring */
	prop = "rx-ring-ref";
	if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_rx_ref))
		goto errout;
	/* Enable "copy" mode */
	prop = "request-rx-copy";
	if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
		goto errout;
	/* Enable notify mode */
	prop = "feature-rx-notify";
	if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
		goto errout;

	/* Plumb the Tx ring */
	prop = "tx-ring-ref";
	if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_tx_ref))
		goto errout;
	/* Enable scatter-gather mode */
	if (sc->sc_tx_frags > 1) {
		prop = "feature-sg";
		if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
			goto errout;
	}

	/* Disable IPv4 checksum offloading */
	if (!(sc->sc_caps & XNF_CAP_CSUM4)) {
		prop = "feature-no-csum-offload";
		if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
			goto errout;
	}

	/* Enable IPv6 checksum offloading */
	if (sc->sc_caps & XNF_CAP_CSUM6) {
		prop = "feature-ipv6-csum-offload";
		if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
			goto errout;
	}

	/* Plumb the event channel port */
	prop = "event-channel";
	if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_xih))
		goto errout;

	/* Connect the device */
	prop = "state";
	if (xs_setprop(sc->sc_parent, sc->sc_node, prop, XEN_STATE_CONNECTED,
	    strlen(XEN_STATE_CONNECTED)))
		goto errout;

	return (0);

 errout:
	printf("%s: failed to set \"%s\" property\n", sc->sc_dev.dv_xname,
	    prop);
	return (-1);
}