/*	$OpenBSD: if_vmx.c,v 1.55 2019/10/27 22:24:40 dlg Exp $	*/

/*
 * Copyright (c) 2013 Tsubai Masanari
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/atomic.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>

#include <dev/pci/if_vmxreg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define NRXQUEUE 1
#define NTXQUEUE 1

#define NTXDESC 512 /* tx ring size */
#define NTXSEGS 8 /* tx descriptors per packet */
#define NRXDESC 512
#define NTXCOMPDESC NTXDESC
#define NRXCOMPDESC (NRXDESC * 2)	/* ring1 + ring2 */

#define VMXNET3_DRIVER_VERSION 0x00010000

#define VMX_TX_GEN	htole32(VMXNET3_TX_GEN_M << VMXNET3_TX_GEN_S)
#define VMX_TXC_GEN	htole32(VMXNET3_TXC_GEN_M << VMXNET3_TXC_GEN_S)
#define VMX_RX_GEN	htole32(VMXNET3_RX_GEN_M << VMXNET3_RX_GEN_S)
#define VMX_RXC_GEN	htole32(VMXNET3_RXC_GEN_M << VMXNET3_RXC_GEN_S)

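/*
 * Ring descriptors carry a "generation" bit (the VMX_*_GEN masks above,
 * kept in pre-byteswapped little-endian form). A producer writes entries
 * tagged with the ring's current generation value and toggles that value
 * every time the ring wraps, so an entry whose generation bit matches
 * the ring's current value is valid, and anything else still belongs to
 * the other side.
 */
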
struct vmxnet3_softc;

struct vmxnet3_txring {
	struct mbuf *m[NTXDESC];
	bus_dmamap_t dmap[NTXDESC];
	struct vmxnet3_txdesc *txd;
	u_int32_t gen;
	u_int prod;
	u_int cons;
};

struct vmxnet3_rxring {
	struct vmxnet3_softc *sc;
	struct mbuf *m[NRXDESC];
	bus_dmamap_t dmap[NRXDESC];
	struct mutex mtx;
	struct if_rxring rxr;
	struct timeout refill;
	struct vmxnet3_rxdesc *rxd;
	u_int fill;
	u_int32_t gen;
	u_int8_t rid;
};

struct vmxnet3_comp_ring {
	union {
		struct vmxnet3_txcompdesc *txcd;
		struct vmxnet3_rxcompdesc *rxcd;
	};
	u_int next;
	u_int32_t gen;
};

struct vmxnet3_txqueue {
	struct vmxnet3_txring cmd_ring;
	struct vmxnet3_comp_ring comp_ring;
	struct vmxnet3_txq_shared *ts;
};

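/*
 * Each RX queue has two command rings. Ring 1 (rid 0) is filled with
 * whole-packet buffers; ring 2 would presumably carry additional body
 * buffers for multi-descriptor packets, but this driver leaves it empty
 * (see vmxnet3_rxinit()).
 */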
struct vmxnet3_rxqueue {
	struct vmxnet3_rxring cmd_ring[2];
	struct vmxnet3_comp_ring comp_ring;
	struct vmxnet3_rxq_shared *rs;
};

struct vmxnet3_softc {
	struct device sc_dev;
	struct arpcom sc_arpcom;
	struct ifmedia sc_media;

	bus_space_tag_t	sc_iot0;
	bus_space_tag_t	sc_iot1;
	bus_space_handle_t sc_ioh0;
	bus_space_handle_t sc_ioh1;
	bus_dma_tag_t sc_dmat;
	void *sc_ih;

	struct vmxnet3_txqueue sc_txq[NTXQUEUE];
	struct vmxnet3_rxqueue sc_rxq[NRXQUEUE];
	struct vmxnet3_driver_shared *sc_ds;
	u_int8_t *sc_mcast;
};

#define VMXNET3_STAT

#ifdef VMXNET3_STAT
struct {
	u_int ntxdesc;
	u_int nrxdesc;
	u_int txhead;
	u_int txdone;
	u_int maxtxlen;
	u_int rxdone;
	u_int rxfill;
	u_int intr;
} vmxstat = {
	NTXDESC, NRXDESC
};
#endif

#define JUMBO_LEN (1024 * 9)
#define DMAADDR(map) ((map)->dm_segs[0].ds_addr)

#define READ_BAR0(sc, reg) bus_space_read_4((sc)->sc_iot0, (sc)->sc_ioh0, reg)
#define READ_BAR1(sc, reg) bus_space_read_4((sc)->sc_iot1, (sc)->sc_ioh1, reg)
#define WRITE_BAR0(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot0, (sc)->sc_ioh0, reg, val)
#define WRITE_BAR1(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot1, (sc)->sc_ioh1, reg, val)
#define WRITE_CMD(sc, cmd) WRITE_BAR1(sc, VMXNET3_BAR1_CMD, cmd)
#define vtophys(va) 0		/* XXX ok? */
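/*
 * The driver_data fields in the shared memory appear to be opaque
 * cookies for the device, so the stubbed-out vtophys() above feeds them
 * a plain 0; this is presumed to be harmless.
 */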

int vmxnet3_match(struct device *, void *, void *);
void vmxnet3_attach(struct device *, struct device *, void *);
int vmxnet3_dma_init(struct vmxnet3_softc *);
int vmxnet3_alloc_txring(struct vmxnet3_softc *, int);
int vmxnet3_alloc_rxring(struct vmxnet3_softc *, int);
void vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
void vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
void vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
void vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
void vmxnet3_link_state(struct vmxnet3_softc *);
void vmxnet3_enable_all_intrs(struct vmxnet3_softc *);
void vmxnet3_disable_all_intrs(struct vmxnet3_softc *);
int vmxnet3_intr(void *);
int vmxnet3_intr_intx(void *);
void vmxnet3_evintr(struct vmxnet3_softc *);
void vmxnet3_txintr(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
void vmxnet3_rxintr(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
void vmxnet3_rxfill_tick(void *);
void vmxnet3_rxfill(struct vmxnet3_rxring *);
void vmxnet3_iff(struct vmxnet3_softc *);
void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
void vmxnet3_stop(struct ifnet *);
void vmxnet3_reset(struct vmxnet3_softc *);
int vmxnet3_init(struct vmxnet3_softc *);
int vmxnet3_ioctl(struct ifnet *, u_long, caddr_t);
void vmxnet3_start(struct ifqueue *);
int vmxnet3_load_mbuf(struct vmxnet3_softc *, struct vmxnet3_txring *,
    struct mbuf **);
void vmxnet3_watchdog(struct ifnet *);
void vmxnet3_media_status(struct ifnet *, struct ifmediareq *);
int vmxnet3_media_change(struct ifnet *);
void *vmxnet3_dma_allocmem(struct vmxnet3_softc *, u_int, u_int, bus_addr_t *);

const struct pci_matchid vmx_devices[] = {
	{ PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET_3 }
};

struct cfattach vmx_ca = {
	sizeof(struct vmxnet3_softc), vmxnet3_match, vmxnet3_attach
};

struct cfdriver vmx_cd = {
	NULL, "vmx", DV_IFNET
};

int
vmxnet3_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, vmx_devices, nitems(vmx_devices)));
}

void
vmxnet3_attach(struct device *parent, struct device *self, void *aux)
{
	struct vmxnet3_softc *sc = (void *)self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	pci_intr_handle_t ih;
	const char *intrstr;
	u_int memtype, ver, macl, mach, intrcfg;
	u_char enaddr[ETHER_ADDR_LEN];
	int (*isr)(void *);

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 0x10);
	if (pci_mapreg_map(pa, 0x10, memtype, 0, &sc->sc_iot0, &sc->sc_ioh0,
	    NULL, NULL, 0)) {
		printf(": failed to map BAR0\n");
		return;
	}
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 0x14);
	if (pci_mapreg_map(pa, 0x14, memtype, 0, &sc->sc_iot1, &sc->sc_ioh1,
	    NULL, NULL, 0)) {
		printf(": failed to map BAR1\n");
		return;
	}

	ver = READ_BAR1(sc, VMXNET3_BAR1_VRRS);
	if ((ver & 0x1) == 0) {
		printf(": unsupported hardware version 0x%x\n", ver);
		return;
	}
	WRITE_BAR1(sc, VMXNET3_BAR1_VRRS, 1);

	ver = READ_BAR1(sc, VMXNET3_BAR1_UVRS);
	if ((ver & 0x1) == 0) {
		printf(": incompatible UPT version 0x%x\n", ver);
		return;
	}
	WRITE_BAR1(sc, VMXNET3_BAR1_UVRS, 1);

	sc->sc_dmat = pa->pa_dmat;
	if (vmxnet3_dma_init(sc)) {
		printf(": failed to setup DMA\n");
		return;
	}

	WRITE_CMD(sc, VMXNET3_CMD_GET_INTRCFG);
	intrcfg = READ_BAR1(sc, VMXNET3_BAR1_CMD);
	isr = vmxnet3_intr;

	switch (intrcfg & VMXNET3_INTRCFG_TYPE_MASK) {
	case VMXNET3_INTRCFG_TYPE_AUTO:
	case VMXNET3_INTRCFG_TYPE_MSIX:
		/* FALLTHROUGH */
	case VMXNET3_INTRCFG_TYPE_MSI:
		if (pci_intr_map_msi(pa, &ih) == 0)
			break;

		/* FALLTHROUGH */
	case VMXNET3_INTRCFG_TYPE_INTX:
		isr = vmxnet3_intr_intx;
		if (pci_intr_map(pa, &ih) == 0)
			break;

		printf(": failed to map interrupt\n");
		return;
	}
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE,
	    isr, sc, self->dv_xname);
	intrstr = pci_intr_string(pa->pa_pc, ih);
	if (intrstr)
		printf(": %s", intrstr);

	WRITE_CMD(sc, VMXNET3_CMD_GET_MACL);
	macl = READ_BAR1(sc, VMXNET3_BAR1_CMD);
	enaddr[0] = macl;
	enaddr[1] = macl >> 8;
	enaddr[2] = macl >> 16;
	enaddr[3] = macl >> 24;
	WRITE_CMD(sc, VMXNET3_CMD_GET_MACH);
	mach = READ_BAR1(sc, VMXNET3_BAR1_CMD);
	enaddr[4] = mach;
	enaddr[5] = mach >> 8;

	WRITE_BAR1(sc, VMXNET3_BAR1_MACL, macl);
	WRITE_BAR1(sc, VMXNET3_BAR1_MACH, mach);
	printf(", address %s\n", ether_sprintf(enaddr));

	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, 6);
	strlcpy(ifp->if_xname, self->dv_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = vmxnet3_ioctl;
	ifp->if_qstart = vmxnet3_start;
	ifp->if_watchdog = vmxnet3_watchdog;
	ifp->if_hardmtu = VMXNET3_MAX_MTU;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if 0
	if (sc->sc_ds->upt_features & UPT1_F_CSUM)
		ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
#endif
	if (sc->sc_ds->upt_features & UPT1_F_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

	IFQ_SET_MAXLEN(&ifp->if_snd, NTXDESC);

	ifmedia_init(&sc->sc_media, IFM_IMASK, vmxnet3_media_change,
	    vmxnet3_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10G_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10G_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_1000_T, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);
	vmxnet3_link_state(sc);
}

int
vmxnet3_dma_init(struct vmxnet3_softc *sc)
{
	struct vmxnet3_driver_shared *ds;
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_rxq_shared *rs;
	bus_addr_t ds_pa, qs_pa, mcast_pa;
	int i, queue, qs_len;
	u_int major, minor, release_code, rev;

	qs_len = NTXQUEUE * sizeof *ts + NRXQUEUE * sizeof *rs;
	ts = vmxnet3_dma_allocmem(sc, qs_len, VMXNET3_DMADESC_ALIGN, &qs_pa);
	if (ts == NULL)
		return -1;
	for (queue = 0; queue < NTXQUEUE; queue++)
		sc->sc_txq[queue].ts = ts++;
	rs = (void *)ts;
	for (queue = 0; queue < NRXQUEUE; queue++)
		sc->sc_rxq[queue].rs = rs++;

	for (queue = 0; queue < NTXQUEUE; queue++)
		if (vmxnet3_alloc_txring(sc, queue))
			return -1;
	for (queue = 0; queue < NRXQUEUE; queue++)
		if (vmxnet3_alloc_rxring(sc, queue))
			return -1;

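	/*
	 * The multicast filter table holds up to 682 Ethernet addresses:
	 * 682 * ETHER_ADDR_LEN is 4092 bytes, which presumably keeps the
	 * table within one 4KB page. vmxnet3_iff() falls back to
	 * ALLMULTI beyond that.
	 */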
	sc->sc_mcast = vmxnet3_dma_allocmem(sc, 682 * ETHER_ADDR_LEN, 32, &mcast_pa);
	if (sc->sc_mcast == NULL)
		return -1;

	ds = vmxnet3_dma_allocmem(sc, sizeof *sc->sc_ds, 8, &ds_pa);
	if (ds == NULL)
		return -1;
	sc->sc_ds = ds;
	ds->magic = VMXNET3_REV1_MAGIC;
	ds->version = VMXNET3_DRIVER_VERSION;

	/*
	 * XXX The FreeBSD version uses the following values:
	 * (Does the device behavior depend on them?)
	 *
	 * major = __FreeBSD_version / 100000;
	 * minor = (__FreeBSD_version / 1000) % 100;
	 * release_code = (__FreeBSD_version / 100) % 10;
	 * rev = __FreeBSD_version % 100;
	 */
	major = 0;
	minor = 0;
	release_code = 0;
	rev = 0;
#ifdef __LP64__
	ds->guest = release_code << 30 | rev << 22 | major << 14 | minor << 6
	    | VMXNET3_GOS_FREEBSD | VMXNET3_GOS_64BIT;
#else
	ds->guest = release_code << 30 | rev << 22 | major << 14 | minor << 6
	    | VMXNET3_GOS_FREEBSD | VMXNET3_GOS_32BIT;
#endif
	ds->vmxnet3_revision = 1;
	ds->upt_version = 1;
	ds->upt_features = UPT1_F_CSUM | UPT1_F_VLAN;
	ds->driver_data = vtophys(sc);
	ds->driver_data_len = sizeof(struct vmxnet3_softc);
	ds->queue_shared = qs_pa;
	ds->queue_shared_len = qs_len;
	ds->mtu = VMXNET3_MAX_MTU;
	ds->ntxqueue = NTXQUEUE;
	ds->nrxqueue = NRXQUEUE;
	ds->mcast_table = mcast_pa;
	ds->automask = 1;
	ds->nintr = VMXNET3_NINTR;
	ds->evintr = 0;
	ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;
	for (i = 0; i < VMXNET3_NINTR; i++)
		ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;
	WRITE_BAR1(sc, VMXNET3_BAR1_DSL, ds_pa);
	WRITE_BAR1(sc, VMXNET3_BAR1_DSH, (u_int64_t)ds_pa >> 32);
	return 0;
}

int
vmxnet3_alloc_txring(struct vmxnet3_softc *sc, int queue)
{
	struct vmxnet3_txqueue *tq = &sc->sc_txq[queue];
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
	bus_addr_t pa, comp_pa;
	int idx;

	ring->txd = vmxnet3_dma_allocmem(sc, NTXDESC * sizeof ring->txd[0], 512, &pa);
	if (ring->txd == NULL)
		return -1;
	comp_ring->txcd = vmxnet3_dma_allocmem(sc,
	    NTXCOMPDESC * sizeof comp_ring->txcd[0], 512, &comp_pa);
	if (comp_ring->txcd == NULL)
		return -1;

	for (idx = 0; idx < NTXDESC; idx++) {
		if (bus_dmamap_create(sc->sc_dmat, JUMBO_LEN, NTXSEGS,
		    VMXNET3_TX_LEN_M + 1, 0, BUS_DMA_NOWAIT, &ring->dmap[idx]))
			return -1;
	}

	ts = tq->ts;
	bzero(ts, sizeof *ts);
	ts->npending = 0;
	ts->intr_threshold = 1;
	ts->cmd_ring = pa;
	ts->cmd_ring_len = NTXDESC;
	ts->comp_ring = comp_pa;
	ts->comp_ring_len = NTXCOMPDESC;
	ts->driver_data = vtophys(tq);
	ts->driver_data_len = sizeof *tq;
	ts->intr_idx = 0;
	ts->stopped = 1;
	ts->error = 0;
	return 0;
}

int
vmxnet3_alloc_rxring(struct vmxnet3_softc *sc, int queue)
{
	struct vmxnet3_rxqueue *rq = &sc->sc_rxq[queue];
	struct vmxnet3_rxq_shared *rs;
	struct vmxnet3_rxring *ring;
	struct vmxnet3_comp_ring *comp_ring;
	bus_addr_t pa[2], comp_pa;
	int i, idx;

	for (i = 0; i < 2; i++) {
		ring = &rq->cmd_ring[i];
		ring->rxd = vmxnet3_dma_allocmem(sc, NRXDESC * sizeof ring->rxd[0],
		    512, &pa[i]);
		if (ring->rxd == NULL)
			return -1;
	}
	comp_ring = &rq->comp_ring;
	comp_ring->rxcd = vmxnet3_dma_allocmem(sc,
	    NRXCOMPDESC * sizeof comp_ring->rxcd[0], 512, &comp_pa);
	if (comp_ring->rxcd == NULL)
		return -1;

	for (i = 0; i < 2; i++) {
		ring = &rq->cmd_ring[i];
		ring->sc = sc;
		ring->rid = i;
		mtx_init(&ring->mtx, IPL_NET);
		timeout_set(&ring->refill, vmxnet3_rxfill_tick, ring);
		for (idx = 0; idx < NRXDESC; idx++) {
			if (bus_dmamap_create(sc->sc_dmat, JUMBO_LEN, 1,
			    JUMBO_LEN, 0, BUS_DMA_NOWAIT, &ring->dmap[idx]))
				return -1;
		}
	}

	rs = rq->rs;
	bzero(rs, sizeof *rs);
	rs->cmd_ring[0] = pa[0];
	rs->cmd_ring[1] = pa[1];
	rs->cmd_ring_len[0] = NRXDESC;
	rs->cmd_ring_len[1] = NRXDESC;
	rs->comp_ring = comp_pa;
	rs->comp_ring_len = NRXCOMPDESC;
	rs->driver_data = vtophys(rq);
	rs->driver_data_len = sizeof *rq;
	rs->intr_idx = 0;
	rs->stopped = 1;
	rs->error = 0;
	return 0;
}

void
vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
{
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;

	ring->cons = ring->prod = 0;
	ring->gen = VMX_TX_GEN;
	comp_ring->next = 0;
	comp_ring->gen = VMX_TXC_GEN;
	bzero(ring->txd, NTXDESC * sizeof ring->txd[0]);
	bzero(comp_ring->txcd, NTXCOMPDESC * sizeof comp_ring->txcd[0]);
}

void
vmxnet3_rxfill_tick(void *arg)
{
	struct vmxnet3_rxring *ring = arg;

	if (!mtx_enter_try(&ring->mtx))
		return;

	vmxnet3_rxfill(ring);
	mtx_leave(&ring->mtx);
}

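/*
 * Fill RX ring 1 with mbuf clusters. Each slot gets a DMA-mapped
 * cluster and a descriptor carrying the buffer address, its length and
 * the ring's current generation bit. If the ring ends up completely
 * empty because allocation failed, schedule a timeout to retry the
 * fill. Called with the ring mutex held.
 */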
void
vmxnet3_rxfill(struct vmxnet3_rxring *ring)
{
	struct vmxnet3_softc *sc = ring->sc;
	struct vmxnet3_rxdesc *rxd;
	struct mbuf *m;
	bus_dmamap_t map;
	u_int slots;
	unsigned int prod;
	uint32_t rgen;
	uint32_t type = htole32(VMXNET3_BTYPE_HEAD << VMXNET3_RX_BTYPE_S);

	MUTEX_ASSERT_LOCKED(&ring->mtx);

	prod = ring->fill;
	rgen = ring->gen;

	for (slots = if_rxr_get(&ring->rxr, NRXDESC); slots > 0; slots--) {
		KASSERT(ring->m[prod] == NULL);

		m = MCLGETI(NULL, M_DONTWAIT, NULL, JUMBO_LEN);
		if (m == NULL)
			break;

		m->m_pkthdr.len = m->m_len = JUMBO_LEN;
		m_adj(m, ETHER_ALIGN);

		map = ring->dmap[prod];
		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
			panic("load mbuf");

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);

		ring->m[prod] = m;

		rxd = &ring->rxd[prod];
		rxd->rx_addr = htole64(DMAADDR(map));
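		/*
		 * Publish the buffer address before handing the
		 * descriptor to the device with the rx_word2 write
		 * below.
		 */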
		membar_producer();
		rxd->rx_word2 = (htole32(m->m_pkthdr.len & VMXNET3_RX_LEN_M) <<
		    VMXNET3_RX_LEN_S) | type | rgen;

		if (++prod == NRXDESC) {
			prod = 0;
			rgen ^= VMX_RX_GEN;
		}
	}
	if_rxr_put(&ring->rxr, slots);

	ring->fill = prod;
	ring->gen = rgen;

	if (if_rxr_inuse(&ring->rxr) == 0)
		timeout_add(&ring->refill, 1);
}

void
vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
{
	struct vmxnet3_rxring *ring;
	struct vmxnet3_comp_ring *comp_ring;
	int i;

	for (i = 0; i < 2; i++) {
		ring = &rq->cmd_ring[i];
		ring->fill = 0;
		ring->gen = VMX_RX_GEN;
		bzero(ring->rxd, NRXDESC * sizeof ring->rxd[0]);
		if_rxr_init(&ring->rxr, 2, NRXDESC - 1);
	}

	/* XXX only fill ring 0 */
	ring = &rq->cmd_ring[0];
	mtx_enter(&ring->mtx);
	vmxnet3_rxfill(ring);
	mtx_leave(&ring->mtx);

	comp_ring = &rq->comp_ring;
	comp_ring->next = 0;
	comp_ring->gen = VMX_RXC_GEN;
	bzero(comp_ring->rxcd, NRXCOMPDESC * sizeof comp_ring->rxcd[0]);
}

void
vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
{
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	int idx;

	for (idx = 0; idx < NTXDESC; idx++) {
		if (ring->m[idx]) {
			bus_dmamap_unload(sc->sc_dmat, ring->dmap[idx]);
			m_freem(ring->m[idx]);
			ring->m[idx] = NULL;
		}
	}
}

void
vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
{
	struct vmxnet3_rxring *ring;
	int i, idx;

	for (i = 0; i < 2; i++) {
		ring = &rq->cmd_ring[i];
		timeout_del(&ring->refill);
		for (idx = 0; idx < NRXDESC; idx++) {
			struct mbuf *m = ring->m[idx];
			if (m == NULL)
				continue;

			ring->m[idx] = NULL;
			m_freem(m);
			bus_dmamap_unload(sc->sc_dmat, ring->dmap[idx]);
		}
	}
}

void
vmxnet3_link_state(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int x, link, speed;

	WRITE_CMD(sc, VMXNET3_CMD_GET_LINK);
	x = READ_BAR1(sc, VMXNET3_BAR1_CMD);
	speed = x >> 16;
	if (x & 1) {
		ifp->if_baudrate = IF_Mbps(speed);
		link = LINK_STATE_UP;
	} else
		link = LINK_STATE_DOWN;

	if (ifp->if_link_state != link) {
		ifp->if_link_state = link;
		if_link_state_change(ifp);
	}
}

static inline void
vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq)
{
	WRITE_BAR0(sc, VMXNET3_BAR0_IMASK(irq), 0);
}

static inline void
vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq)
{
	WRITE_BAR0(sc, VMXNET3_BAR0_IMASK(irq), 1);
}

void
vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc)
{
	int i;

	sc->sc_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL;
	for (i = 0; i < VMXNET3_NINTR; i++)
		vmxnet3_enable_intr(sc, i);
}

void
vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc)
{
	int i;

	sc->sc_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL;
	for (i = 0; i < VMXNET3_NINTR; i++)
		vmxnet3_disable_intr(sc, i);
}

int
vmxnet3_intr_intx(void *arg)
{
	struct vmxnet3_softc *sc = arg;

	if (READ_BAR1(sc, VMXNET3_BAR1_INTR) == 0)
		return 0;

	return (vmxnet3_intr(sc));
}

int
vmxnet3_intr(void *arg)
{
	struct vmxnet3_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	if (sc->sc_ds->event) {
		KERNEL_LOCK();
		vmxnet3_evintr(sc);
		KERNEL_UNLOCK();
	}

	if (ifp->if_flags & IFF_RUNNING) {
		vmxnet3_rxintr(sc, &sc->sc_rxq[0]);
		vmxnet3_txintr(sc, &sc->sc_txq[0]);
		vmxnet3_enable_intr(sc, 0);
	}

	return 1;
}

void
vmxnet3_evintr(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int event = sc->sc_ds->event;
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_rxq_shared *rs;

	/* Clear events. */
	WRITE_BAR1(sc, VMXNET3_BAR1_EVENT, event);

	/* Link state change? */
	if (event & VMXNET3_EVENT_LINK)
		vmxnet3_link_state(sc);

	/* Queue error? */
	if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
		WRITE_CMD(sc, VMXNET3_CMD_GET_STATUS);

		ts = sc->sc_txq[0].ts;
		if (ts->stopped)
			printf("%s: TX error 0x%x\n", ifp->if_xname, ts->error);
		rs = sc->sc_rxq[0].rs;
		if (rs->stopped)
			printf("%s: RX error 0x%x\n", ifp->if_xname, rs->error);
		vmxnet3_init(sc);
	}

	if (event & VMXNET3_EVENT_DIC)
		printf("%s: device implementation change event\n",
		    ifp->if_xname);
	if (event & VMXNET3_EVENT_DEBUG)
		printf("%s: debug event\n", ifp->if_xname);
}

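/*
 * TX completion. Walk the completion ring while entries carry our
 * generation value; each one reports the end-of-packet index of a
 * transmitted chain, so unload and free the mbuf at the consumer index
 * and advance the consumer past that end-of-packet slot. Restart the
 * ifqueue if it had been marked full.
 */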
void
vmxnet3_txintr(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
	struct vmxnet3_txcompdesc *txcd;
	bus_dmamap_t map;
	struct mbuf *m;
	u_int cons, next;
	uint32_t rgen;

	cons = ring->cons;
	if (cons == ring->prod)
		return;

	next = comp_ring->next;
	rgen = comp_ring->gen;

	/* postread */
	for (;;) {
		txcd = &comp_ring->txcd[next];
		if ((txcd->txc_word3 & VMX_TXC_GEN) != rgen)
			break;

		if (++next == NTXCOMPDESC) {
			next = 0;
			rgen ^= VMX_TXC_GEN;
		}

		m = ring->m[cons];
		ring->m[cons] = NULL;

		KASSERT(m != NULL);

		map = ring->dmap[cons];
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(m);

		cons = (letoh32(txcd->txc_word0) >> VMXNET3_TXC_EOPIDX_S) &
		    VMXNET3_TXC_EOPIDX_M;
		cons++;
		cons %= NTXDESC;
	}
	/* preread */

	comp_ring->next = next;
	comp_ring->gen = rgen;
	ring->cons = cons;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}

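/*
 * RX completion. For each completion descriptor we own, pull the mbuf
 * out of the command ring slot it names, set the packet length, recover
 * checksum and VLAN information, and queue the packet for input.
 * Consumed slots are refilled in one batch at the end.
 */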
void
vmxnet3_rxintr(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vmxnet3_comp_ring *comp_ring = &rq->comp_ring;
	struct vmxnet3_rxring *ring;
	struct vmxnet3_rxcompdesc *rxcd;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	bus_dmamap_t map;
	unsigned int idx, len;
	unsigned int next, rgen;
	unsigned int done = 0;

	next = comp_ring->next;
	rgen = comp_ring->gen;

	for (;;) {
		rxcd = &comp_ring->rxcd[next];
		if ((rxcd->rxc_word3 & VMX_RXC_GEN) != rgen)
			break;

		if (++next == NRXCOMPDESC) {
			next = 0;
			rgen ^= VMX_RXC_GEN;
		}

		idx = letoh32((rxcd->rxc_word0 >> VMXNET3_RXC_IDX_S) &
		    VMXNET3_RXC_IDX_M);
		if (letoh32((rxcd->rxc_word0 >> VMXNET3_RXC_QID_S) &
		    VMXNET3_RXC_QID_M) < NRXQUEUE)
			ring = &rq->cmd_ring[0];
		else
			ring = &rq->cmd_ring[1];

		m = ring->m[idx];
		KASSERT(m != NULL);
		ring->m[idx] = NULL;

		map = ring->dmap[idx];
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, map);

		done++;

		if (letoh32(rxcd->rxc_word2 & VMXNET3_RXC_ERROR)) {
			ifp->if_ierrors++;
			m_freem(m);
			goto skip_buffer;
		}

		len = letoh32((rxcd->rxc_word2 >> VMXNET3_RXC_LEN_S) &
		    VMXNET3_RXC_LEN_M);
		if (len < VMXNET3_MIN_MTU) {
			m_freem(m);
			goto skip_buffer;
		}
		m->m_pkthdr.len = m->m_len = len;

		vmxnet3_rx_csum(rxcd, m);
		if (letoh32(rxcd->rxc_word2 & VMXNET3_RXC_VLAN)) {
			m->m_flags |= M_VLANTAG;
			m->m_pkthdr.ether_vtag = letoh32((rxcd->rxc_word2 >>
			    VMXNET3_RXC_VLANTAG_S) & VMXNET3_RXC_VLANTAG_M);
		}

		ml_enqueue(&ml, m);

skip_buffer:
#ifdef VMXNET3_STAT
		vmxstat.rxdone = idx;
#endif
		if (rq->rs->update_rxhead) {
			u_int qid = letoh32((rxcd->rxc_word0 >>
			    VMXNET3_RXC_QID_S) & VMXNET3_RXC_QID_M);

			idx = (idx + 1) % NRXDESC;
			if (qid < NRXQUEUE) {
				WRITE_BAR0(sc, VMXNET3_BAR0_RXH1(qid), idx);
			} else {
				qid -= NRXQUEUE;
				WRITE_BAR0(sc, VMXNET3_BAR0_RXH2(qid), idx);
			}
		}
	}

	comp_ring->next = next;
	comp_ring->gen = rgen;

	if (done == 0)
		return;

	ring = &rq->cmd_ring[0];

	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&ring->rxr);

	/* XXX Should we (try to) allocate buffers for ring 2 too? */
	mtx_enter(&ring->mtx);
	if_rxr_put(&ring->rxr, done);
	vmxnet3_rxfill(ring);
	mtx_leave(&ring->mtx);
}

void
vmxnet3_iff(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct vmxnet3_driver_shared *ds = sc->sc_ds;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int mode;
	u_int8_t *p;

	ds->mcast_tablelen = 0;
	CLR(ifp->if_flags, IFF_ALLMULTI);

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	mode = VMXNET3_RXMODE_BCAST | VMXNET3_RXMODE_UCAST;

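	/*
	 * The shared multicast table holds at most 682 addresses (see
	 * vmxnet3_dma_init()), so fall back to ALLMULTI when the list
	 * would overflow it, when address ranges are in use, or in
	 * promiscuous mode.
	 */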
	if (ISSET(ifp->if_flags, IFF_PROMISC) || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt > 682) {
		SET(ifp->if_flags, IFF_ALLMULTI);
		SET(mode, (VMXNET3_RXMODE_ALLMULTI | VMXNET3_RXMODE_MCAST));
		if (ifp->if_flags & IFF_PROMISC)
			SET(mode, VMXNET3_RXMODE_PROMISC);
	} else {
		p = sc->sc_mcast;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			bcopy(enm->enm_addrlo, p, ETHER_ADDR_LEN);

			p += ETHER_ADDR_LEN;

			ETHER_NEXT_MULTI(step, enm);
		}

		if (ac->ac_multicnt > 0) {
			SET(mode, VMXNET3_RXMODE_MCAST);
			ds->mcast_tablelen = p - sc->sc_mcast;
		}
	}

	WRITE_CMD(sc, VMXNET3_CMD_SET_FILTER);
	ds->rxmode = mode;
	WRITE_CMD(sc, VMXNET3_CMD_SET_RXMODE);
}

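/*
 * Translate the RX completion descriptor's checksum bits into mbuf
 * csum_flags. Fragments are skipped, and since the device reports a
 * single checksum-OK bit for TCP and UDP alike, both IN_OK flags are
 * set together.
 */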
void
vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
{
	if (letoh32(rxcd->rxc_word0 & VMXNET3_RXC_NOCSUM))
		return;

	if ((rxcd->rxc_word3 & (VMXNET3_RXC_IPV4 | VMXNET3_RXC_IPSUM_OK)) ==
	    (VMXNET3_RXC_IPV4 | VMXNET3_RXC_IPSUM_OK))
		m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

	if (rxcd->rxc_word3 & VMXNET3_RXC_FRAGMENT)
		return;

	if (rxcd->rxc_word3 & (VMXNET3_RXC_TCP | VMXNET3_RXC_UDP)) {
		if (rxcd->rxc_word3 & VMXNET3_RXC_CSUM_OK)
			m->m_pkthdr.csum_flags |=
			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
	}
}

void
vmxnet3_stop(struct ifnet *ifp)
{
	struct vmxnet3_softc *sc = ifp->if_softc;
	int queue;

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	vmxnet3_disable_all_intrs(sc);

	WRITE_CMD(sc, VMXNET3_CMD_DISABLE);

	intr_barrier(sc->sc_ih);

	for (queue = 0; queue < NTXQUEUE; queue++)
		vmxnet3_txstop(sc, &sc->sc_txq[queue]);
	for (queue = 0; queue < NRXQUEUE; queue++)
		vmxnet3_rxstop(sc, &sc->sc_rxq[queue]);
}

void
vmxnet3_reset(struct vmxnet3_softc *sc)
{
	WRITE_CMD(sc, VMXNET3_CMD_RESET);
}

int
vmxnet3_init(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int queue;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vmxnet3_stop(ifp);

#if 0
	/* Put controller into known state. */
	vmxnet3_reset(sc);
#endif

	for (queue = 0; queue < NTXQUEUE; queue++)
		vmxnet3_txinit(sc, &sc->sc_txq[queue]);
	for (queue = 0; queue < NRXQUEUE; queue++)
		vmxnet3_rxinit(sc, &sc->sc_rxq[queue]);

	for (queue = 0; queue < NRXQUEUE; queue++) {
		WRITE_BAR0(sc, VMXNET3_BAR0_RXH1(queue), 0);
		WRITE_BAR0(sc, VMXNET3_BAR0_RXH2(queue), 0);
	}

	WRITE_CMD(sc, VMXNET3_CMD_ENABLE);
	if (READ_BAR1(sc, VMXNET3_BAR1_CMD)) {
		printf("%s: failed to initialize\n", ifp->if_xname);
		vmxnet3_stop(ifp);
		return EIO;
	}

	/* Program promiscuous mode and multicast filters. */
	vmxnet3_iff(sc);

	vmxnet3_enable_all_intrs(sc);

	vmxnet3_link_state(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}

int
vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vmxnet3_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			error = vmxnet3_init(sc);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				error = vmxnet3_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vmxnet3_stop(ifp);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, JUMBO_LEN, &sc->sc_rxq[0].cmd_ring[0].rxr);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vmxnet3_iff(sc);
		error = 0;
	}

	splx(s);
	return error;
}

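/*
 * Load an mbuf chain for TX DMA. If it needs more segments than the
 * map allows (EFBIG), compact it with m_defrag() and retry once; any
 * other error is returned and the caller drops the packet.
 */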
static inline int
vmx_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
{
	int error;

	error = bus_dmamap_load_mbuf(dmat, map, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
	if (error != EFBIG)
		return (error);

	error = m_defrag(m, M_DONTWAIT);
	if (error != 0)
		return (error);

	return (bus_dmamap_load_mbuf(dmat, map, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT));
}

void
vmxnet3_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct vmxnet3_softc *sc = ifp->if_softc;
	struct vmxnet3_txqueue *tq = sc->sc_txq;
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_txdesc *txd, *sop;
	bus_dmamap_t map;
	unsigned int prod, free, i;
	unsigned int post = 0;
	uint32_t rgen, gen;

	struct mbuf *m;

	free = ring->cons;
	prod = ring->prod;
	if (free <= prod)
		free += NTXDESC;
	free -= prod;

	rgen = ring->gen;

	for (;;) {
		if (free <= NTXSEGS) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		map = ring->dmap[prod];

		if (vmx_load_mbuf(sc->sc_dmat, map, m) != 0) {
			ifq->ifq_errors++;
			m_freem(m);
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		ring->m[prod] = m;

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

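		/*
		 * Build the chain with the start-of-packet descriptor
		 * still owned by the driver (inverted generation bit);
		 * it is flipped to the live generation only below,
		 * after the whole chain has been written and a memory
		 * barrier has executed, so the device never sees a
		 * half-built packet.
		 */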
		gen = rgen ^ VMX_TX_GEN;
		sop = &ring->txd[prod];
		for (i = 0; i < map->dm_nsegs; i++) {
			txd = &ring->txd[prod];
			txd->tx_addr = htole64(map->dm_segs[i].ds_addr);
			txd->tx_word2 = htole32(map->dm_segs[i].ds_len <<
			    VMXNET3_TX_LEN_S) | gen;
			txd->tx_word3 = 0;

			if (++prod == NTXDESC) {
				prod = 0;
				rgen ^= VMX_TX_GEN;
			}

			gen = rgen;
		}
		txd->tx_word3 = htole32(VMXNET3_TX_EOP | VMXNET3_TX_COMPREQ);

		if (ISSET(m->m_flags, M_VLANTAG)) {
			sop->tx_word3 |= htole32(VMXNET3_TX_VTAG_MODE);
			sop->tx_word3 |= htole32((m->m_pkthdr.ether_vtag &
			    VMXNET3_TX_VLANTAG_M) << VMXNET3_TX_VLANTAG_S);
		}

		/* Change the ownership by flipping the "generation" bit */
		membar_producer();
		sop->tx_word2 ^= VMX_TX_GEN;

		free -= i;
		post = 1;
	}

	if (!post)
		return;

	ring->prod = prod;
	ring->gen = rgen;

	WRITE_BAR0(sc, VMXNET3_BAR0_TXH(0), prod);
}

void
vmxnet3_watchdog(struct ifnet *ifp)
{
	struct vmxnet3_softc *sc = ifp->if_softc;
	int s;

	printf("%s: device timeout\n", ifp->if_xname);
	s = splnet();
	vmxnet3_init(sc);
	splx(s);
}

void
vmxnet3_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vmxnet3_softc *sc = ifp->if_softc;

	vmxnet3_link_state(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (ifp->if_link_state != LINK_STATE_UP)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (ifp->if_baudrate >= IF_Gbps(10))
		ifmr->ifm_active |= IFM_10G_T;
}

int
vmxnet3_media_change(struct ifnet *ifp)
{
	return 0;
}

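/*
 * Allocate wired, zeroed memory for device-shared structures and return
 * its kernel va, with the bus address in *pa. The dmamap is used only
 * to learn that address and is then unloaded and destroyed, which
 * presumes the mapping stays valid without it; allocations are never
 * freed, which appears acceptable for attach-time use.
 */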
void *
vmxnet3_dma_allocmem(struct vmxnet3_softc *sc, u_int size, u_int align, bus_addr_t *pa)
{
	bus_dma_tag_t t = sc->sc_dmat;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	caddr_t va;
	int n;

	if (bus_dmamem_alloc(t, size, align, 0, segs, 1, &n, BUS_DMA_NOWAIT))
		return NULL;
	if (bus_dmamem_map(t, segs, 1, size, &va, BUS_DMA_NOWAIT))
		return NULL;
	if (bus_dmamap_create(t, size, 1, size, 0, BUS_DMA_NOWAIT, &map))
		return NULL;
	if (bus_dmamap_load(t, map, va, size, NULL, BUS_DMA_NOWAIT))
		return NULL;
	bzero(va, size);
	*pa = DMAADDR(map);
	bus_dmamap_unload(t, map);
	bus_dmamap_destroy(t, map);
	return va;
}