xref: /netbsd-src/sys/dev/pci/if_vmx.c (revision 481d3881954fd794ca5f2d880b68c53a5db8620e)
1*481d3881Srin /*	$NetBSD: if_vmx.c,v 1.17 2024/07/05 04:31:51 rin Exp $	*/
2d87a4d00Sryo /*	$OpenBSD: if_vmx.c,v 1.16 2014/01/22 06:04:17 brad Exp $	*/
3d87a4d00Sryo 
4d87a4d00Sryo /*
5d87a4d00Sryo  * Copyright (c) 2013 Tsubai Masanari
6d87a4d00Sryo  * Copyright (c) 2013 Bryan Venteicher <bryanv@FreeBSD.org>
7d87a4d00Sryo  *
8d87a4d00Sryo  * Permission to use, copy, modify, and distribute this software for any
9d87a4d00Sryo  * purpose with or without fee is hereby granted, provided that the above
10d87a4d00Sryo  * copyright notice and this permission notice appear in all copies.
11d87a4d00Sryo  *
12d87a4d00Sryo  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13d87a4d00Sryo  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14d87a4d00Sryo  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15d87a4d00Sryo  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16d87a4d00Sryo  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17d87a4d00Sryo  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18d87a4d00Sryo  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19d87a4d00Sryo  */
20d87a4d00Sryo 
21d87a4d00Sryo #include <sys/cdefs.h>
22*481d3881Srin __KERNEL_RCSID(0, "$NetBSD: if_vmx.c,v 1.17 2024/07/05 04:31:51 rin Exp $");
232e9219acSknakahara 
242e9219acSknakahara #ifdef _KERNEL_OPT
252e9219acSknakahara #include "opt_if_vmx.h"
262e9219acSknakahara #endif
27d87a4d00Sryo 
28d87a4d00Sryo #include <sys/param.h>
29d87a4d00Sryo #include <sys/cpu.h>
30d87a4d00Sryo #include <sys/kernel.h>
31d87a4d00Sryo #include <sys/kmem.h>
32d87a4d00Sryo #include <sys/bitops.h>
33d87a4d00Sryo #include <sys/bus.h>
34d87a4d00Sryo #include <sys/device.h>
35d87a4d00Sryo #include <sys/mbuf.h>
3635492473Sryo #include <sys/module.h>
37d87a4d00Sryo #include <sys/sockio.h>
38d87a4d00Sryo #include <sys/pcq.h>
39d87a4d00Sryo #include <sys/workqueue.h>
40d87a4d00Sryo #include <sys/interrupt.h>
41d87a4d00Sryo 
42d87a4d00Sryo #include <net/bpf.h>
43d87a4d00Sryo #include <net/if.h>
44d87a4d00Sryo #include <net/if_ether.h>
45d87a4d00Sryo #include <net/if_media.h>
46d87a4d00Sryo 
47d87a4d00Sryo #include <netinet/if_inarp.h>
48d87a4d00Sryo #include <netinet/in_systm.h>	/* for <netinet/ip.h> */
49d87a4d00Sryo #include <netinet/in.h>		/* for <netinet/ip.h> */
50d87a4d00Sryo #include <netinet/ip.h>		/* for struct ip */
51d87a4d00Sryo #include <netinet/ip6.h>	/* for struct ip6_hdr */
52d87a4d00Sryo #include <netinet/tcp.h>	/* for struct tcphdr */
53d87a4d00Sryo #include <netinet/udp.h>	/* for struct udphdr */
54d87a4d00Sryo 
55d87a4d00Sryo #include <dev/pci/pcivar.h>
56d87a4d00Sryo #include <dev/pci/pcireg.h>
57d87a4d00Sryo #include <dev/pci/pcidevs.h>
58d87a4d00Sryo 
59d87a4d00Sryo #include <dev/pci/if_vmxreg.h>
60d87a4d00Sryo 
61d87a4d00Sryo #define VMXNET3_DRIVER_VERSION 0x00010000
62d87a4d00Sryo 
63d87a4d00Sryo /*
64d87a4d00Sryo  * Max descriptors per Tx packet. We must limit the size of any
65d87a4d00Sryo  * TSO packet based on the number of segments.
66d87a4d00Sryo  */
67d87a4d00Sryo #define VMXNET3_TX_MAXSEGS		32
68d87a4d00Sryo #define VMXNET3_TX_MAXSIZE		(VMXNET3_TX_MAXSEGS * MCLBYTES)
69d87a4d00Sryo 
70d87a4d00Sryo /*
71d87a4d00Sryo  * Maximum supported Tx segment size. The length field in the
72d87a4d00Sryo  * Tx descriptor is 14 bits.
73d87a4d00Sryo  */
74d87a4d00Sryo #define VMXNET3_TX_MAXSEGSIZE		(1 << 14)
75d87a4d00Sryo 
76d87a4d00Sryo /*
77d87a4d00Sryo  * The maximum number of Rx segments we accept.
78d87a4d00Sryo  */
79d87a4d00Sryo #define VMXNET3_MAX_RX_SEGS		0	/* no segments */
80d87a4d00Sryo 
81d87a4d00Sryo /*
82d87a4d00Sryo  * Predetermined size of the multicast MACs filter table. If the
83d87a4d00Sryo  * number of multicast addresses exceeds this size, then the
84d87a4d00Sryo  * ALL_MULTI mode is used instead.
85d87a4d00Sryo  */
86d87a4d00Sryo #define VMXNET3_MULTICAST_MAX		32
87d87a4d00Sryo 
88d87a4d00Sryo /*
89d87a4d00Sryo  * Our Tx watchdog timeout.
90d87a4d00Sryo  */
91d87a4d00Sryo #define VMXNET3_WATCHDOG_TIMEOUT	5
92d87a4d00Sryo 
93d87a4d00Sryo /*
94d87a4d00Sryo  * Default values for vmx_{rx,tx}_intr_process_limit, the maximum
95d87a4d00Sryo  * number of packets to process in the interrupt handler.
96d87a4d00Sryo  */
97d87a4d00Sryo #define VMXNET3_RX_INTR_PROCESS_LIMIT 0U
98d87a4d00Sryo #define VMXNET3_TX_INTR_PROCESS_LIMIT 256
99d87a4d00Sryo 
100d87a4d00Sryo /*
101d87a4d00Sryo  * Default values for vmx_{rx,tx}_process_limit, the maximum
102d87a4d00Sryo  * number of packets to process during deferred processing.
103d87a4d00Sryo  */
104d87a4d00Sryo #define VMXNET3_RX_PROCESS_LIMIT 256
105d87a4d00Sryo #define VMXNET3_TX_PROCESS_LIMIT 256
106d87a4d00Sryo 
107d87a4d00Sryo #define VMXNET3_WORKQUEUE_PRI PRI_SOFTNET
108d87a4d00Sryo 
109d87a4d00Sryo /*
110d87a4d00Sryo  * IP protocols that we can perform Tx checksum offloading of.
111d87a4d00Sryo  */
112d87a4d00Sryo #define VMXNET3_CSUM_OFFLOAD \
113d87a4d00Sryo     (M_CSUM_TCPv4 | M_CSUM_UDPv4)
114d87a4d00Sryo #define VMXNET3_CSUM_OFFLOAD_IPV6 \
115d87a4d00Sryo     (M_CSUM_TCPv6 | M_CSUM_UDPv6)
116d87a4d00Sryo 
117d87a4d00Sryo #define VMXNET3_CSUM_ALL_OFFLOAD \
118d87a4d00Sryo     (VMXNET3_CSUM_OFFLOAD | VMXNET3_CSUM_OFFLOAD_IPV6 | M_CSUM_TSOv4 | M_CSUM_TSOv6)
119d87a4d00Sryo 
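/* Each Rx queue is backed by this many command (fill) rings. */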
120d87a4d00Sryo #define VMXNET3_RXRINGS_PERQ 2
121d87a4d00Sryo 
122d87a4d00Sryo #define VMXNET3_CORE_LOCK(_sc)		mutex_enter((_sc)->vmx_mtx)
123d87a4d00Sryo #define VMXNET3_CORE_UNLOCK(_sc)	mutex_exit((_sc)->vmx_mtx)
124d87a4d00Sryo #define VMXNET3_CORE_LOCK_ASSERT(_sc)	mutex_owned((_sc)->vmx_mtx)
125d87a4d00Sryo 
126d87a4d00Sryo #define VMXNET3_RXQ_LOCK(_rxq)		mutex_enter((_rxq)->vxrxq_mtx)
127d87a4d00Sryo #define VMXNET3_RXQ_UNLOCK(_rxq)	mutex_exit((_rxq)->vxrxq_mtx)
128d87a4d00Sryo #define VMXNET3_RXQ_LOCK_ASSERT(_rxq)		\
129d87a4d00Sryo     mutex_owned((_rxq)->vxrxq_mtx)
130d87a4d00Sryo 
131d87a4d00Sryo #define VMXNET3_TXQ_LOCK(_txq)		mutex_enter((_txq)->vxtxq_mtx)
132d87a4d00Sryo #define VMXNET3_TXQ_TRYLOCK(_txq)	mutex_tryenter((_txq)->vxtxq_mtx)
133d87a4d00Sryo #define VMXNET3_TXQ_UNLOCK(_txq)	mutex_exit((_txq)->vxtxq_mtx)
134d87a4d00Sryo #define VMXNET3_TXQ_LOCK_ASSERT(_txq)		\
135d87a4d00Sryo     mutex_owned((_txq)->vxtxq_mtx)
136d87a4d00Sryo 
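/*
 * Book-keeping for one bus_dma(9) allocation: kernel virtual address,
 * device (bus) address, the DMA map and the single backing segment.
 */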
137d87a4d00Sryo struct vmxnet3_dma_alloc {
138d87a4d00Sryo 	bus_addr_t dma_paddr;
139d87a4d00Sryo 	void *dma_vaddr;
140d87a4d00Sryo 	bus_dmamap_t dma_map;
141d87a4d00Sryo 	bus_size_t dma_size;
142d87a4d00Sryo 	bus_dma_segment_t dma_segs[1];
143d87a4d00Sryo };
144d87a4d00Sryo 
145d87a4d00Sryo struct vmxnet3_txbuf {
146d87a4d00Sryo 	bus_dmamap_t vtxb_dmamap;
147d87a4d00Sryo 	struct mbuf *vtxb_m;
148d87a4d00Sryo };
149d87a4d00Sryo 
150d87a4d00Sryo struct vmxnet3_txring {
151d87a4d00Sryo 	struct vmxnet3_txbuf *vxtxr_txbuf;
152d87a4d00Sryo 	struct vmxnet3_txdesc *vxtxr_txd;
153d87a4d00Sryo 	u_int vxtxr_head;
154d87a4d00Sryo 	u_int vxtxr_next;
155d87a4d00Sryo 	u_int vxtxr_ndesc;
156d87a4d00Sryo 	int vxtxr_gen;
157d87a4d00Sryo 	struct vmxnet3_dma_alloc vxtxr_dma;
158d87a4d00Sryo };
159d87a4d00Sryo 
160d87a4d00Sryo struct vmxnet3_rxbuf {
161d87a4d00Sryo 	bus_dmamap_t vrxb_dmamap;
162d87a4d00Sryo 	struct mbuf *vrxb_m;
163d87a4d00Sryo };
164d87a4d00Sryo 
165d87a4d00Sryo struct vmxnet3_rxring {
166d87a4d00Sryo 	struct vmxnet3_rxbuf *vxrxr_rxbuf;
167d87a4d00Sryo 	struct vmxnet3_rxdesc *vxrxr_rxd;
168d87a4d00Sryo 	u_int vxrxr_fill;
169d87a4d00Sryo 	u_int vxrxr_ndesc;
170d87a4d00Sryo 	int vxrxr_gen;
171d87a4d00Sryo 	int vxrxr_rid;
172d87a4d00Sryo 	struct vmxnet3_dma_alloc vxrxr_dma;
173d87a4d00Sryo 	bus_dmamap_t vxrxr_spare_dmap;
174d87a4d00Sryo };
175d87a4d00Sryo 
176d87a4d00Sryo struct vmxnet3_comp_ring {
177d87a4d00Sryo 	union {
178d87a4d00Sryo 		struct vmxnet3_txcompdesc *txcd;
179d87a4d00Sryo 		struct vmxnet3_rxcompdesc *rxcd;
180d87a4d00Sryo 	} vxcr_u;
181d87a4d00Sryo 	u_int vxcr_next;
182d87a4d00Sryo 	u_int vxcr_ndesc;
183d87a4d00Sryo 	int vxcr_gen;
184d87a4d00Sryo 	struct vmxnet3_dma_alloc vxcr_dma;
185d87a4d00Sryo };
186d87a4d00Sryo 
187d87a4d00Sryo struct vmxnet3_txq_stats {
188d87a4d00Sryo 	uint64_t vmtxs_csum;
189d87a4d00Sryo 	uint64_t vmtxs_tso;
190d87a4d00Sryo 	uint64_t vmtxs_full;
191d87a4d00Sryo 	uint64_t vmtxs_offload_failed;
192d87a4d00Sryo };
193d87a4d00Sryo 
194d87a4d00Sryo struct vmxnet3_txqueue {
195d87a4d00Sryo 	kmutex_t *vxtxq_mtx;
196d87a4d00Sryo 	struct vmxnet3_softc *vxtxq_sc;
197d87a4d00Sryo 	int vxtxq_watchdog;
198d87a4d00Sryo 	pcq_t *vxtxq_interq;
199d87a4d00Sryo 	struct vmxnet3_txring vxtxq_cmd_ring;
200d87a4d00Sryo 	struct vmxnet3_comp_ring vxtxq_comp_ring;
201d87a4d00Sryo 	struct vmxnet3_txq_stats vxtxq_stats;
202d87a4d00Sryo 	struct vmxnet3_txq_shared *vxtxq_ts;
203d87a4d00Sryo 	char vxtxq_name[16];
204d87a4d00Sryo 
205d87a4d00Sryo 	void *vxtxq_si;
206d87a4d00Sryo 
207d87a4d00Sryo 	struct evcnt vxtxq_intr;
208d87a4d00Sryo 	struct evcnt vxtxq_defer;
209d87a4d00Sryo 	struct evcnt vxtxq_deferreq;
210d87a4d00Sryo 	struct evcnt vxtxq_pcqdrop;
211d87a4d00Sryo 	struct evcnt vxtxq_transmitdef;
212d87a4d00Sryo 	struct evcnt vxtxq_watchdogto;
213d87a4d00Sryo 	struct evcnt vxtxq_defragged;
214d87a4d00Sryo 	struct evcnt vxtxq_defrag_failed;
215db465cddSriastradh 
216db465cddSriastradh 	bool vxtxq_stopping;
217d87a4d00Sryo };
218d87a4d00Sryo 
219d87a4d00Sryo 
220d87a4d00Sryo struct vmxnet3_rxqueue {
221d87a4d00Sryo 	kmutex_t *vxrxq_mtx;
222d87a4d00Sryo 	struct vmxnet3_softc *vxrxq_sc;
223d87a4d00Sryo 	struct mbuf *vxrxq_mhead;
224d87a4d00Sryo 	struct mbuf *vxrxq_mtail;
225d87a4d00Sryo 	struct vmxnet3_rxring vxrxq_cmd_ring[VMXNET3_RXRINGS_PERQ];
226d87a4d00Sryo 	struct vmxnet3_comp_ring vxrxq_comp_ring;
227d87a4d00Sryo 	struct vmxnet3_rxq_shared *vxrxq_rs;
228d87a4d00Sryo 	char vxrxq_name[16];
229d87a4d00Sryo 
230d87a4d00Sryo 	struct evcnt vxrxq_intr;
231d87a4d00Sryo 	struct evcnt vxrxq_defer;
232d87a4d00Sryo 	struct evcnt vxrxq_deferreq;
233d87a4d00Sryo 	struct evcnt vxrxq_mgetcl_failed;
234d87a4d00Sryo 	struct evcnt vxrxq_mbuf_load_failed;
235db465cddSriastradh 
236db465cddSriastradh 	bool vxrxq_stopping;
237d87a4d00Sryo };
238d87a4d00Sryo 
239d87a4d00Sryo struct vmxnet3_queue {
240d87a4d00Sryo 	int vxq_id;
241d87a4d00Sryo 	int vxq_intr_idx;
242d87a4d00Sryo 
243d87a4d00Sryo 	struct vmxnet3_txqueue vxq_txqueue;
244d87a4d00Sryo 	struct vmxnet3_rxqueue vxq_rxqueue;
245d87a4d00Sryo 
246d87a4d00Sryo 	void *vxq_si;
247d87a4d00Sryo 	bool vxq_workqueue;
248c3c564a7Sknakahara 	bool vxq_wq_enqueued;
249d87a4d00Sryo 	struct work vxq_wq_cookie;
250d87a4d00Sryo };
251d87a4d00Sryo 
252d87a4d00Sryo struct vmxnet3_softc {
253d87a4d00Sryo 	device_t vmx_dev;
254d87a4d00Sryo 	struct ethercom vmx_ethercom;
255d87a4d00Sryo 	struct ifmedia vmx_media;
256d87a4d00Sryo 	struct vmxnet3_driver_shared *vmx_ds;
257d87a4d00Sryo 	int vmx_flags;
258d87a4d00Sryo #define VMXNET3_FLAG_NO_MSIX	(1 << 0)
259d87a4d00Sryo #define VMXNET3_FLAG_RSS	(1 << 1)
260d87a4d00Sryo #define VMXNET3_FLAG_ATTACHED	(1 << 2)
261d87a4d00Sryo 
262d87a4d00Sryo 	struct vmxnet3_queue *vmx_queue;
263d87a4d00Sryo 
264d87a4d00Sryo 	struct pci_attach_args *vmx_pa;
265d87a4d00Sryo 	pci_chipset_tag_t vmx_pc;
266d87a4d00Sryo 
267d87a4d00Sryo 	bus_space_tag_t vmx_iot0;
268d87a4d00Sryo 	bus_space_tag_t vmx_iot1;
269d87a4d00Sryo 	bus_space_handle_t vmx_ioh0;
270d87a4d00Sryo 	bus_space_handle_t vmx_ioh1;
271d87a4d00Sryo 	bus_size_t vmx_ios0;
272d87a4d00Sryo 	bus_size_t vmx_ios1;
273d87a4d00Sryo 	bus_dma_tag_t vmx_dmat;
274d87a4d00Sryo 
275d87a4d00Sryo 	int vmx_link_active;
276d87a4d00Sryo 	int vmx_ntxqueues;
277d87a4d00Sryo 	int vmx_nrxqueues;
278d87a4d00Sryo 	int vmx_ntxdescs;
279d87a4d00Sryo 	int vmx_nrxdescs;
280d87a4d00Sryo 	int vmx_max_rxsegs;
281d87a4d00Sryo 
282d87a4d00Sryo 	struct evcnt vmx_event_intr;
283d87a4d00Sryo 	struct evcnt vmx_event_link;
284d87a4d00Sryo 	struct evcnt vmx_event_txqerror;
285d87a4d00Sryo 	struct evcnt vmx_event_rxqerror;
286d87a4d00Sryo 	struct evcnt vmx_event_dic;
287d87a4d00Sryo 	struct evcnt vmx_event_debug;
288d87a4d00Sryo 
289d87a4d00Sryo 	int vmx_intr_type;
290d87a4d00Sryo 	int vmx_intr_mask_mode;
291d87a4d00Sryo 	int vmx_event_intr_idx;
292d87a4d00Sryo 	int vmx_nintrs;
293d87a4d00Sryo 	pci_intr_handle_t *vmx_intrs;	/* legacy uses vmx_intrs[0] */
294d87a4d00Sryo 	void *vmx_ihs[VMXNET3_MAX_INTRS];
295d87a4d00Sryo 
296d87a4d00Sryo 	kmutex_t *vmx_mtx;
297d87a4d00Sryo 
298db465cddSriastradh 	int vmx_if_flags;
299db465cddSriastradh 	bool vmx_promisc;
300db465cddSriastradh 	bool vmx_mcastactive;
301d87a4d00Sryo 	uint8_t *vmx_mcast;
302d87a4d00Sryo 	void *vmx_qs;
303d87a4d00Sryo 	struct vmxnet3_rss_shared *vmx_rss;
304d87a4d00Sryo 	callout_t vmx_tick;
305d87a4d00Sryo 	struct vmxnet3_dma_alloc vmx_ds_dma;
306d87a4d00Sryo 	struct vmxnet3_dma_alloc vmx_qs_dma;
307d87a4d00Sryo 	struct vmxnet3_dma_alloc vmx_mcast_dma;
308d87a4d00Sryo 	struct vmxnet3_dma_alloc vmx_rss_dma;
309d87a4d00Sryo 	int vmx_max_ntxqueues;
310d87a4d00Sryo 	int vmx_max_nrxqueues;
311d87a4d00Sryo 	uint8_t vmx_lladdr[ETHER_ADDR_LEN];
312d87a4d00Sryo 
313d87a4d00Sryo 	u_int vmx_rx_intr_process_limit;
314d87a4d00Sryo 	u_int vmx_tx_intr_process_limit;
315d87a4d00Sryo 	u_int vmx_rx_process_limit;
316d87a4d00Sryo 	u_int vmx_tx_process_limit;
317d87a4d00Sryo 	struct sysctllog *vmx_sysctllog;
318d87a4d00Sryo 
319d87a4d00Sryo 	bool vmx_txrx_workqueue;
320d87a4d00Sryo 	struct workqueue *vmx_queue_wq;
321db465cddSriastradh 
322db465cddSriastradh 	struct workqueue *vmx_reset_wq;
323db465cddSriastradh 	struct work vmx_reset_work;
324db465cddSriastradh 	bool vmx_reset_pending;
325d87a4d00Sryo };
326d87a4d00Sryo 
327d87a4d00Sryo #define VMXNET3_STAT
328d87a4d00Sryo 
329d87a4d00Sryo #ifdef VMXNET3_STAT
330d87a4d00Sryo struct {
331d87a4d00Sryo 	u_int txhead;
332d87a4d00Sryo 	u_int txdone;
333d87a4d00Sryo 	u_int maxtxlen;
334d87a4d00Sryo 	u_int rxdone;
335d87a4d00Sryo 	u_int rxfill;
336d87a4d00Sryo 	u_int intr;
337d87a4d00Sryo } vmxstat;
338d87a4d00Sryo #endif
339d87a4d00Sryo 
340d87a4d00Sryo typedef enum {
341d87a4d00Sryo 	VMXNET3_BARRIER_RD,
342d87a4d00Sryo 	VMXNET3_BARRIER_WR,
343d87a4d00Sryo } vmxnet3_barrier_t;
344d87a4d00Sryo 
345d87a4d00Sryo #define JUMBO_LEN (MCLBYTES - ETHER_ALIGN)	/* XXX */
346d87a4d00Sryo #define DMAADDR(map) ((map)->dm_segs[0].ds_addr)
347d87a4d00Sryo 
348d87a4d00Sryo #define vtophys(va) 0		/* XXX ok? */
349d87a4d00Sryo 
350d87a4d00Sryo static int vmxnet3_match(device_t, cfdata_t, void *);
351d87a4d00Sryo static void vmxnet3_attach(device_t, device_t, void *);
352d87a4d00Sryo static int vmxnet3_detach(device_t, int);
353d87a4d00Sryo 
354d87a4d00Sryo static int vmxnet3_alloc_pci_resources(struct vmxnet3_softc *);
355d87a4d00Sryo static void vmxnet3_free_pci_resources(struct vmxnet3_softc *);
356d87a4d00Sryo static int vmxnet3_check_version(struct vmxnet3_softc *);
357d87a4d00Sryo static void vmxnet3_check_multiqueue(struct vmxnet3_softc *);
358d87a4d00Sryo 
359d87a4d00Sryo static int vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *);
360d87a4d00Sryo static int vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *);
361d87a4d00Sryo static int vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *);
362d87a4d00Sryo static int vmxnet3_alloc_interrupts(struct vmxnet3_softc *);
363d87a4d00Sryo static void vmxnet3_free_interrupts(struct vmxnet3_softc *);
364d87a4d00Sryo 
365d87a4d00Sryo static int vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *);
366d87a4d00Sryo static int vmxnet3_setup_msi_interrupt(struct vmxnet3_softc *);
367d87a4d00Sryo static int vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *);
368d87a4d00Sryo static void vmxnet3_set_interrupt_idx(struct vmxnet3_softc *);
369d87a4d00Sryo static int vmxnet3_setup_interrupts(struct vmxnet3_softc *);
370d87a4d00Sryo static int vmxnet3_setup_sysctl(struct vmxnet3_softc *);
371d87a4d00Sryo 
372d87a4d00Sryo static int vmxnet3_setup_stats(struct vmxnet3_softc *);
373d87a4d00Sryo static void vmxnet3_teardown_stats(struct vmxnet3_softc *);
374d87a4d00Sryo 
375d87a4d00Sryo static int vmxnet3_init_rxq(struct vmxnet3_softc *, int);
376d87a4d00Sryo static int vmxnet3_init_txq(struct vmxnet3_softc *, int);
377d87a4d00Sryo static int vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *);
378d87a4d00Sryo static void vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *);
379d87a4d00Sryo static void vmxnet3_destroy_txq(struct vmxnet3_txqueue *);
380d87a4d00Sryo static void vmxnet3_free_rxtx_queues(struct vmxnet3_softc *);
381d87a4d00Sryo 
382d87a4d00Sryo static int vmxnet3_alloc_shared_data(struct vmxnet3_softc *);
383d87a4d00Sryo static void vmxnet3_free_shared_data(struct vmxnet3_softc *);
384d87a4d00Sryo static int vmxnet3_alloc_txq_data(struct vmxnet3_softc *);
385d87a4d00Sryo static void vmxnet3_free_txq_data(struct vmxnet3_softc *);
386d87a4d00Sryo static int vmxnet3_alloc_rxq_data(struct vmxnet3_softc *);
387d87a4d00Sryo static void vmxnet3_free_rxq_data(struct vmxnet3_softc *);
388d87a4d00Sryo static int vmxnet3_alloc_queue_data(struct vmxnet3_softc *);
389d87a4d00Sryo static void vmxnet3_free_queue_data(struct vmxnet3_softc *);
390d87a4d00Sryo static int vmxnet3_alloc_mcast_table(struct vmxnet3_softc *);
391d87a4d00Sryo static void vmxnet3_free_mcast_table(struct vmxnet3_softc *);
392d87a4d00Sryo static void vmxnet3_init_shared_data(struct vmxnet3_softc *);
393d87a4d00Sryo static void vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *);
394d87a4d00Sryo static void vmxnet3_reinit_shared_data(struct vmxnet3_softc *);
395d87a4d00Sryo static int vmxnet3_alloc_data(struct vmxnet3_softc *);
396d87a4d00Sryo static void vmxnet3_free_data(struct vmxnet3_softc *);
397d87a4d00Sryo static int vmxnet3_setup_interface(struct vmxnet3_softc *);
398d87a4d00Sryo 
399d87a4d00Sryo static void vmxnet3_evintr(struct vmxnet3_softc *);
400d87a4d00Sryo static bool vmxnet3_txq_eof(struct vmxnet3_txqueue *, u_int);
401d87a4d00Sryo static int vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxqueue *,
402d87a4d00Sryo     struct vmxnet3_rxring *);
403d87a4d00Sryo static void vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *,
404d87a4d00Sryo     struct vmxnet3_rxring *, int);
405d87a4d00Sryo static void vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *);
406d87a4d00Sryo static void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
407d87a4d00Sryo static void vmxnet3_rxq_input(struct vmxnet3_rxqueue *,
408d87a4d00Sryo     struct vmxnet3_rxcompdesc *, struct mbuf *);
409d87a4d00Sryo static bool vmxnet3_rxq_eof(struct vmxnet3_rxqueue *, u_int);
410d87a4d00Sryo static int vmxnet3_legacy_intr(void *);
411d87a4d00Sryo static int vmxnet3_txrxq_intr(void *);
412d87a4d00Sryo static void vmxnet3_handle_queue(void *);
413d87a4d00Sryo static void vmxnet3_handle_queue_work(struct work *, void *);
414d87a4d00Sryo static int vmxnet3_event_intr(void *);
415d87a4d00Sryo 
416d87a4d00Sryo static void vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
417d87a4d00Sryo static void vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
418d87a4d00Sryo static void vmxnet3_stop_locked(struct vmxnet3_softc *);
419d87a4d00Sryo static void vmxnet3_stop_rendezvous(struct vmxnet3_softc *);
420d87a4d00Sryo static void vmxnet3_stop(struct ifnet *, int);
421d87a4d00Sryo 
422d87a4d00Sryo static void vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
423d87a4d00Sryo static int vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
424d87a4d00Sryo static int vmxnet3_reinit_queues(struct vmxnet3_softc *);
425d87a4d00Sryo static int vmxnet3_enable_device(struct vmxnet3_softc *);
426d87a4d00Sryo static void vmxnet3_reinit_rxfilters(struct vmxnet3_softc *);
427d87a4d00Sryo static int vmxnet3_reinit(struct vmxnet3_softc *);
428d87a4d00Sryo 
429d87a4d00Sryo static int vmxnet3_init_locked(struct vmxnet3_softc *);
430d87a4d00Sryo static int vmxnet3_init(struct ifnet *);
431d87a4d00Sryo 
432d87a4d00Sryo static int vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *, struct mbuf *, int *, int *);
433d87a4d00Sryo static int vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *, struct mbuf **, bus_dmamap_t);
434d87a4d00Sryo static void vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *, bus_dmamap_t);
435d87a4d00Sryo static int vmxnet3_txq_encap(struct vmxnet3_txqueue *, struct mbuf **);
436d87a4d00Sryo static void vmxnet3_start_locked(struct ifnet *);
437d87a4d00Sryo static void vmxnet3_start(struct ifnet *);
438d87a4d00Sryo static void vmxnet3_transmit_locked(struct ifnet *, struct vmxnet3_txqueue *);
439d87a4d00Sryo static int vmxnet3_transmit(struct ifnet *, struct mbuf *);
440d87a4d00Sryo static void vmxnet3_deferred_transmit(void *);
441d87a4d00Sryo 
442d87a4d00Sryo static void vmxnet3_set_rxfilter(struct vmxnet3_softc *);
443d87a4d00Sryo static int vmxnet3_ioctl(struct ifnet *, u_long, void *);
444d87a4d00Sryo static int vmxnet3_ifflags_cb(struct ethercom *);
445d87a4d00Sryo 
446d87a4d00Sryo static int vmxnet3_watchdog(struct vmxnet3_txqueue *);
447d87a4d00Sryo static void vmxnet3_refresh_host_stats(struct vmxnet3_softc *);
448d87a4d00Sryo static void vmxnet3_tick(void *);
449db465cddSriastradh static void vmxnet3_reset_work(struct work *, void *);
450d87a4d00Sryo static void vmxnet3_if_link_status(struct vmxnet3_softc *);
451d87a4d00Sryo static bool vmxnet3_cmd_link_status(struct ifnet *);
452d87a4d00Sryo static void vmxnet3_ifmedia_status(struct ifnet *, struct ifmediareq *);
453d87a4d00Sryo static int vmxnet3_ifmedia_change(struct ifnet *);
454d87a4d00Sryo static void vmxnet3_set_lladdr(struct vmxnet3_softc *);
455d87a4d00Sryo static void vmxnet3_get_lladdr(struct vmxnet3_softc *);
456d87a4d00Sryo 
457d87a4d00Sryo static void vmxnet3_enable_all_intrs(struct vmxnet3_softc *);
458d87a4d00Sryo static void vmxnet3_disable_all_intrs(struct vmxnet3_softc *);
459d87a4d00Sryo 
460d87a4d00Sryo static int vmxnet3_dma_malloc(struct vmxnet3_softc *, bus_size_t, bus_size_t,
461d87a4d00Sryo     struct vmxnet3_dma_alloc *);
462d87a4d00Sryo static void vmxnet3_dma_free(struct vmxnet3_softc *, struct vmxnet3_dma_alloc *);
463d87a4d00Sryo 
464d87a4d00Sryo CFATTACH_DECL3_NEW(vmx, sizeof(struct vmxnet3_softc),
465d87a4d00Sryo     vmxnet3_match, vmxnet3_attach, vmxnet3_detach, NULL, NULL, NULL, 0);
466d87a4d00Sryo 
467d87a4d00Sryo /* round down to the nearest power of 2 */
468d87a4d00Sryo static int
469d87a4d00Sryo vmxnet3_calc_queue_size(int n)
470d87a4d00Sryo {
471d87a4d00Sryo 
472d87a4d00Sryo 	if (__predict_false(n <= 0))
473d87a4d00Sryo 		return 1;
474d87a4d00Sryo 
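	/* e.g. n == 6: fls32(6) == 3, so the result is 1 << 2 == 4. */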
475d87a4d00Sryo 	return (1U << (fls32(n) - 1));
476d87a4d00Sryo }
477d87a4d00Sryo 
478d87a4d00Sryo static inline void
479d87a4d00Sryo vmxnet3_write_bar0(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
480d87a4d00Sryo {
481d87a4d00Sryo 
482d87a4d00Sryo 	bus_space_write_4(sc->vmx_iot0, sc->vmx_ioh0, r, v);
483d87a4d00Sryo }
484d87a4d00Sryo 
485d87a4d00Sryo static inline uint32_t
486d87a4d00Sryo vmxnet3_read_bar1(struct vmxnet3_softc *sc, bus_size_t r)
487d87a4d00Sryo {
488d87a4d00Sryo 
489d87a4d00Sryo 	return (bus_space_read_4(sc->vmx_iot1, sc->vmx_ioh1, r));
490d87a4d00Sryo }
491d87a4d00Sryo 
492d87a4d00Sryo static inline void
493d87a4d00Sryo vmxnet3_write_bar1(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
494d87a4d00Sryo {
495d87a4d00Sryo 
496d87a4d00Sryo 	bus_space_write_4(sc->vmx_iot1, sc->vmx_ioh1, r, v);
497d87a4d00Sryo }
498d87a4d00Sryo 
499d87a4d00Sryo static inline void
500d87a4d00Sryo vmxnet3_write_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
501d87a4d00Sryo {
502d87a4d00Sryo 
503d87a4d00Sryo 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_CMD, cmd);
504d87a4d00Sryo }
505d87a4d00Sryo 
506d87a4d00Sryo static inline uint32_t
507d87a4d00Sryo vmxnet3_read_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
508d87a4d00Sryo {
509d87a4d00Sryo 
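	/*
	 * Writing the command register executes the command; reading
	 * the same register back returns its result.
	 */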
510d87a4d00Sryo 	vmxnet3_write_cmd(sc, cmd);
511d87a4d00Sryo 	return (vmxnet3_read_bar1(sc, VMXNET3_BAR1_CMD));
512d87a4d00Sryo }
513d87a4d00Sryo 
514d87a4d00Sryo static inline void
515d87a4d00Sryo vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq)
516d87a4d00Sryo {
517d87a4d00Sryo 	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 0);
518d87a4d00Sryo }
519d87a4d00Sryo 
520d87a4d00Sryo static inline void
521d87a4d00Sryo vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq)
522d87a4d00Sryo {
523d87a4d00Sryo 	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 1);
524d87a4d00Sryo }
525d87a4d00Sryo 
526d87a4d00Sryo static inline void
527d87a4d00Sryo vmxnet3_rxr_increment_fill(struct vmxnet3_rxring *rxr)
528d87a4d00Sryo {
529d87a4d00Sryo 
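	/*
	 * On wrap-around, flip the generation bit so the device can
	 * distinguish freshly written descriptors from stale ones.
	 */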
530d87a4d00Sryo 	if (++rxr->vxrxr_fill == rxr->vxrxr_ndesc) {
531d87a4d00Sryo 		rxr->vxrxr_fill = 0;
532d87a4d00Sryo 		rxr->vxrxr_gen ^= 1;
533d87a4d00Sryo 	}
534d87a4d00Sryo }
535d87a4d00Sryo 
536d87a4d00Sryo static inline int
537d87a4d00Sryo vmxnet3_txring_avail(struct vmxnet3_txring *txr)
538d87a4d00Sryo {
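	/*
	 * Count the free descriptors between the consumer (vxtxr_next)
	 * and the producer (vxtxr_head), always leaving one slot unused
	 * so a full ring is distinguishable from an empty one; a negative
	 * difference means the range wraps, so add the ring size back.
	 */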
539d87a4d00Sryo 	int avail = txr->vxtxr_next - txr->vxtxr_head - 1;
54035492473Sryo 	return (avail < 0 ? (int)txr->vxtxr_ndesc + avail : avail);
541d87a4d00Sryo }
542d87a4d00Sryo 
543d87a4d00Sryo /*
544d87a4d00Sryo  * Since this is a purely paravirtualized device, we do not have
545d87a4d00Sryo  * to worry about DMA coherency. But at times, we must make sure
546d87a4d00Sryo  * both the compiler and CPU do not reorder memory operations.
547d87a4d00Sryo  */
548d87a4d00Sryo static inline void
549d87a4d00Sryo vmxnet3_barrier(struct vmxnet3_softc *sc, vmxnet3_barrier_t type)
550d87a4d00Sryo {
551d87a4d00Sryo 
552d87a4d00Sryo 	switch (type) {
553d87a4d00Sryo 	case VMXNET3_BARRIER_RD:
554d87a4d00Sryo 		membar_consumer();
555d87a4d00Sryo 		break;
556d87a4d00Sryo 	case VMXNET3_BARRIER_WR:
557d87a4d00Sryo 		membar_producer();
558d87a4d00Sryo 		break;
559d87a4d00Sryo 	default:
560d87a4d00Sryo 		panic("%s: bad barrier type %d", __func__, type);
561d87a4d00Sryo 	}
562d87a4d00Sryo }
563d87a4d00Sryo 
564d87a4d00Sryo static int
565d87a4d00Sryo vmxnet3_match(device_t parent, cfdata_t match, void *aux)
566d87a4d00Sryo {
567d87a4d00Sryo 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
568d87a4d00Sryo 
569d87a4d00Sryo 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VMWARE &&
570d87a4d00Sryo 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VMWARE_VMXNET3)
571d87a4d00Sryo 		return 1;
572d87a4d00Sryo 
573d87a4d00Sryo 	return 0;
574d87a4d00Sryo }
575d87a4d00Sryo 
576d87a4d00Sryo static void
577d87a4d00Sryo vmxnet3_attach(device_t parent, device_t self, void *aux)
578d87a4d00Sryo {
579d87a4d00Sryo 	struct vmxnet3_softc *sc = device_private(self);
580d87a4d00Sryo 	struct pci_attach_args *pa = aux;
581d87a4d00Sryo 	pcireg_t preg;
582d87a4d00Sryo 	int error;
583d87a4d00Sryo 	int candidate;
584d87a4d00Sryo 
585d87a4d00Sryo 	sc->vmx_dev = self;
586d87a4d00Sryo 	sc->vmx_pa = pa;
587d87a4d00Sryo 	sc->vmx_pc = pa->pa_pc;
588d87a4d00Sryo 	if (pci_dma64_available(pa))
589d87a4d00Sryo 		sc->vmx_dmat = pa->pa_dmat64;
590d87a4d00Sryo 	else
591d87a4d00Sryo 		sc->vmx_dmat = pa->pa_dmat;
592d87a4d00Sryo 
593d87a4d00Sryo 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", "vmxnet3", 1);
594d87a4d00Sryo 
595d87a4d00Sryo 	preg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
596d87a4d00Sryo 	preg |= PCI_COMMAND_MASTER_ENABLE;
597d87a4d00Sryo 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
598d87a4d00Sryo 
599d87a4d00Sryo 	sc->vmx_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
600d87a4d00Sryo 	callout_init(&sc->vmx_tick, CALLOUT_MPSAFE);
601d87a4d00Sryo 
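	/*
	 * At most one queue pair per CPU, capped by the device maxima
	 * and rounded down to a power of two.
	 */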
602d87a4d00Sryo 	candidate = MIN(MIN(VMXNET3_MAX_TX_QUEUES, VMXNET3_MAX_RX_QUEUES),
603d87a4d00Sryo 	    ncpu);
604d87a4d00Sryo 	sc->vmx_max_ntxqueues = sc->vmx_max_nrxqueues =
605d87a4d00Sryo 	    vmxnet3_calc_queue_size(candidate);
606d87a4d00Sryo 	sc->vmx_ntxdescs = 512;
607d87a4d00Sryo 	sc->vmx_nrxdescs = 256;
608d87a4d00Sryo 	sc->vmx_max_rxsegs = VMXNET3_MAX_RX_SEGS;
609d87a4d00Sryo 
610d87a4d00Sryo 	error = vmxnet3_alloc_pci_resources(sc);
611d87a4d00Sryo 	if (error)
612d87a4d00Sryo 		return;
613d87a4d00Sryo 
614d87a4d00Sryo 	error = vmxnet3_check_version(sc);
615d87a4d00Sryo 	if (error)
616d87a4d00Sryo 		return;
617d87a4d00Sryo 
618d87a4d00Sryo 	error = vmxnet3_alloc_rxtx_queues(sc);
619d87a4d00Sryo 	if (error)
620d87a4d00Sryo 		return;
621d87a4d00Sryo 
622d87a4d00Sryo 	error = vmxnet3_alloc_interrupts(sc);
623d87a4d00Sryo 	if (error)
624d87a4d00Sryo 		return;
625d87a4d00Sryo 
626d87a4d00Sryo 	vmxnet3_check_multiqueue(sc);
627d87a4d00Sryo 
628d87a4d00Sryo 	error = vmxnet3_alloc_data(sc);
629d87a4d00Sryo 	if (error)
630d87a4d00Sryo 		return;
631d87a4d00Sryo 
632d87a4d00Sryo 	error = vmxnet3_setup_interface(sc);
633d87a4d00Sryo 	if (error)
634d87a4d00Sryo 		return;
635d87a4d00Sryo 
636d87a4d00Sryo 	error = vmxnet3_setup_interrupts(sc);
637d87a4d00Sryo 	if (error)
638d87a4d00Sryo 		return;
639d87a4d00Sryo 
640d87a4d00Sryo 	error = vmxnet3_setup_sysctl(sc);
641d87a4d00Sryo 	if (error)
642d87a4d00Sryo 		return;
643d87a4d00Sryo 
644d87a4d00Sryo 	error = vmxnet3_setup_stats(sc);
645d87a4d00Sryo 	if (error)
646d87a4d00Sryo 		return;
647d87a4d00Sryo 
648db465cddSriastradh 	char buf[128];
649db465cddSriastradh 	snprintf(buf, sizeof(buf), "%s_reset", device_xname(sc->vmx_dev));
650db465cddSriastradh 	error = workqueue_create(&sc->vmx_reset_wq, buf,
651db465cddSriastradh 	    vmxnet3_reset_work, sc, VMXNET3_WORKQUEUE_PRI, IPL_SOFTCLOCK,
652db465cddSriastradh 	    WQ_MPSAFE);
653db465cddSriastradh 	if (error) {
654db465cddSriastradh 		aprint_error_dev(sc->vmx_dev,
655db465cddSriastradh 		    "failed to create reset workqueue: %d\n",
656db465cddSriastradh 		    error);
657db465cddSriastradh 		return;
658db465cddSriastradh 	}
659db465cddSriastradh 
660d87a4d00Sryo 	sc->vmx_flags |= VMXNET3_FLAG_ATTACHED;
661d87a4d00Sryo }
662d87a4d00Sryo 
663d87a4d00Sryo static int
664d87a4d00Sryo vmxnet3_detach(device_t self, int flags)
665d87a4d00Sryo {
666d87a4d00Sryo 	struct vmxnet3_softc *sc;
667d87a4d00Sryo 	struct ifnet *ifp;
668d87a4d00Sryo 
669d87a4d00Sryo 	sc = device_private(self);
670d87a4d00Sryo 	ifp = &sc->vmx_ethercom.ec_if;
671d87a4d00Sryo 
672d87a4d00Sryo 	if (sc->vmx_flags & VMXNET3_FLAG_ATTACHED) {
673d87a4d00Sryo 		VMXNET3_CORE_LOCK(sc);
674d87a4d00Sryo 		vmxnet3_stop_locked(sc);
675d87a4d00Sryo 		callout_halt(&sc->vmx_tick, sc->vmx_mtx);
676d87a4d00Sryo 		callout_destroy(&sc->vmx_tick);
677d87a4d00Sryo 		VMXNET3_CORE_UNLOCK(sc);
678d87a4d00Sryo 
679d87a4d00Sryo 		ether_ifdetach(ifp);
680d87a4d00Sryo 		if_detach(ifp);
681d87a4d00Sryo 		ifmedia_fini(&sc->vmx_media);
682d87a4d00Sryo 	}
683d87a4d00Sryo 
684d87a4d00Sryo 	vmxnet3_teardown_stats(sc);
685d87a4d00Sryo 	sysctl_teardown(&sc->vmx_sysctllog);
686d87a4d00Sryo 
687d87a4d00Sryo 	vmxnet3_free_interrupts(sc);
688d87a4d00Sryo 
689d87a4d00Sryo 	vmxnet3_free_data(sc);
690d87a4d00Sryo 	vmxnet3_free_pci_resources(sc);
691d87a4d00Sryo 	vmxnet3_free_rxtx_queues(sc);
692d87a4d00Sryo 
693d87a4d00Sryo 	if (sc->vmx_mtx)
694d87a4d00Sryo 		mutex_obj_free(sc->vmx_mtx);
695d87a4d00Sryo 
696d87a4d00Sryo 	return (0);
697d87a4d00Sryo }
698d87a4d00Sryo 
699d87a4d00Sryo static int
700d87a4d00Sryo vmxnet3_alloc_pci_resources(struct vmxnet3_softc *sc)
701d87a4d00Sryo {
702d87a4d00Sryo 	struct pci_attach_args *pa = sc->vmx_pa;
703d87a4d00Sryo 	pcireg_t memtype;
704d87a4d00Sryo 
705d87a4d00Sryo 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
706d87a4d00Sryo 	if (pci_mapreg_map(pa, PCI_BAR(0), memtype, 0, &sc->vmx_iot0, &sc->vmx_ioh0,
707d87a4d00Sryo 	    NULL, &sc->vmx_ios0)) {
708d87a4d00Sryo 		aprint_error_dev(sc->vmx_dev, "failed to map BAR0\n");
709d87a4d00Sryo 		return (ENXIO);
710d87a4d00Sryo 	}
711d87a4d00Sryo 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(1));
712d87a4d00Sryo 	if (pci_mapreg_map(pa, PCI_BAR(1), memtype, 0, &sc->vmx_iot1, &sc->vmx_ioh1,
713d87a4d00Sryo 	    NULL, &sc->vmx_ios1)) {
714d87a4d00Sryo 		aprint_error_dev(sc->vmx_dev, "failed to map BAR1\n");
715d87a4d00Sryo 		return (ENXIO);
716d87a4d00Sryo 	}
717d87a4d00Sryo 
718d87a4d00Sryo 	if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, NULL, NULL)) {
719d87a4d00Sryo 		sc->vmx_flags |= VMXNET3_FLAG_NO_MSIX;
720d87a4d00Sryo 		return (0);
721d87a4d00Sryo 	}
722d87a4d00Sryo 
723d87a4d00Sryo 	return (0);
724d87a4d00Sryo }
725d87a4d00Sryo 
726d87a4d00Sryo static void
727d87a4d00Sryo vmxnet3_free_pci_resources(struct vmxnet3_softc *sc)
728d87a4d00Sryo {
729d87a4d00Sryo 
730d87a4d00Sryo 	if (sc->vmx_ios0) {
731d87a4d00Sryo 		bus_space_unmap(sc->vmx_iot0, sc->vmx_ioh0, sc->vmx_ios0);
732d87a4d00Sryo 		sc->vmx_ios0 = 0;
733d87a4d00Sryo 	}
734d87a4d00Sryo 
735d87a4d00Sryo 	if (sc->vmx_ios1) {
736d87a4d00Sryo 		bus_space_unmap(sc->vmx_iot1, sc->vmx_ioh1, sc->vmx_ios1);
737d87a4d00Sryo 		sc->vmx_ios1 = 0;
738d87a4d00Sryo 	}
739d87a4d00Sryo }
740d87a4d00Sryo 
741d87a4d00Sryo static int
742d87a4d00Sryo vmxnet3_check_version(struct vmxnet3_softc *sc)
743d87a4d00Sryo {
744d87a4d00Sryo 	u_int ver;
745d87a4d00Sryo 
746d87a4d00Sryo 	ver = vmxnet3_read_bar1(sc, VMXNET3_BAR1_VRRS);
747d87a4d00Sryo 	if ((ver & 0x1) == 0) {
748d87a4d00Sryo 		aprint_error_dev(sc->vmx_dev,
749d87a4d00Sryo 		    "unsupported hardware version 0x%x\n", ver);
750d87a4d00Sryo 		return (ENOTSUP);
751d87a4d00Sryo 	}
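	/* Acknowledge that we speak revision 1 of the device interface. */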
752d87a4d00Sryo 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_VRRS, 1);
753d87a4d00Sryo 
754d87a4d00Sryo 	ver = vmxnet3_read_bar1(sc, VMXNET3_BAR1_UVRS);
755d87a4d00Sryo 	if ((ver & 0x1) == 0) {
756d87a4d00Sryo 		aprint_error_dev(sc->vmx_dev,
757650a2dd4Sandvar 		    "incompatible UPT version 0x%x\n", ver);
758d87a4d00Sryo 		return (ENOTSUP);
759d87a4d00Sryo 	}
760d87a4d00Sryo 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_UVRS, 1);
761d87a4d00Sryo 
762d87a4d00Sryo 	return (0);
763d87a4d00Sryo }
764d87a4d00Sryo 
765d87a4d00Sryo static void
766d87a4d00Sryo vmxnet3_check_multiqueue(struct vmxnet3_softc *sc)
767d87a4d00Sryo {
768d87a4d00Sryo 
769d87a4d00Sryo 	if (sc->vmx_intr_type != VMXNET3_IT_MSIX)
770d87a4d00Sryo 		goto out;
771d87a4d00Sryo 
772d87a4d00Sryo 	/* Just use the maximum configured for now. */
773d87a4d00Sryo 	sc->vmx_nrxqueues = sc->vmx_max_nrxqueues;
774d87a4d00Sryo 	sc->vmx_ntxqueues = sc->vmx_max_ntxqueues;
775d87a4d00Sryo 
776d87a4d00Sryo 	if (sc->vmx_nrxqueues > 1)
777d87a4d00Sryo 		sc->vmx_flags |= VMXNET3_FLAG_RSS;
778d87a4d00Sryo 
779d87a4d00Sryo 	return;
780d87a4d00Sryo 
781d87a4d00Sryo out:
782d87a4d00Sryo 	sc->vmx_ntxqueues = 1;
783d87a4d00Sryo 	sc->vmx_nrxqueues = 1;
784d87a4d00Sryo }
785d87a4d00Sryo 
786d87a4d00Sryo static int
787d87a4d00Sryo vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *sc)
788d87a4d00Sryo {
789d87a4d00Sryo 	int required;
790d87a4d00Sryo 	struct pci_attach_args *pa = sc->vmx_pa;
791d87a4d00Sryo 
792d87a4d00Sryo 	if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX)
793d87a4d00Sryo 		return (1);
794d87a4d00Sryo 
795d87a4d00Sryo 	/* Allocate an additional vector for the events interrupt. */
796d87a4d00Sryo 	required = MIN(sc->vmx_max_ntxqueues, sc->vmx_max_nrxqueues) + 1;
797d87a4d00Sryo 
798d87a4d00Sryo 	if (pci_msix_count(pa->pa_pc, pa->pa_tag) < required)
799d87a4d00Sryo 		return (1);
800d87a4d00Sryo 
801d87a4d00Sryo 	if (pci_msix_alloc_exact(pa, &sc->vmx_intrs, required) == 0) {
802d87a4d00Sryo 		sc->vmx_nintrs = required;
803d87a4d00Sryo 		return (0);
804d87a4d00Sryo 	}
805d87a4d00Sryo 
806d87a4d00Sryo 	return (1);
807d87a4d00Sryo }
808d87a4d00Sryo 
809d87a4d00Sryo static int
810d87a4d00Sryo vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc)
811d87a4d00Sryo {
812d87a4d00Sryo 	int nmsi, required;
813d87a4d00Sryo 	struct pci_attach_args *pa = sc->vmx_pa;
814d87a4d00Sryo 
815d87a4d00Sryo 	required = 1;
816d87a4d00Sryo 
817d87a4d00Sryo 	nmsi = pci_msi_count(pa->pa_pc, pa->pa_tag);
818d87a4d00Sryo 	if (nmsi < required)
819d87a4d00Sryo 		return (1);
820d87a4d00Sryo 
821d87a4d00Sryo 	if (pci_msi_alloc_exact(pa, &sc->vmx_intrs, required) == 0) {
822d87a4d00Sryo 		sc->vmx_nintrs = required;
823d87a4d00Sryo 		return (0);
824d87a4d00Sryo 	}
825d87a4d00Sryo 
826d87a4d00Sryo 	return (1);
827d87a4d00Sryo }
828d87a4d00Sryo 
829d87a4d00Sryo static int
830d87a4d00Sryo vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *sc)
831d87a4d00Sryo {
832d87a4d00Sryo 
833d87a4d00Sryo 	if (pci_intx_alloc(sc->vmx_pa, &sc->vmx_intrs) == 0) {
834d87a4d00Sryo 		sc->vmx_nintrs = 1;
835d87a4d00Sryo 		return (0);
836d87a4d00Sryo 	}
837d87a4d00Sryo 
838d87a4d00Sryo 	return (1);
839d87a4d00Sryo }
840d87a4d00Sryo 
841d87a4d00Sryo static int
842d87a4d00Sryo vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc)
843d87a4d00Sryo {
844d87a4d00Sryo 	u_int config;
845d87a4d00Sryo 	int error;
846d87a4d00Sryo 
847d87a4d00Sryo 	config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG);
848d87a4d00Sryo 
849d87a4d00Sryo 	sc->vmx_intr_type = config & 0x03;
850d87a4d00Sryo 	sc->vmx_intr_mask_mode = (config >> 2) & 0x03;
851d87a4d00Sryo 
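	/*
	 * The cases below deliberately fall through so that a failed
	 * allocation degrades from MSI-X to MSI to legacy INTx.
	 */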
852d87a4d00Sryo 	switch (sc->vmx_intr_type) {
853d87a4d00Sryo 	case VMXNET3_IT_AUTO:
854d87a4d00Sryo 		sc->vmx_intr_type = VMXNET3_IT_MSIX;
855d87a4d00Sryo 		/* FALLTHROUGH */
856d87a4d00Sryo 	case VMXNET3_IT_MSIX:
857d87a4d00Sryo 		error = vmxnet3_alloc_msix_interrupts(sc);
858d87a4d00Sryo 		if (error == 0)
859d87a4d00Sryo 			break;
860d87a4d00Sryo 		sc->vmx_intr_type = VMXNET3_IT_MSI;
861d87a4d00Sryo 		/* FALLTHROUGH */
862d87a4d00Sryo 	case VMXNET3_IT_MSI:
863d87a4d00Sryo 		error = vmxnet3_alloc_msi_interrupts(sc);
864d87a4d00Sryo 		if (error == 0)
865d87a4d00Sryo 			break;
866d87a4d00Sryo 		sc->vmx_intr_type = VMXNET3_IT_LEGACY;
867d87a4d00Sryo 		/* FALLTHROUGH */
868d87a4d00Sryo 	case VMXNET3_IT_LEGACY:
869d87a4d00Sryo 		error = vmxnet3_alloc_legacy_interrupts(sc);
870d87a4d00Sryo 		if (error == 0)
871d87a4d00Sryo 			break;
872d87a4d00Sryo 		/* FALLTHROUGH */
873d87a4d00Sryo 	default:
874d87a4d00Sryo 		sc->vmx_intr_type = -1;
875d87a4d00Sryo 		aprint_error_dev(sc->vmx_dev, "cannot allocate any interrupt resources\n");
876d87a4d00Sryo 		return (ENXIO);
877d87a4d00Sryo 	}
878d87a4d00Sryo 
879d87a4d00Sryo 	return (error);
880d87a4d00Sryo }
881d87a4d00Sryo 
882d87a4d00Sryo static void
883d87a4d00Sryo vmxnet3_free_interrupts(struct vmxnet3_softc *sc)
884d87a4d00Sryo {
885d87a4d00Sryo 	pci_chipset_tag_t pc = sc->vmx_pc;
886d87a4d00Sryo 	int i;
887d87a4d00Sryo 
888d87a4d00Sryo 	workqueue_destroy(sc->vmx_queue_wq);
889d87a4d00Sryo 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
890d87a4d00Sryo 		struct vmxnet3_queue *vmxq =  &sc->vmx_queue[i];
891d87a4d00Sryo 
892d87a4d00Sryo 		softint_disestablish(vmxq->vxq_si);
893d87a4d00Sryo 		vmxq->vxq_si = NULL;
894d87a4d00Sryo 	}
895d87a4d00Sryo 	for (i = 0; i < sc->vmx_nintrs; i++) {
896d87a4d00Sryo 		pci_intr_disestablish(pc, sc->vmx_ihs[i]);
897d87a4d00Sryo 	}
898d87a4d00Sryo 	pci_intr_release(pc, sc->vmx_intrs, sc->vmx_nintrs);
899d87a4d00Sryo }
900d87a4d00Sryo 
901d87a4d00Sryo static int
902d87a4d00Sryo vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *sc)
903d87a4d00Sryo {
904d87a4d00Sryo 	pci_chipset_tag_t pc = sc->vmx_pa->pa_pc;
905d87a4d00Sryo 	struct vmxnet3_queue *vmxq;
906d87a4d00Sryo 	pci_intr_handle_t *intr;
907d87a4d00Sryo 	void **ihs;
908d87a4d00Sryo 	int intr_idx, i, use_queues, error;
909d87a4d00Sryo 	kcpuset_t *affinity;
910d87a4d00Sryo 	const char *intrstr;
911d87a4d00Sryo 	char intrbuf[PCI_INTRSTR_LEN];
912d87a4d00Sryo 	char xnamebuf[32];
913d87a4d00Sryo 
914d87a4d00Sryo 	intr = sc->vmx_intrs;
915d87a4d00Sryo 	intr_idx = 0;
916d87a4d00Sryo 	ihs = sc->vmx_ihs;
917d87a4d00Sryo 
918d87a4d00Sryo 	/* See vmxnet3_alloc_msix_interrupts() */
919d87a4d00Sryo 	use_queues = MIN(sc->vmx_max_ntxqueues, sc->vmx_max_nrxqueues);
920d87a4d00Sryo 	for (i = 0; i < use_queues; i++, intr++, ihs++, intr_idx++) {
921d87a4d00Sryo 		snprintf(xnamebuf, 32, "%s: txrx %d", device_xname(sc->vmx_dev), i);
922d87a4d00Sryo 
923d87a4d00Sryo 		vmxq = &sc->vmx_queue[i];
924d87a4d00Sryo 
925d87a4d00Sryo 		intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
926d87a4d00Sryo 
927d87a4d00Sryo 		pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true);
928d87a4d00Sryo 		*ihs = pci_intr_establish_xname(pc, *intr, IPL_NET,
929d87a4d00Sryo 		    vmxnet3_txrxq_intr, vmxq, xnamebuf);
930d87a4d00Sryo 		if (*ihs == NULL) {
931d87a4d00Sryo 			aprint_error_dev(sc->vmx_dev,
932d87a4d00Sryo 			    "unable to establish txrx interrupt at %s\n", intrstr);
933d87a4d00Sryo 			return (-1);
934d87a4d00Sryo 		}
935d87a4d00Sryo 		aprint_normal_dev(sc->vmx_dev, "txrx interrupting at %s\n", intrstr);
936d87a4d00Sryo 
937d87a4d00Sryo 		kcpuset_create(&affinity, true);
938d87a4d00Sryo 		kcpuset_set(affinity, intr_idx % ncpu);
939d87a4d00Sryo 		error = interrupt_distribute(*ihs, affinity, NULL);
940d87a4d00Sryo 		if (error) {
941d87a4d00Sryo 			aprint_normal_dev(sc->vmx_dev,
942d87a4d00Sryo 			    "%s cannot be changed affinity, use default CPU\n",
943d87a4d00Sryo 			    intrstr);
944d87a4d00Sryo 		}
945d87a4d00Sryo 		kcpuset_destroy(affinity);
946d87a4d00Sryo 
947d87a4d00Sryo 		vmxq->vxq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
948d87a4d00Sryo 		    vmxnet3_handle_queue, vmxq);
949d87a4d00Sryo 		if (vmxq->vxq_si == NULL) {
950d87a4d00Sryo 			aprint_error_dev(sc->vmx_dev,
951d87a4d00Sryo 			    "softint_establish for vxq_si failed\n");
952d87a4d00Sryo 			return (-1);
953d87a4d00Sryo 		}
954d87a4d00Sryo 
955d87a4d00Sryo 		vmxq->vxq_intr_idx = intr_idx;
956d87a4d00Sryo 	}
957d87a4d00Sryo 	snprintf(xnamebuf, MAXCOMLEN, "%s_tx_rx", device_xname(sc->vmx_dev));
958d87a4d00Sryo 	error = workqueue_create(&sc->vmx_queue_wq, xnamebuf,
959d87a4d00Sryo 	    vmxnet3_handle_queue_work, sc, VMXNET3_WORKQUEUE_PRI, IPL_NET,
960d87a4d00Sryo 	    WQ_PERCPU | WQ_MPSAFE);
961d87a4d00Sryo 	if (error) {
962d87a4d00Sryo 		aprint_error_dev(sc->vmx_dev, "workqueue_create failed\n");
963d87a4d00Sryo 		return (-1);
964d87a4d00Sryo 	}
965d87a4d00Sryo 	sc->vmx_txrx_workqueue = false;
966d87a4d00Sryo 
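	/* The remaining vector is dedicated to event (link/error) notifications. */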
967d87a4d00Sryo 	intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
968d87a4d00Sryo 
969d87a4d00Sryo 	snprintf(xnamebuf, 32, "%s: link", device_xname(sc->vmx_dev));
970d87a4d00Sryo 	pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true);
971d87a4d00Sryo 	*ihs = pci_intr_establish_xname(pc, *intr, IPL_NET,
972d87a4d00Sryo 	    vmxnet3_event_intr, sc, xnamebuf);
973d87a4d00Sryo 	if (*ihs == NULL) {
974d87a4d00Sryo 		aprint_error_dev(sc->vmx_dev,
975d87a4d00Sryo 		    "unable to establish event interrupt at %s\n", intrstr);
976d87a4d00Sryo 		return (-1);
977d87a4d00Sryo 	}
978d87a4d00Sryo 	aprint_normal_dev(sc->vmx_dev, "event interrupting at %s\n", intrstr);
979d87a4d00Sryo 
980d87a4d00Sryo 	sc->vmx_event_intr_idx = intr_idx;
981d87a4d00Sryo 
982d87a4d00Sryo 	return (0);
983d87a4d00Sryo }
984d87a4d00Sryo 
985d87a4d00Sryo static int
986d87a4d00Sryo vmxnet3_setup_msi_interrupt(struct vmxnet3_softc *sc)
987d87a4d00Sryo {
988d87a4d00Sryo 	pci_chipset_tag_t pc = sc->vmx_pa->pa_pc;
989d87a4d00Sryo 	pci_intr_handle_t *intr;
990d87a4d00Sryo 	void **ihs;
991d87a4d00Sryo 	struct vmxnet3_queue *vmxq;
992d87a4d00Sryo 	int i;
993d87a4d00Sryo 	const char *intrstr;
994d87a4d00Sryo 	char intrbuf[PCI_INTRSTR_LEN];
995d87a4d00Sryo 	char xnamebuf[32];
996d87a4d00Sryo 
997d87a4d00Sryo 	intr = &sc->vmx_intrs[0];
998d87a4d00Sryo 	ihs = sc->vmx_ihs;
999d87a4d00Sryo 	vmxq = &sc->vmx_queue[0];
1000d87a4d00Sryo 
1001d87a4d00Sryo 	intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
1002d87a4d00Sryo 
1003d87a4d00Sryo 	snprintf(xnamebuf, 32, "%s: msi", device_xname(sc->vmx_dev));
1004d87a4d00Sryo 	pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true);
1005d87a4d00Sryo 	*ihs = pci_intr_establish_xname(pc, *intr, IPL_NET,
1006d87a4d00Sryo 	    vmxnet3_legacy_intr, sc, xnamebuf);
1007d87a4d00Sryo 	if (*ihs == NULL) {
1008d87a4d00Sryo 		aprint_error_dev(sc->vmx_dev,
1009d87a4d00Sryo 		    "unable to establish interrupt at %s\n", intrstr);
1010d87a4d00Sryo 		return (-1);
1011d87a4d00Sryo 	}
1012d87a4d00Sryo 	aprint_normal_dev(sc->vmx_dev, "interrupting at %s\n", intrstr);
1013d87a4d00Sryo 
1014d87a4d00Sryo 	vmxq->vxq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
1015d87a4d00Sryo 	    vmxnet3_handle_queue, vmxq);
1016d87a4d00Sryo 	if (vmxq->vxq_si == NULL) {
1017d87a4d00Sryo 		aprint_error_dev(sc->vmx_dev,
1018d87a4d00Sryo 		    "softint_establish for vxq_si failed\n");
1019d87a4d00Sryo 		return (-1);
1020d87a4d00Sryo 	}
1021d87a4d00Sryo 
1022d87a4d00Sryo 	for (i = 0; i < MIN(sc->vmx_ntxqueues, sc->vmx_nrxqueues); i++)
1023d87a4d00Sryo 		sc->vmx_queue[i].vxq_intr_idx = 0;
1024d87a4d00Sryo 	sc->vmx_event_intr_idx = 0;
1025d87a4d00Sryo 
1026d87a4d00Sryo 	return (0);
1027d87a4d00Sryo }
1028d87a4d00Sryo 
1029d87a4d00Sryo static int
1030d87a4d00Sryo vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *sc)
1031d87a4d00Sryo {
1032d87a4d00Sryo 	pci_chipset_tag_t pc = sc->vmx_pa->pa_pc;
1033d87a4d00Sryo 	pci_intr_handle_t *intr;
1034d87a4d00Sryo 	void **ihs;
1035d87a4d00Sryo 	struct vmxnet3_queue *vmxq;
1036d87a4d00Sryo 	int i;
1037d87a4d00Sryo 	const char *intrstr;
1038d87a4d00Sryo 	char intrbuf[PCI_INTRSTR_LEN];
1039d87a4d00Sryo 	char xnamebuf[32];
1040d87a4d00Sryo 
1041d87a4d00Sryo 	intr = &sc->vmx_intrs[0];
1042d87a4d00Sryo 	ihs = sc->vmx_ihs;
1043d87a4d00Sryo 	vmxq = &sc->vmx_queue[0];
1044d87a4d00Sryo 
1045d87a4d00Sryo 	intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
1046d87a4d00Sryo 
1047d87a4d00Sryo 	snprintf(xnamebuf, 32, "%s:legacy", device_xname(sc->vmx_dev));
1048d87a4d00Sryo 	pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true);
1049d87a4d00Sryo 	*ihs = pci_intr_establish_xname(pc, *intr, IPL_NET,
1050d87a4d00Sryo 	    vmxnet3_legacy_intr, sc, xnamebuf);
1051d87a4d00Sryo 	if (*ihs == NULL) {
1052d87a4d00Sryo 		aprint_error_dev(sc->vmx_dev,
1053d87a4d00Sryo 		    "unable to establish interrupt at %s\n", intrstr);
1054d87a4d00Sryo 		return (-1);
1055d87a4d00Sryo 	}
1056d87a4d00Sryo 	aprint_normal_dev(sc->vmx_dev, "interrupting at %s\n", intrstr);
1057d87a4d00Sryo 
1058d87a4d00Sryo 	vmxq->vxq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
1059d87a4d00Sryo 	    vmxnet3_handle_queue, vmxq);
1060d87a4d00Sryo 	if (vmxq->vxq_si == NULL) {
1061d87a4d00Sryo 		aprint_error_dev(sc->vmx_dev,
1062d87a4d00Sryo 		    "softint_establish for vxq_si failed\n");
1063d87a4d00Sryo 		return (-1);
1064d87a4d00Sryo 	}
1065d87a4d00Sryo 
1066d87a4d00Sryo 	for (i = 0; i < MIN(sc->vmx_ntxqueues, sc->vmx_nrxqueues); i++)
1067d87a4d00Sryo 		sc->vmx_queue[i].vxq_intr_idx = 0;
1068d87a4d00Sryo 	sc->vmx_event_intr_idx = 0;
1069d87a4d00Sryo 
1070d87a4d00Sryo 	return (0);
1071d87a4d00Sryo }
1072d87a4d00Sryo 
1073d87a4d00Sryo static void
1074d87a4d00Sryo vmxnet3_set_interrupt_idx(struct vmxnet3_softc *sc)
1075d87a4d00Sryo {
1076d87a4d00Sryo 	struct vmxnet3_queue *vmxq;
1077d87a4d00Sryo 	struct vmxnet3_txqueue *txq;
1078d87a4d00Sryo 	struct vmxnet3_txq_shared *txs;
1079d87a4d00Sryo 	struct vmxnet3_rxqueue *rxq;
1080d87a4d00Sryo 	struct vmxnet3_rxq_shared *rxs;
1081d87a4d00Sryo 	int i;
1082d87a4d00Sryo 
1083d87a4d00Sryo 	sc->vmx_ds->evintr = sc->vmx_event_intr_idx;
1084d87a4d00Sryo 
1085d87a4d00Sryo 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
1086d87a4d00Sryo 		vmxq = &sc->vmx_queue[i];
1087d87a4d00Sryo 		txq = &vmxq->vxq_txqueue;
1088d87a4d00Sryo 		txs = txq->vxtxq_ts;
1089d87a4d00Sryo 		txs->intr_idx = vmxq->vxq_intr_idx;
1090d87a4d00Sryo 	}
1091d87a4d00Sryo 
1092d87a4d00Sryo 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
1093d87a4d00Sryo 		vmxq = &sc->vmx_queue[i];
1094d87a4d00Sryo 		rxq = &vmxq->vxq_rxqueue;
1095d87a4d00Sryo 		rxs = rxq->vxrxq_rs;
1096d87a4d00Sryo 		rxs->intr_idx = vmxq->vxq_intr_idx;
1097d87a4d00Sryo 	}
1098d87a4d00Sryo }
1099d87a4d00Sryo 
1100d87a4d00Sryo static int
1101d87a4d00Sryo vmxnet3_setup_interrupts(struct vmxnet3_softc *sc)
1102d87a4d00Sryo {
1103d87a4d00Sryo 	int error;
1104d87a4d00Sryo 
1105d87a4d00Sryo 	switch (sc->vmx_intr_type) {
1106d87a4d00Sryo 	case VMXNET3_IT_MSIX:
1107d87a4d00Sryo 		error = vmxnet3_setup_msix_interrupts(sc);
1108d87a4d00Sryo 		break;
1109d87a4d00Sryo 	case VMXNET3_IT_MSI:
1110d87a4d00Sryo 		error = vmxnet3_setup_msi_interrupt(sc);
1111d87a4d00Sryo 		break;
1112d87a4d00Sryo 	case VMXNET3_IT_LEGACY:
1113d87a4d00Sryo 		error = vmxnet3_setup_legacy_interrupt(sc);
1114d87a4d00Sryo 		break;
1115d87a4d00Sryo 	default:
1116d87a4d00Sryo 		panic("%s: invalid interrupt type %d", __func__,
1117d87a4d00Sryo 		    sc->vmx_intr_type);
1118d87a4d00Sryo 	}
1119d87a4d00Sryo 
1120d87a4d00Sryo 	if (error == 0)
1121d87a4d00Sryo 		vmxnet3_set_interrupt_idx(sc);
1122d87a4d00Sryo 
1123d87a4d00Sryo 	return (error);
1124d87a4d00Sryo }
1125d87a4d00Sryo 
1126d87a4d00Sryo static int
1127d87a4d00Sryo vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q)
1128d87a4d00Sryo {
1129d87a4d00Sryo 	struct vmxnet3_rxqueue *rxq;
1130d87a4d00Sryo 	struct vmxnet3_rxring *rxr;
1131d87a4d00Sryo 	int i;
1132d87a4d00Sryo 
1133d87a4d00Sryo 	rxq = &sc->vmx_queue[q].vxq_rxqueue;
1134d87a4d00Sryo 
1135d87a4d00Sryo 	snprintf(rxq->vxrxq_name, sizeof(rxq->vxrxq_name), "%s-rx%d",
1136d87a4d00Sryo 	    device_xname(sc->vmx_dev), q);
1137d87a4d00Sryo 	rxq->vxrxq_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET /* XXX */);
1138d87a4d00Sryo 
1139d87a4d00Sryo 	rxq->vxrxq_sc = sc;
1140d87a4d00Sryo 
1141d87a4d00Sryo 	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1142d87a4d00Sryo 		rxr = &rxq->vxrxq_cmd_ring[i];
1143d87a4d00Sryo 		rxr->vxrxr_rid = i;
1144d87a4d00Sryo 		rxr->vxrxr_ndesc = sc->vmx_nrxdescs;
1145d87a4d00Sryo 		rxr->vxrxr_rxbuf = kmem_zalloc(rxr->vxrxr_ndesc *
1146d87a4d00Sryo 		    sizeof(struct vmxnet3_rxbuf), KM_SLEEP);
1147d87a4d00Sryo 
1148d87a4d00Sryo 		rxq->vxrxq_comp_ring.vxcr_ndesc += sc->vmx_nrxdescs;
1149d87a4d00Sryo 	}
1150d87a4d00Sryo 
1151db465cddSriastradh 	rxq->vxrxq_stopping = true;
1152db465cddSriastradh 
1153d87a4d00Sryo 	return (0);
1154d87a4d00Sryo }
1155d87a4d00Sryo 
1156d87a4d00Sryo static int
1157d87a4d00Sryo vmxnet3_init_txq(struct vmxnet3_softc *sc, int q)
1158d87a4d00Sryo {
1159d87a4d00Sryo 	struct vmxnet3_txqueue *txq;
1160d87a4d00Sryo 	struct vmxnet3_txring *txr;
1161d87a4d00Sryo 
1162d87a4d00Sryo 	txq = &sc->vmx_queue[q].vxq_txqueue;
1163d87a4d00Sryo 	txr = &txq->vxtxq_cmd_ring;
1164d87a4d00Sryo 
1165d87a4d00Sryo 	snprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d",
1166d87a4d00Sryo 	    device_xname(sc->vmx_dev), q);
1167d87a4d00Sryo 	txq->vxtxq_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET /* XXX */);
1168d87a4d00Sryo 
1169d87a4d00Sryo 	txq->vxtxq_sc = sc;
1170d87a4d00Sryo 
1171d87a4d00Sryo 	txq->vxtxq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
1172d87a4d00Sryo 	    vmxnet3_deferred_transmit, txq);
1173d87a4d00Sryo 	if (txq->vxtxq_si == NULL) {
1174d87a4d00Sryo 		mutex_obj_free(txq->vxtxq_mtx);
1175d87a4d00Sryo 		aprint_error_dev(sc->vmx_dev,
1176d87a4d00Sryo 		    "softint_establish for vxtxq_si failed\n");
1177d87a4d00Sryo 		return ENOMEM;
1178d87a4d00Sryo 	}
1179d87a4d00Sryo 
1180d87a4d00Sryo 	txr->vxtxr_ndesc = sc->vmx_ntxdescs;
1181d87a4d00Sryo 	txr->vxtxr_txbuf = kmem_zalloc(txr->vxtxr_ndesc *
1182d87a4d00Sryo 	    sizeof(struct vmxnet3_txbuf), KM_SLEEP);
1183d87a4d00Sryo 
1184d87a4d00Sryo 	txq->vxtxq_comp_ring.vxcr_ndesc = sc->vmx_ntxdescs;
1185d87a4d00Sryo 
1186d87a4d00Sryo 	txq->vxtxq_interq = pcq_create(sc->vmx_ntxdescs, KM_SLEEP);
1187d87a4d00Sryo 
1188db465cddSriastradh 	txq->vxtxq_stopping = true;
1189db465cddSriastradh 
1190d87a4d00Sryo 	return (0);
1191d87a4d00Sryo }
1192d87a4d00Sryo 
1193d87a4d00Sryo static int
1194d87a4d00Sryo vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *sc)
1195d87a4d00Sryo {
1196d87a4d00Sryo 	int i, error, max_nqueues;
1197d87a4d00Sryo 
1198d87a4d00Sryo 	KASSERT(!cpu_intr_p());
1199d87a4d00Sryo 	KASSERT(!cpu_softintr_p());
1200d87a4d00Sryo 
1201d87a4d00Sryo 	/*
1202d87a4d00Sryo 	 * Only attempt to create multiple queues if MSIX is available.
1203d87a4d00Sryo 	 * This check prevents us from allocating queue structures that
1204d87a4d00Sryo 	 * we will not use.
1205d87a4d00Sryo 	 *
1206d87a4d00Sryo 	 * FreeBSD:
1207d87a4d00Sryo 	 * MSIX is disabled by default because its apparently broken for
1208d87a4d00Sryo 	 * devices passed through by at least ESXi 5.1.
1209d87a4d00Sryo 	 * The hw.pci.honor_msi_blacklist tunable must be set to zero for MSIX.
1210d87a4d00Sryo 	 */
1211d87a4d00Sryo 	if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX) {
1212d87a4d00Sryo 		sc->vmx_max_nrxqueues = 1;
1213d87a4d00Sryo 		sc->vmx_max_ntxqueues = 1;
1214d87a4d00Sryo 	}
1215d87a4d00Sryo 
1216d87a4d00Sryo 	max_nqueues = MAX(sc->vmx_max_ntxqueues, sc->vmx_max_nrxqueues);
1217d87a4d00Sryo 	sc->vmx_queue = kmem_zalloc(sizeof(struct vmxnet3_queue) * max_nqueues,
1218d87a4d00Sryo 	    KM_SLEEP);
1219d87a4d00Sryo 
1220d87a4d00Sryo 	for (i = 0; i < max_nqueues; i++) {
1221d87a4d00Sryo 		struct vmxnet3_queue *vmxq = &sc->vmx_queue[i];
1222d87a4d00Sryo 		vmxq->vxq_id = i;
1223d87a4d00Sryo 	}
1224d87a4d00Sryo 
1225d87a4d00Sryo 	for (i = 0; i < sc->vmx_max_nrxqueues; i++) {
1226d87a4d00Sryo 		error = vmxnet3_init_rxq(sc, i);
1227d87a4d00Sryo 		if (error)
1228d87a4d00Sryo 			return (error);
1229d87a4d00Sryo 	}
1230d87a4d00Sryo 
1231d87a4d00Sryo 	for (i = 0; i < sc->vmx_max_ntxqueues; i++) {
1232d87a4d00Sryo 		error = vmxnet3_init_txq(sc, i);
1233d87a4d00Sryo 		if (error)
1234d87a4d00Sryo 			return (error);
1235d87a4d00Sryo 	}
1236d87a4d00Sryo 
1237d87a4d00Sryo 	return (0);
1238d87a4d00Sryo }
1239d87a4d00Sryo 
1240d87a4d00Sryo static void
1241d87a4d00Sryo vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *rxq)
1242d87a4d00Sryo {
1243d87a4d00Sryo 	struct vmxnet3_rxring *rxr;
1244d87a4d00Sryo 	int i;
1245d87a4d00Sryo 
1246d87a4d00Sryo 	rxq->vxrxq_sc = NULL;
1247d87a4d00Sryo 
1248d87a4d00Sryo 	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1249d87a4d00Sryo 		rxr = &rxq->vxrxq_cmd_ring[i];
1250d87a4d00Sryo 
1251d87a4d00Sryo 		if (rxr->vxrxr_rxbuf != NULL) {
1252d87a4d00Sryo 			kmem_free(rxr->vxrxr_rxbuf,
1253d87a4d00Sryo 			    rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxbuf));
1254d87a4d00Sryo 			rxr->vxrxr_rxbuf = NULL;
1255d87a4d00Sryo 		}
1256d87a4d00Sryo 	}
1257d87a4d00Sryo 
1258d87a4d00Sryo 	if (rxq->vxrxq_mtx != NULL)
1259d87a4d00Sryo 		mutex_obj_free(rxq->vxrxq_mtx);
1260d87a4d00Sryo }
1261d87a4d00Sryo 
1262d87a4d00Sryo static void
1263d87a4d00Sryo vmxnet3_destroy_txq(struct vmxnet3_txqueue *txq)
1264d87a4d00Sryo {
1265d87a4d00Sryo 	struct vmxnet3_txring *txr;
1266d87a4d00Sryo 	struct mbuf *m;
1267d87a4d00Sryo 
1268d87a4d00Sryo 	txr = &txq->vxtxq_cmd_ring;
1269d87a4d00Sryo 
1270d87a4d00Sryo 	txq->vxtxq_sc = NULL;
1271d87a4d00Sryo 
1272d87a4d00Sryo 	softint_disestablish(txq->vxtxq_si);
1273d87a4d00Sryo 
1274d87a4d00Sryo 	while ((m = pcq_get(txq->vxtxq_interq)) != NULL)
1275d87a4d00Sryo 		m_freem(m);
1276d87a4d00Sryo 	pcq_destroy(txq->vxtxq_interq);
1277d87a4d00Sryo 
1278d87a4d00Sryo 	if (txr->vxtxr_txbuf != NULL) {
1279d87a4d00Sryo 		kmem_free(txr->vxtxr_txbuf,
1280d87a4d00Sryo 		    txr->vxtxr_ndesc * sizeof(struct vmxnet3_txbuf));
1281d87a4d00Sryo 		txr->vxtxr_txbuf = NULL;
1282d87a4d00Sryo 	}
1283d87a4d00Sryo 
1284d87a4d00Sryo 	if (txq->vxtxq_mtx != NULL)
1285d87a4d00Sryo 		mutex_obj_free(txq->vxtxq_mtx);
1286d87a4d00Sryo }
1287d87a4d00Sryo 
1288d87a4d00Sryo static void
1289d87a4d00Sryo vmxnet3_free_rxtx_queues(struct vmxnet3_softc *sc)
1290d87a4d00Sryo {
1291d87a4d00Sryo 	int i;
1292d87a4d00Sryo 
1293d87a4d00Sryo 	if (sc->vmx_queue != NULL) {
1294d87a4d00Sryo 		int max_nqueues;
1295d87a4d00Sryo 
1296d87a4d00Sryo 		for (i = 0; i < sc->vmx_max_nrxqueues; i++)
1297d87a4d00Sryo 			vmxnet3_destroy_rxq(&sc->vmx_queue[i].vxq_rxqueue);
1298d87a4d00Sryo 
1299d87a4d00Sryo 		for (i = 0; i < sc->vmx_max_ntxqueues; i++)
1300d87a4d00Sryo 			vmxnet3_destroy_txq(&sc->vmx_queue[i].vxq_txqueue);
1301d87a4d00Sryo 
1302d87a4d00Sryo 		max_nqueues = MAX(sc->vmx_max_nrxqueues, sc->vmx_max_ntxqueues);
1303d87a4d00Sryo 		kmem_free(sc->vmx_queue,
1304d87a4d00Sryo 		    sizeof(struct vmxnet3_queue) * max_nqueues);
1305d87a4d00Sryo 	}
1306d87a4d00Sryo }
1307d87a4d00Sryo 
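/*
 * Allocate the DMA memory shared with the device: the driver_shared
 * area, the per-queue Tx/Rx shared structures (carved out of a single
 * 128-byte aligned allocation) and, when RSS is enabled, the RSS
 * shared area.
 */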
1308d87a4d00Sryo static int
1309d87a4d00Sryo vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc)
1310d87a4d00Sryo {
1311d87a4d00Sryo 	device_t dev;
1312d87a4d00Sryo 	uint8_t *kva;
1313d87a4d00Sryo 	size_t size;
1314d87a4d00Sryo 	int i, error;
1315d87a4d00Sryo 
1316d87a4d00Sryo 	dev = sc->vmx_dev;
1317d87a4d00Sryo 
1318d87a4d00Sryo 	size = sizeof(struct vmxnet3_driver_shared);
1319d87a4d00Sryo 	error = vmxnet3_dma_malloc(sc, size, 1, &sc->vmx_ds_dma);
1320d87a4d00Sryo 	if (error) {
1321d87a4d00Sryo 		device_printf(dev, "cannot alloc shared memory\n");
1322d87a4d00Sryo 		return (error);
1323d87a4d00Sryo 	}
1324d87a4d00Sryo 	sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.dma_vaddr;
1325d87a4d00Sryo 
1326d87a4d00Sryo 	size = sc->vmx_ntxqueues * sizeof(struct vmxnet3_txq_shared) +
1327d87a4d00Sryo 	    sc->vmx_nrxqueues * sizeof(struct vmxnet3_rxq_shared);
1328d87a4d00Sryo 	error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_qs_dma);
1329d87a4d00Sryo 	if (error) {
1330d87a4d00Sryo 		device_printf(dev, "cannot alloc queue shared memory\n");
1331d87a4d00Sryo 		return (error);
1332d87a4d00Sryo 	}
1333d87a4d00Sryo 	sc->vmx_qs = (void *) sc->vmx_qs_dma.dma_vaddr;
1334d87a4d00Sryo 	kva = sc->vmx_qs;
1335d87a4d00Sryo 
1336d87a4d00Sryo 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
1337d87a4d00Sryo 		sc->vmx_queue[i].vxq_txqueue.vxtxq_ts =
1338d87a4d00Sryo 		    (struct vmxnet3_txq_shared *) kva;
1339d87a4d00Sryo 		kva += sizeof(struct vmxnet3_txq_shared);
1340d87a4d00Sryo 	}
1341d87a4d00Sryo 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
1342d87a4d00Sryo 		sc->vmx_queue[i].vxq_rxqueue.vxrxq_rs =
1343d87a4d00Sryo 		    (struct vmxnet3_rxq_shared *) kva;
1344d87a4d00Sryo 		kva += sizeof(struct vmxnet3_rxq_shared);
1345d87a4d00Sryo 	}
1346d87a4d00Sryo 
1347d87a4d00Sryo 	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
1348d87a4d00Sryo 		size = sizeof(struct vmxnet3_rss_shared);
1349d87a4d00Sryo 		error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_rss_dma);
1350d87a4d00Sryo 		if (error) {
1351d87a4d00Sryo 			device_printf(dev, "cannot alloc rss shared memory\n");
1352d87a4d00Sryo 			return (error);
1353d87a4d00Sryo 		}
1354d87a4d00Sryo 		sc->vmx_rss =
1355d87a4d00Sryo 		    (struct vmxnet3_rss_shared *) sc->vmx_rss_dma.dma_vaddr;
1356d87a4d00Sryo 	}
1357d87a4d00Sryo 
1358d87a4d00Sryo 	return (0);
1359d87a4d00Sryo }
1360d87a4d00Sryo 
1361d87a4d00Sryo static void
1362d87a4d00Sryo vmxnet3_free_shared_data(struct vmxnet3_softc *sc)
1363d87a4d00Sryo {
1364d87a4d00Sryo 
1365d87a4d00Sryo 	if (sc->vmx_rss != NULL) {
1366d87a4d00Sryo 		vmxnet3_dma_free(sc, &sc->vmx_rss_dma);
1367d87a4d00Sryo 		sc->vmx_rss = NULL;
1368d87a4d00Sryo 	}
1369d87a4d00Sryo 
1370d87a4d00Sryo 	if (sc->vmx_qs != NULL) {
1371d87a4d00Sryo 		vmxnet3_dma_free(sc, &sc->vmx_qs_dma);
1372d87a4d00Sryo 		sc->vmx_qs = NULL;
1373d87a4d00Sryo 	}
1374d87a4d00Sryo 
1375d87a4d00Sryo 	if (sc->vmx_ds != NULL) {
1376d87a4d00Sryo 		vmxnet3_dma_free(sc, &sc->vmx_ds_dma);
1377d87a4d00Sryo 		sc->vmx_ds = NULL;
1378d87a4d00Sryo 	}
1379d87a4d00Sryo }
1380d87a4d00Sryo 
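/*
 * Allocate, per Tx queue, the command (descriptor) ring and the
 * completion ring (both 512-byte aligned) and create a DMA map for
 * each Tx buffer slot.
 */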
1381d87a4d00Sryo static int
1382d87a4d00Sryo vmxnet3_alloc_txq_data(struct vmxnet3_softc *sc)
1383d87a4d00Sryo {
1384d87a4d00Sryo 	device_t dev;
1385d87a4d00Sryo 	struct vmxnet3_txqueue *txq;
1386d87a4d00Sryo 	struct vmxnet3_txring *txr;
1387d87a4d00Sryo 	struct vmxnet3_comp_ring *txc;
1388d87a4d00Sryo 	size_t descsz, compsz;
138935492473Sryo 	u_int i;
139035492473Sryo 	int q, error;
1391d87a4d00Sryo 
1392d87a4d00Sryo 	dev = sc->vmx_dev;
1393d87a4d00Sryo 
1394d87a4d00Sryo 	for (q = 0; q < sc->vmx_ntxqueues; q++) {
1395d87a4d00Sryo 		txq = &sc->vmx_queue[q].vxq_txqueue;
1396d87a4d00Sryo 		txr = &txq->vxtxq_cmd_ring;
1397d87a4d00Sryo 		txc = &txq->vxtxq_comp_ring;
1398d87a4d00Sryo 
1399d87a4d00Sryo 		descsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc);
1400d87a4d00Sryo 		compsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txcompdesc);
1401d87a4d00Sryo 
1402d87a4d00Sryo 		error = vmxnet3_dma_malloc(sc, descsz, 512, &txr->vxtxr_dma);
1403d87a4d00Sryo 		if (error) {
1404d87a4d00Sryo 			device_printf(dev, "cannot alloc Tx descriptors for "
1405d87a4d00Sryo 			    "queue %d error %d\n", q, error);
1406d87a4d00Sryo 			return (error);
1407d87a4d00Sryo 		}
1408d87a4d00Sryo 		txr->vxtxr_txd =
1409d87a4d00Sryo 		    (struct vmxnet3_txdesc *) txr->vxtxr_dma.dma_vaddr;
1410d87a4d00Sryo 
1411d87a4d00Sryo 		error = vmxnet3_dma_malloc(sc, compsz, 512, &txc->vxcr_dma);
1412d87a4d00Sryo 		if (error) {
1413d87a4d00Sryo 			device_printf(dev, "cannot alloc Tx comp descriptors "
1414d87a4d00Sryo 			   "for queue %d error %d\n", q, error);
1415d87a4d00Sryo 			return (error);
1416d87a4d00Sryo 		}
1417d87a4d00Sryo 		txc->vxcr_u.txcd =
1418d87a4d00Sryo 		    (struct vmxnet3_txcompdesc *) txc->vxcr_dma.dma_vaddr;
1419d87a4d00Sryo 
1420d87a4d00Sryo 		for (i = 0; i < txr->vxtxr_ndesc; i++) {
1421d87a4d00Sryo 			error = bus_dmamap_create(sc->vmx_dmat, VMXNET3_TX_MAXSIZE,
1422d87a4d00Sryo 			    VMXNET3_TX_MAXSEGS, VMXNET3_TX_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
1423d87a4d00Sryo 			    &txr->vxtxr_txbuf[i].vtxb_dmamap);
1424d87a4d00Sryo 			if (error) {
1425d87a4d00Sryo 				device_printf(dev, "unable to create Tx buf "
1426d87a4d00Sryo 				    "dmamap for queue %d idx %d\n", q, i);
1427d87a4d00Sryo 				return (error);
1428d87a4d00Sryo 			}
1429d87a4d00Sryo 		}
1430d87a4d00Sryo 	}
1431d87a4d00Sryo 
1432d87a4d00Sryo 	return (0);
1433d87a4d00Sryo }
1434d87a4d00Sryo 
1435d87a4d00Sryo static void
1436d87a4d00Sryo vmxnet3_free_txq_data(struct vmxnet3_softc *sc)
1437d87a4d00Sryo {
1438d87a4d00Sryo 	struct vmxnet3_txqueue *txq;
1439d87a4d00Sryo 	struct vmxnet3_txring *txr;
1440d87a4d00Sryo 	struct vmxnet3_comp_ring *txc;
1441d87a4d00Sryo 	struct vmxnet3_txbuf *txb;
144235492473Sryo 	u_int i;
144335492473Sryo 	int q;
1444d87a4d00Sryo 
1445d87a4d00Sryo 	for (q = 0; q < sc->vmx_ntxqueues; q++) {
1446d87a4d00Sryo 		txq = &sc->vmx_queue[q].vxq_txqueue;
1447d87a4d00Sryo 		txr = &txq->vxtxq_cmd_ring;
1448d87a4d00Sryo 		txc = &txq->vxtxq_comp_ring;
1449d87a4d00Sryo 
1450d87a4d00Sryo 		for (i = 0; i < txr->vxtxr_ndesc; i++) {
1451d87a4d00Sryo 			txb = &txr->vxtxr_txbuf[i];
1452d87a4d00Sryo 			if (txb->vtxb_dmamap != NULL) {
1453d87a4d00Sryo 				bus_dmamap_destroy(sc->vmx_dmat,
1454d87a4d00Sryo 				    txb->vtxb_dmamap);
1455d87a4d00Sryo 				txb->vtxb_dmamap = NULL;
1456d87a4d00Sryo 			}
1457d87a4d00Sryo 		}
1458d87a4d00Sryo 
1459d87a4d00Sryo 		if (txc->vxcr_u.txcd != NULL) {
1460d87a4d00Sryo 			vmxnet3_dma_free(sc, &txc->vxcr_dma);
1461d87a4d00Sryo 			txc->vxcr_u.txcd = NULL;
1462d87a4d00Sryo 		}
1463d87a4d00Sryo 
1464d87a4d00Sryo 		if (txr->vxtxr_txd != NULL) {
1465d87a4d00Sryo 			vmxnet3_dma_free(sc, &txr->vxtxr_dma);
1466d87a4d00Sryo 			txr->vxtxr_txd = NULL;
1467d87a4d00Sryo 		}
1468d87a4d00Sryo 	}
1469d87a4d00Sryo }
1470d87a4d00Sryo 
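/*
 * Allocate, per Rx queue, the descriptors of each command ring, one
 * completion ring sized for both command rings, plus a spare DMA map
 * per ring and a DMA map for each Rx buffer slot.
 */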
1471d87a4d00Sryo static int
1472d87a4d00Sryo vmxnet3_alloc_rxq_data(struct vmxnet3_softc *sc)
1473d87a4d00Sryo {
1474d87a4d00Sryo 	device_t dev;
1475d87a4d00Sryo 	struct vmxnet3_rxqueue *rxq;
1476d87a4d00Sryo 	struct vmxnet3_rxring *rxr;
1477d87a4d00Sryo 	struct vmxnet3_comp_ring *rxc;
1478d87a4d00Sryo 	int descsz, compsz;
147935492473Sryo 	u_int i, j;
148035492473Sryo 	int q, error;
1481d87a4d00Sryo 
1482d87a4d00Sryo 	dev = sc->vmx_dev;
1483d87a4d00Sryo 
1484d87a4d00Sryo 	for (q = 0; q < sc->vmx_nrxqueues; q++) {
1485d87a4d00Sryo 		rxq = &sc->vmx_queue[q].vxq_rxqueue;
1486d87a4d00Sryo 		rxc = &rxq->vxrxq_comp_ring;
1487d87a4d00Sryo 		compsz = 0;
1488d87a4d00Sryo 
1489d87a4d00Sryo 		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1490d87a4d00Sryo 			rxr = &rxq->vxrxq_cmd_ring[i];
1491d87a4d00Sryo 
1492d87a4d00Sryo 			descsz = rxr->vxrxr_ndesc *
1493d87a4d00Sryo 			    sizeof(struct vmxnet3_rxdesc);
1494d87a4d00Sryo 			compsz += rxr->vxrxr_ndesc *
1495d87a4d00Sryo 			    sizeof(struct vmxnet3_rxcompdesc);
1496d87a4d00Sryo 
1497d87a4d00Sryo 			error = vmxnet3_dma_malloc(sc, descsz, 512,
1498d87a4d00Sryo 			    &rxr->vxrxr_dma);
1499d87a4d00Sryo 			if (error) {
1500d87a4d00Sryo 				device_printf(dev, "cannot allocate Rx "
1501d87a4d00Sryo 				    "descriptors for queue %d/%d error %d\n",
1502d87a4d00Sryo 				    i, q, error);
1503d87a4d00Sryo 				return (error);
1504d87a4d00Sryo 			}
1505d87a4d00Sryo 			rxr->vxrxr_rxd =
1506d87a4d00Sryo 			    (struct vmxnet3_rxdesc *) rxr->vxrxr_dma.dma_vaddr;
1507d87a4d00Sryo 		}
1508d87a4d00Sryo 
1509d87a4d00Sryo 		error = vmxnet3_dma_malloc(sc, compsz, 512, &rxc->vxcr_dma);
1510d87a4d00Sryo 		if (error) {
1511d87a4d00Sryo 			device_printf(dev, "cannot alloc Rx comp descriptors "
1512d87a4d00Sryo 			    "for queue %d error %d\n", q, error);
1513d87a4d00Sryo 			return (error);
1514d87a4d00Sryo 		}
1515d87a4d00Sryo 		rxc->vxcr_u.rxcd =
1516d87a4d00Sryo 		    (struct vmxnet3_rxcompdesc *) rxc->vxcr_dma.dma_vaddr;
1517d87a4d00Sryo 
1518d87a4d00Sryo 		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1519d87a4d00Sryo 			rxr = &rxq->vxrxq_cmd_ring[i];
1520d87a4d00Sryo 
1521d87a4d00Sryo 			error = bus_dmamap_create(sc->vmx_dmat, JUMBO_LEN, 1,
1522d87a4d00Sryo 			    JUMBO_LEN, 0, BUS_DMA_NOWAIT,
1523d87a4d00Sryo 			    &rxr->vxrxr_spare_dmap);
1524d87a4d00Sryo 			if (error) {
1525d87a4d00Sryo 				device_printf(dev, "unable to create spare "
1526d87a4d00Sryo 				    "dmamap for queue %d/%d error %d\n",
1527d87a4d00Sryo 				    q, i, error);
1528d87a4d00Sryo 				return (error);
1529d87a4d00Sryo 			}
1530d87a4d00Sryo 
1531d87a4d00Sryo 			for (j = 0; j < rxr->vxrxr_ndesc; j++) {
1532d87a4d00Sryo 				error = bus_dmamap_create(sc->vmx_dmat, JUMBO_LEN, 1,
1533d87a4d00Sryo 				    JUMBO_LEN, 0, BUS_DMA_NOWAIT,
1534d87a4d00Sryo 				    &rxr->vxrxr_rxbuf[j].vrxb_dmamap);
1535d87a4d00Sryo 				if (error) {
1536d87a4d00Sryo 					device_printf(dev, "unable to create "
1537d87a4d00Sryo 					    "dmamap for queue %d/%d slot %d "
1538d87a4d00Sryo 					    "error %d\n",
1539d87a4d00Sryo 					    q, i, j, error);
1540d87a4d00Sryo 					return (error);
1541d87a4d00Sryo 				}
1542d87a4d00Sryo 			}
1543d87a4d00Sryo 		}
1544d87a4d00Sryo 	}
1545d87a4d00Sryo 
1546d87a4d00Sryo 	return (0);
1547d87a4d00Sryo }
1548d87a4d00Sryo 
1549d87a4d00Sryo static void
1550d87a4d00Sryo vmxnet3_free_rxq_data(struct vmxnet3_softc *sc)
1551d87a4d00Sryo {
1552d87a4d00Sryo 	struct vmxnet3_rxqueue *rxq;
1553d87a4d00Sryo 	struct vmxnet3_rxring *rxr;
1554d87a4d00Sryo 	struct vmxnet3_comp_ring *rxc;
1555d87a4d00Sryo 	struct vmxnet3_rxbuf *rxb;
155635492473Sryo 	u_int i, j;
155735492473Sryo 	int q;
1558d87a4d00Sryo 
1559d87a4d00Sryo 	for (q = 0; q < sc->vmx_nrxqueues; q++) {
1560d87a4d00Sryo 		rxq = &sc->vmx_queue[q].vxq_rxqueue;
1561d87a4d00Sryo 		rxc = &rxq->vxrxq_comp_ring;
1562d87a4d00Sryo 
1563d87a4d00Sryo 		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1564d87a4d00Sryo 			rxr = &rxq->vxrxq_cmd_ring[i];
1565d87a4d00Sryo 
1566d87a4d00Sryo 			if (rxr->vxrxr_spare_dmap != NULL) {
1567d87a4d00Sryo 				bus_dmamap_destroy(sc->vmx_dmat,
1568d87a4d00Sryo 				    rxr->vxrxr_spare_dmap);
1569d87a4d00Sryo 				rxr->vxrxr_spare_dmap = NULL;
1570d87a4d00Sryo 			}
1571d87a4d00Sryo 
1572d87a4d00Sryo 			for (j = 0; j < rxr->vxrxr_ndesc; j++) {
1573d87a4d00Sryo 				rxb = &rxr->vxrxr_rxbuf[j];
1574d87a4d00Sryo 				if (rxb->vrxb_dmamap != NULL) {
1575d87a4d00Sryo 					bus_dmamap_destroy(sc->vmx_dmat,
1576d87a4d00Sryo 					    rxb->vrxb_dmamap);
1577d87a4d00Sryo 					rxb->vrxb_dmamap = NULL;
1578d87a4d00Sryo 				}
1579d87a4d00Sryo 			}
1580d87a4d00Sryo 		}
1581d87a4d00Sryo 
1582d87a4d00Sryo 		if (rxc->vxcr_u.rxcd != NULL) {
1583d87a4d00Sryo 			vmxnet3_dma_free(sc, &rxc->vxcr_dma);
1584d87a4d00Sryo 			rxc->vxcr_u.rxcd = NULL;
1585d87a4d00Sryo 		}
1586d87a4d00Sryo 
1587d87a4d00Sryo 		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1588d87a4d00Sryo 			rxr = &rxq->vxrxq_cmd_ring[i];
1589d87a4d00Sryo 
1590d87a4d00Sryo 			if (rxr->vxrxr_rxd != NULL) {
1591d87a4d00Sryo 				vmxnet3_dma_free(sc, &rxr->vxrxr_dma);
1592d87a4d00Sryo 				rxr->vxrxr_rxd = NULL;
1593d87a4d00Sryo 			}
1594d87a4d00Sryo 		}
1595d87a4d00Sryo 	}
1596d87a4d00Sryo }
1597d87a4d00Sryo 
1598d87a4d00Sryo static int
1599d87a4d00Sryo vmxnet3_alloc_queue_data(struct vmxnet3_softc *sc)
1600d87a4d00Sryo {
1601d87a4d00Sryo 	int error;
1602d87a4d00Sryo 
1603d87a4d00Sryo 	error = vmxnet3_alloc_txq_data(sc);
1604d87a4d00Sryo 	if (error)
1605d87a4d00Sryo 		return (error);
1606d87a4d00Sryo 
1607d87a4d00Sryo 	error = vmxnet3_alloc_rxq_data(sc);
1608d87a4d00Sryo 	if (error)
1609d87a4d00Sryo 		return (error);
1610d87a4d00Sryo 
1611d87a4d00Sryo 	return (0);
1612d87a4d00Sryo }
1613d87a4d00Sryo 
1614d87a4d00Sryo static void
1615d87a4d00Sryo vmxnet3_free_queue_data(struct vmxnet3_softc *sc)
1616d87a4d00Sryo {
1617d87a4d00Sryo 
1618d87a4d00Sryo 	if (sc->vmx_queue != NULL) {
1619d87a4d00Sryo 		vmxnet3_free_rxq_data(sc);
1620d87a4d00Sryo 		vmxnet3_free_txq_data(sc);
1621d87a4d00Sryo 	}
1622d87a4d00Sryo }
1623d87a4d00Sryo 
1624d87a4d00Sryo static int
1625d87a4d00Sryo vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc)
1626d87a4d00Sryo {
1627d87a4d00Sryo 	int error;
1628d87a4d00Sryo 
1629d87a4d00Sryo 	error = vmxnet3_dma_malloc(sc, VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN,
1630d87a4d00Sryo 	    32, &sc->vmx_mcast_dma);
1631d87a4d00Sryo 	if (error)
1632d87a4d00Sryo 		device_printf(sc->vmx_dev, "unable to alloc multicast table\n");
1633d87a4d00Sryo 	else
1634d87a4d00Sryo 		sc->vmx_mcast = sc->vmx_mcast_dma.dma_vaddr;
1635d87a4d00Sryo 
1636d87a4d00Sryo 	return (error);
1637d87a4d00Sryo }
1638d87a4d00Sryo 
1639d87a4d00Sryo static void
1640d87a4d00Sryo vmxnet3_free_mcast_table(struct vmxnet3_softc *sc)
1641d87a4d00Sryo {
1642d87a4d00Sryo 
1643d87a4d00Sryo 	if (sc->vmx_mcast != NULL) {
1644d87a4d00Sryo 		vmxnet3_dma_free(sc, &sc->vmx_mcast_dma);
1645d87a4d00Sryo 		sc->vmx_mcast = NULL;
1646d87a4d00Sryo 	}
1647d87a4d00Sryo }
1648d87a4d00Sryo 
1649d87a4d00Sryo static void
1650d87a4d00Sryo vmxnet3_init_shared_data(struct vmxnet3_softc *sc)
1651d87a4d00Sryo {
1652d87a4d00Sryo 	struct vmxnet3_driver_shared *ds;
1653d87a4d00Sryo 	struct vmxnet3_txqueue *txq;
1654d87a4d00Sryo 	struct vmxnet3_txq_shared *txs;
1655d87a4d00Sryo 	struct vmxnet3_rxqueue *rxq;
1656d87a4d00Sryo 	struct vmxnet3_rxq_shared *rxs;
1657d87a4d00Sryo 	int i;
1658d87a4d00Sryo 
1659d87a4d00Sryo 	ds = sc->vmx_ds;
1660d87a4d00Sryo 
1661d87a4d00Sryo 	/*
1662d87a4d00Sryo 	 * Initialize fields of the shared data that remain the same across
1663d87a4d00Sryo 	 * reinits. Note the shared data is zero'd when allocated.
1664d87a4d00Sryo 	 */
1665d87a4d00Sryo 
1666d87a4d00Sryo 	ds->magic = VMXNET3_REV1_MAGIC;
1667d87a4d00Sryo 
1668d87a4d00Sryo 	/* DriverInfo */
1669d87a4d00Sryo 	ds->version = VMXNET3_DRIVER_VERSION;
1670d87a4d00Sryo 	ds->guest = VMXNET3_GOS_FREEBSD |
1671d87a4d00Sryo #ifdef __LP64__
1672d87a4d00Sryo 	    VMXNET3_GOS_64BIT;
1673d87a4d00Sryo #else
1674d87a4d00Sryo 	    VMXNET3_GOS_32BIT;
1675d87a4d00Sryo #endif
1676d87a4d00Sryo 	ds->vmxnet3_revision = 1;
1677d87a4d00Sryo 	ds->upt_version = 1;
1678d87a4d00Sryo 
1679d87a4d00Sryo 	/* Misc. conf */
1680d87a4d00Sryo 	ds->driver_data = vtophys(sc);
1681d87a4d00Sryo 	ds->driver_data_len = sizeof(struct vmxnet3_softc);
1682d87a4d00Sryo 	ds->queue_shared = sc->vmx_qs_dma.dma_paddr;
1683d87a4d00Sryo 	ds->queue_shared_len = sc->vmx_qs_dma.dma_size;
1684d87a4d00Sryo 	ds->nrxsg_max = sc->vmx_max_rxsegs;
1685d87a4d00Sryo 
1686d87a4d00Sryo 	/* RSS conf */
1687d87a4d00Sryo 	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
1688d87a4d00Sryo 		ds->rss.version = 1;
1689d87a4d00Sryo 		ds->rss.paddr = sc->vmx_rss_dma.dma_paddr;
1690d87a4d00Sryo 		ds->rss.len = sc->vmx_rss_dma.dma_size;
1691d87a4d00Sryo 	}
1692d87a4d00Sryo 
1693d87a4d00Sryo 	/* Interrupt control. */
1694d87a4d00Sryo 	ds->automask = sc->vmx_intr_mask_mode == VMXNET3_IMM_AUTO;
1695d87a4d00Sryo 	ds->nintr = sc->vmx_nintrs;
1696d87a4d00Sryo 	ds->evintr = sc->vmx_event_intr_idx;
1697d87a4d00Sryo 	ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;
1698d87a4d00Sryo 
1699d87a4d00Sryo 	for (i = 0; i < sc->vmx_nintrs; i++)
1700d87a4d00Sryo 		ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;
1701d87a4d00Sryo 
1702d87a4d00Sryo 	/* Receive filter. */
1703d87a4d00Sryo 	ds->mcast_table = sc->vmx_mcast_dma.dma_paddr;
1704d87a4d00Sryo 	ds->mcast_tablelen = sc->vmx_mcast_dma.dma_size;
1705d87a4d00Sryo 
1706d87a4d00Sryo 	/* Tx queues */
1707d87a4d00Sryo 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
1708d87a4d00Sryo 		txq = &sc->vmx_queue[i].vxq_txqueue;
1709d87a4d00Sryo 		txs = txq->vxtxq_ts;
1710d87a4d00Sryo 
1711d87a4d00Sryo 		txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_dma.dma_paddr;
1712d87a4d00Sryo 		txs->cmd_ring_len = txq->vxtxq_cmd_ring.vxtxr_ndesc;
1713d87a4d00Sryo 		txs->comp_ring = txq->vxtxq_comp_ring.vxcr_dma.dma_paddr;
1714d87a4d00Sryo 		txs->comp_ring_len = txq->vxtxq_comp_ring.vxcr_ndesc;
1715d87a4d00Sryo 		txs->driver_data = vtophys(txq);
1716d87a4d00Sryo 		txs->driver_data_len = sizeof(struct vmxnet3_txqueue);
1717d87a4d00Sryo 	}
1718d87a4d00Sryo 
1719d87a4d00Sryo 	/* Rx queues */
1720d87a4d00Sryo 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
1721d87a4d00Sryo 		rxq = &sc->vmx_queue[i].vxq_rxqueue;
1722d87a4d00Sryo 		rxs = rxq->vxrxq_rs;
1723d87a4d00Sryo 
1724d87a4d00Sryo 		rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_dma.dma_paddr;
1725d87a4d00Sryo 		rxs->cmd_ring_len[0] = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc;
1726d87a4d00Sryo 		rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_dma.dma_paddr;
1727d87a4d00Sryo 		rxs->cmd_ring_len[1] = rxq->vxrxq_cmd_ring[1].vxrxr_ndesc;
1728d87a4d00Sryo 		rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_dma.dma_paddr;
1729d87a4d00Sryo 		rxs->comp_ring_len = rxq->vxrxq_comp_ring.vxcr_ndesc;
1730d87a4d00Sryo 		rxs->driver_data = vtophys(rxq);
1731d87a4d00Sryo 		rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue);
1732d87a4d00Sryo 	}
1733d87a4d00Sryo }
1734d87a4d00Sryo 
1735d87a4d00Sryo static void
1736d87a4d00Sryo vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *sc)
1737d87a4d00Sryo {
1738d87a4d00Sryo 	/*
1739d87a4d00Sryo 	 * Use the same key as the Linux driver until FreeBSD can do
1740d87a4d00Sryo 	 * RSS (presumably Toeplitz) in software.
1741d87a4d00Sryo 	 */
1742d87a4d00Sryo 	static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = {
1743d87a4d00Sryo 	    0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
1744d87a4d00Sryo 	    0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
1745d87a4d00Sryo 	    0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
1746d87a4d00Sryo 	    0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
1747d87a4d00Sryo 	    0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
1748d87a4d00Sryo 	};
1749d87a4d00Sryo 
1750d87a4d00Sryo 	struct vmxnet3_rss_shared *rss;
1751d87a4d00Sryo 	int i;
1752d87a4d00Sryo 
1753d87a4d00Sryo 	rss = sc->vmx_rss;
1754d87a4d00Sryo 
1755d87a4d00Sryo 	rss->hash_type =
1756d87a4d00Sryo 	    UPT1_RSS_HASH_TYPE_IPV4 | UPT1_RSS_HASH_TYPE_TCP_IPV4 |
1757d87a4d00Sryo 	    UPT1_RSS_HASH_TYPE_IPV6 | UPT1_RSS_HASH_TYPE_TCP_IPV6;
1758d87a4d00Sryo 	rss->hash_func = UPT1_RSS_HASH_FUNC_TOEPLITZ;
1759d87a4d00Sryo 	rss->hash_key_size = UPT1_RSS_MAX_KEY_SIZE;
1760d87a4d00Sryo 	rss->ind_table_size = UPT1_RSS_MAX_IND_TABLE_SIZE;
1761d87a4d00Sryo 	memcpy(rss->hash_key, rss_key, UPT1_RSS_MAX_KEY_SIZE);
1762d87a4d00Sryo 
1763d87a4d00Sryo 	for (i = 0; i < UPT1_RSS_MAX_IND_TABLE_SIZE; i++)
1764d87a4d00Sryo 		rss->ind_table[i] = i % sc->vmx_nrxqueues;
1765d87a4d00Sryo }
1766d87a4d00Sryo 
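/*
 * Refresh the driver_shared fields that can change across reinits
 * (MTU, queue counts, enabled UPT features, RSS) and hand the
 * physical address of the shared area to the device through BAR1.
 */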
1767d87a4d00Sryo static void
1768d87a4d00Sryo vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc)
1769d87a4d00Sryo {
1770d87a4d00Sryo 	struct ifnet *ifp;
1771d87a4d00Sryo 	struct vmxnet3_driver_shared *ds;
1772d87a4d00Sryo 
1773d87a4d00Sryo 	ifp = &sc->vmx_ethercom.ec_if;
1774d87a4d00Sryo 	ds = sc->vmx_ds;
1775d87a4d00Sryo 
1776d87a4d00Sryo 	ds->mtu = ifp->if_mtu;
1777d87a4d00Sryo 	ds->ntxqueue = sc->vmx_ntxqueues;
1778d87a4d00Sryo 	ds->nrxqueue = sc->vmx_nrxqueues;
1779d87a4d00Sryo 
1780d87a4d00Sryo 	ds->upt_features = 0;
1781d87a4d00Sryo 	if (ifp->if_capenable &
1782d87a4d00Sryo 	    (IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
1783d87a4d00Sryo 	    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
1784d87a4d00Sryo 		ds->upt_features |= UPT1_F_CSUM;
1785d87a4d00Sryo 	if (sc->vmx_ethercom.ec_capenable & ETHERCAP_VLAN_HWTAGGING)
1786d87a4d00Sryo 		ds->upt_features |= UPT1_F_VLAN;
1787d87a4d00Sryo 
1788d87a4d00Sryo 	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
1789d87a4d00Sryo 		ds->upt_features |= UPT1_F_RSS;
1790d87a4d00Sryo 		vmxnet3_reinit_rss_shared_data(sc);
1791d87a4d00Sryo 	}
1792d87a4d00Sryo 
1793d87a4d00Sryo 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr);
1794d87a4d00Sryo 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH,
1795d87a4d00Sryo 	    (uint64_t) sc->vmx_ds_dma.dma_paddr >> 32);
1796d87a4d00Sryo }
1797d87a4d00Sryo 
1798d87a4d00Sryo static int
1799d87a4d00Sryo vmxnet3_alloc_data(struct vmxnet3_softc *sc)
1800d87a4d00Sryo {
1801d87a4d00Sryo 	int error;
1802d87a4d00Sryo 
1803d87a4d00Sryo 	error = vmxnet3_alloc_shared_data(sc);
1804d87a4d00Sryo 	if (error)
1805d87a4d00Sryo 		return (error);
1806d87a4d00Sryo 
1807d87a4d00Sryo 	error = vmxnet3_alloc_queue_data(sc);
1808d87a4d00Sryo 	if (error)
1809d87a4d00Sryo 		return (error);
1810d87a4d00Sryo 
1811d87a4d00Sryo 	error = vmxnet3_alloc_mcast_table(sc);
1812d87a4d00Sryo 	if (error)
1813d87a4d00Sryo 		return (error);
1814d87a4d00Sryo 
1815d87a4d00Sryo 	vmxnet3_init_shared_data(sc);
1816d87a4d00Sryo 
1817d87a4d00Sryo 	return (0);
1818d87a4d00Sryo }
1819d87a4d00Sryo 
1820d87a4d00Sryo static void
1821d87a4d00Sryo vmxnet3_free_data(struct vmxnet3_softc *sc)
1822d87a4d00Sryo {
1823d87a4d00Sryo 
1824d87a4d00Sryo 	vmxnet3_free_mcast_table(sc);
1825d87a4d00Sryo 	vmxnet3_free_queue_data(sc);
1826d87a4d00Sryo 	vmxnet3_free_shared_data(sc);
1827d87a4d00Sryo }
1828d87a4d00Sryo 
1829d87a4d00Sryo static int
1830d87a4d00Sryo vmxnet3_setup_interface(struct vmxnet3_softc *sc)
1831d87a4d00Sryo {
1832d87a4d00Sryo 	struct ifnet *ifp = &sc->vmx_ethercom.ec_if;
1833d87a4d00Sryo 
1834d87a4d00Sryo 	vmxnet3_get_lladdr(sc);
1835d87a4d00Sryo 	aprint_normal_dev(sc->vmx_dev, "Ethernet address %s\n",
1836d87a4d00Sryo 	    ether_sprintf(sc->vmx_lladdr));
1837d87a4d00Sryo 	vmxnet3_set_lladdr(sc);
1838d87a4d00Sryo 
1839d87a4d00Sryo 	strlcpy(ifp->if_xname, device_xname(sc->vmx_dev), IFNAMSIZ);
1840d87a4d00Sryo 	ifp->if_softc = sc;
1841d87a4d00Sryo 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
1842d87a4d00Sryo 	ifp->if_extflags = IFEF_MPSAFE;
1843d87a4d00Sryo 	ifp->if_ioctl = vmxnet3_ioctl;
1844d87a4d00Sryo 	ifp->if_start = vmxnet3_start;
1845d87a4d00Sryo 	ifp->if_transmit = vmxnet3_transmit;
1846d87a4d00Sryo 	ifp->if_watchdog = NULL;
1847d87a4d00Sryo 	ifp->if_init = vmxnet3_init;
1848d87a4d00Sryo 	ifp->if_stop = vmxnet3_stop;
1849d87a4d00Sryo 	sc->vmx_ethercom.ec_if.if_capabilities |= IFCAP_CSUM_IPv4_Rx |
1850d87a4d00Sryo 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1851d87a4d00Sryo 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1852d87a4d00Sryo 		    IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx |
1853d87a4d00Sryo 		    IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx;
1854d87a4d00Sryo 
1855d87a4d00Sryo 	ifp->if_capenable = ifp->if_capabilities;
1856d87a4d00Sryo 
1857d87a4d00Sryo 	sc->vmx_ethercom.ec_if.if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
1858d87a4d00Sryo 
1859d87a4d00Sryo 	sc->vmx_ethercom.ec_capabilities |=
1860d87a4d00Sryo 	    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING | ETHERCAP_JUMBO_MTU;
1861d87a4d00Sryo 	sc->vmx_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
1862d87a4d00Sryo 
1863d87a4d00Sryo 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->vmx_ntxdescs);
1864d87a4d00Sryo 	IFQ_SET_READY(&ifp->if_snd);
1865d87a4d00Sryo 
1866d87a4d00Sryo 	/* Initialize ifmedia structures. */
1867d87a4d00Sryo 	sc->vmx_ethercom.ec_ifmedia = &sc->vmx_media;
1868d87a4d00Sryo 	ifmedia_init_with_lock(&sc->vmx_media, IFM_IMASK, vmxnet3_ifmedia_change,
1869d87a4d00Sryo 	    vmxnet3_ifmedia_status, sc->vmx_mtx);
1870d87a4d00Sryo 	ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1871d87a4d00Sryo 	ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL);
1872d87a4d00Sryo 	ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_10G_T, 0, NULL);
1873d87a4d00Sryo 	ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
1874d87a4d00Sryo 	ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_1000_T, 0, NULL);
1875d87a4d00Sryo 	ifmedia_set(&sc->vmx_media, IFM_ETHER | IFM_AUTO);
1876d87a4d00Sryo 
1877d87a4d00Sryo 	if_attach(ifp);
1878d87a4d00Sryo 	if_deferred_start_init(ifp, NULL);
1879d87a4d00Sryo 	ether_ifattach(ifp, sc->vmx_lladdr);
1880d87a4d00Sryo 	ether_set_ifflags_cb(&sc->vmx_ethercom, vmxnet3_ifflags_cb);
1881d87a4d00Sryo 	vmxnet3_cmd_link_status(ifp);
1882d87a4d00Sryo 
1883d87a4d00Sryo 	/* These limits should be set before the interrupts are established. */
1884d87a4d00Sryo 	sc->vmx_rx_intr_process_limit = VMXNET3_RX_INTR_PROCESS_LIMIT;
1885d87a4d00Sryo 	sc->vmx_rx_process_limit = VMXNET3_RX_PROCESS_LIMIT;
1886d87a4d00Sryo 	sc->vmx_tx_intr_process_limit = VMXNET3_TX_INTR_PROCESS_LIMIT;
1887d87a4d00Sryo 	sc->vmx_tx_process_limit = VMXNET3_TX_PROCESS_LIMIT;
1888d87a4d00Sryo 
1889d87a4d00Sryo 	return (0);
1890d87a4d00Sryo }
1891d87a4d00Sryo 
1892d87a4d00Sryo static int
1893d87a4d00Sryo vmxnet3_setup_sysctl(struct vmxnet3_softc *sc)
1894d87a4d00Sryo {
1895d87a4d00Sryo 	const char *devname;
1896d87a4d00Sryo 	struct sysctllog **log;
1897d87a4d00Sryo 	const struct sysctlnode *rnode, *rxnode, *txnode;
1898d87a4d00Sryo 	int error;
1899d87a4d00Sryo 
1900d87a4d00Sryo 	log = &sc->vmx_sysctllog;
1901d87a4d00Sryo 	devname = device_xname(sc->vmx_dev);
1902d87a4d00Sryo 
1903d87a4d00Sryo 	error = sysctl_createv(log, 0, NULL, &rnode,
1904d87a4d00Sryo 	    0, CTLTYPE_NODE, devname,
1905d87a4d00Sryo 	    SYSCTL_DESCR("vmxnet3 information and settings"),
1906d87a4d00Sryo 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
1907d87a4d00Sryo 	if (error)
1908d87a4d00Sryo 		goto out;
1909d87a4d00Sryo 	error = sysctl_createv(log, 0, &rnode, NULL,
1910d87a4d00Sryo 	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
1911d87a4d00Sryo 	    SYSCTL_DESCR("Use workqueue for packet processing"),
1912d87a4d00Sryo 	    NULL, 0, &sc->vmx_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
1913d87a4d00Sryo 	if (error)
1914d87a4d00Sryo 		goto out;
1915d87a4d00Sryo 
1916d87a4d00Sryo 	error = sysctl_createv(log, 0, &rnode, &rxnode,
1917d87a4d00Sryo 	    0, CTLTYPE_NODE, "rx",
1918d87a4d00Sryo 	    SYSCTL_DESCR("vmxnet3 information and settings for Rx"),
1919d87a4d00Sryo 	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
1920d87a4d00Sryo 	if (error)
1921d87a4d00Sryo 		goto out;
1922d87a4d00Sryo 	error = sysctl_createv(log, 0, &rxnode, NULL,
1923d87a4d00Sryo 	    CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
1924d87a4d00Sryo 	    SYSCTL_DESCR("max number of Rx packets to process for interrupt processing"),
1925d87a4d00Sryo 	    NULL, 0, &sc->vmx_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
1926d87a4d00Sryo 	if (error)
1927d87a4d00Sryo 		goto out;
1928d87a4d00Sryo 	error = sysctl_createv(log, 0, &rxnode, NULL,
1929d87a4d00Sryo 	    CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
1930d87a4d00Sryo 	    SYSCTL_DESCR("max number of Rx packets to process for deferred processing"),
1931d87a4d00Sryo 	    NULL, 0, &sc->vmx_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
1932d87a4d00Sryo 	if (error)
1933d87a4d00Sryo 		goto out;
1934d87a4d00Sryo 
1935d87a4d00Sryo 	error = sysctl_createv(log, 0, &rnode, &txnode,
1936d87a4d00Sryo 	    0, CTLTYPE_NODE, "tx",
1937d87a4d00Sryo 	    SYSCTL_DESCR("vmxnet3 information and settings for Tx"),
1938d87a4d00Sryo 	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
1939d87a4d00Sryo 	if (error)
1940d87a4d00Sryo 		goto out;
1941d87a4d00Sryo 	error = sysctl_createv(log, 0, &txnode, NULL,
1942d87a4d00Sryo 	    CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
1943d87a4d00Sryo 	    SYSCTL_DESCR("max number of Tx packets to process for interrupt processing"),
1944d87a4d00Sryo 	    NULL, 0, &sc->vmx_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
1945d87a4d00Sryo 	if (error)
1946d87a4d00Sryo 		goto out;
1947d87a4d00Sryo 	error = sysctl_createv(log, 0, &txnode, NULL,
1948d87a4d00Sryo 	    CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
1949d87a4d00Sryo 	    SYSCTL_DESCR("max number of Tx packets to process for deferred processing"),
1950d87a4d00Sryo 	    NULL, 0, &sc->vmx_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
1951d87a4d00Sryo 
1952d87a4d00Sryo out:
1953d87a4d00Sryo 	if (error) {
1954d87a4d00Sryo 		aprint_error_dev(sc->vmx_dev,
1955d87a4d00Sryo 		    "unable to create sysctl node\n");
1956d87a4d00Sryo 		sysctl_teardown(log);
1957d87a4d00Sryo 	}
1958d87a4d00Sryo 	return error;
1959d87a4d00Sryo }
1960d87a4d00Sryo 
1961d87a4d00Sryo static int
1962d87a4d00Sryo vmxnet3_setup_stats(struct vmxnet3_softc *sc)
1963d87a4d00Sryo {
1964d87a4d00Sryo 	struct vmxnet3_queue *vmxq;
1965d87a4d00Sryo 	struct vmxnet3_txqueue *txq;
1966d87a4d00Sryo 	struct vmxnet3_rxqueue *rxq;
1967d87a4d00Sryo 	int i;
1968d87a4d00Sryo 
1969d87a4d00Sryo 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
1970d87a4d00Sryo 		vmxq = &sc->vmx_queue[i];
1971d87a4d00Sryo 		txq = &vmxq->vxq_txqueue;
1972d87a4d00Sryo 		evcnt_attach_dynamic(&txq->vxtxq_intr, EVCNT_TYPE_INTR,
1973d87a4d00Sryo 		    NULL, txq->vxtxq_name, "Interrupt on queue");
1974d87a4d00Sryo 		evcnt_attach_dynamic(&txq->vxtxq_defer, EVCNT_TYPE_MISC,
1975d87a4d00Sryo 		    NULL, txq->vxtxq_name, "Handled queue in softint/workqueue");
1976d87a4d00Sryo 		evcnt_attach_dynamic(&txq->vxtxq_deferreq, EVCNT_TYPE_MISC,
1977d87a4d00Sryo 		    NULL, txq->vxtxq_name, "Requested in softint/workqueue");
1978d87a4d00Sryo 		evcnt_attach_dynamic(&txq->vxtxq_pcqdrop, EVCNT_TYPE_MISC,
1979d87a4d00Sryo 		    NULL, txq->vxtxq_name, "Dropped in pcq");
1980d87a4d00Sryo 		evcnt_attach_dynamic(&txq->vxtxq_transmitdef, EVCNT_TYPE_MISC,
1981d87a4d00Sryo 		    NULL, txq->vxtxq_name, "Deferred transmit");
1982d87a4d00Sryo 		evcnt_attach_dynamic(&txq->vxtxq_watchdogto, EVCNT_TYPE_MISC,
1983d87a4d00Sryo 		    NULL, txq->vxtxq_name, "Watchdog timeout");
1984d87a4d00Sryo 		evcnt_attach_dynamic(&txq->vxtxq_defragged, EVCNT_TYPE_MISC,
1985d87a4d00Sryo 		    NULL, txq->vxtxq_name, "m_defrag succeeded");
1986d87a4d00Sryo 		evcnt_attach_dynamic(&txq->vxtxq_defrag_failed, EVCNT_TYPE_MISC,
1987d87a4d00Sryo 		    NULL, txq->vxtxq_name, "m_defrag failed");
1988d87a4d00Sryo 	}
1989d87a4d00Sryo 
1990d87a4d00Sryo 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
1991d87a4d00Sryo 		vmxq = &sc->vmx_queue[i];
1992d87a4d00Sryo 		rxq = &vmxq->vxq_rxqueue;
1993d87a4d00Sryo 		evcnt_attach_dynamic(&rxq->vxrxq_intr, EVCNT_TYPE_INTR,
1994d87a4d00Sryo 		    NULL, rxq->vxrxq_name, "Interrupt on queue");
1995d87a4d00Sryo 		evcnt_attach_dynamic(&rxq->vxrxq_defer, EVCNT_TYPE_MISC,
1996d87a4d00Sryo 		    NULL, rxq->vxrxq_name, "Handled queue in softint/workqueue");
1997d87a4d00Sryo 		evcnt_attach_dynamic(&rxq->vxrxq_deferreq, EVCNT_TYPE_MISC,
1998d87a4d00Sryo 		    NULL, rxq->vxrxq_name, "Requested in softint/workqueue");
1999d87a4d00Sryo 		evcnt_attach_dynamic(&rxq->vxrxq_mgetcl_failed, EVCNT_TYPE_MISC,
2000d87a4d00Sryo 		    NULL, rxq->vxrxq_name, "MCLGET failed");
2001d87a4d00Sryo 		evcnt_attach_dynamic(&rxq->vxrxq_mbuf_load_failed, EVCNT_TYPE_MISC,
2002d87a4d00Sryo 		    NULL, rxq->vxrxq_name, "bus_dmamap_load_mbuf failed");
2003d87a4d00Sryo 	}
2004d87a4d00Sryo 
2005d87a4d00Sryo 	evcnt_attach_dynamic(&sc->vmx_event_intr, EVCNT_TYPE_INTR,
2006d87a4d00Sryo 	    NULL, device_xname(sc->vmx_dev), "Interrupt for other events");
2007d87a4d00Sryo 	evcnt_attach_dynamic(&sc->vmx_event_link, EVCNT_TYPE_MISC,
2008d87a4d00Sryo 	    NULL, device_xname(sc->vmx_dev), "Link status event");
2009d87a4d00Sryo 	evcnt_attach_dynamic(&sc->vmx_event_txqerror, EVCNT_TYPE_MISC,
2010d87a4d00Sryo 	    NULL, device_xname(sc->vmx_dev), "Tx queue error event");
2011d87a4d00Sryo 	evcnt_attach_dynamic(&sc->vmx_event_rxqerror, EVCNT_TYPE_MISC,
2012d87a4d00Sryo 	    NULL, device_xname(sc->vmx_dev), "Rx queue error event");
2013d87a4d00Sryo 	evcnt_attach_dynamic(&sc->vmx_event_dic, EVCNT_TYPE_MISC,
2014d87a4d00Sryo 	    NULL, device_xname(sc->vmx_dev), "Device impl change event");
2015d87a4d00Sryo 	evcnt_attach_dynamic(&sc->vmx_event_debug, EVCNT_TYPE_MISC,
2016d87a4d00Sryo 	    NULL, device_xname(sc->vmx_dev), "Debug event");
2017d87a4d00Sryo 
2018d87a4d00Sryo 	return 0;
2019d87a4d00Sryo }
2020d87a4d00Sryo 
2021d87a4d00Sryo static void
2022d87a4d00Sryo vmxnet3_teardown_stats(struct vmxnet3_softc *sc)
2023d87a4d00Sryo {
2024d87a4d00Sryo 	struct vmxnet3_queue *vmxq;
2025d87a4d00Sryo 	struct vmxnet3_txqueue *txq;
2026d87a4d00Sryo 	struct vmxnet3_rxqueue *rxq;
2027d87a4d00Sryo 	int i;
2028d87a4d00Sryo 
2029d87a4d00Sryo 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
2030d87a4d00Sryo 		vmxq = &sc->vmx_queue[i];
2031d87a4d00Sryo 		txq = &vmxq->vxq_txqueue;
2032d87a4d00Sryo 		evcnt_detach(&txq->vxtxq_intr);
2033d87a4d00Sryo 		evcnt_detach(&txq->vxtxq_defer);
2034d87a4d00Sryo 		evcnt_detach(&txq->vxtxq_deferreq);
2035d87a4d00Sryo 		evcnt_detach(&txq->vxtxq_pcqdrop);
2036d87a4d00Sryo 		evcnt_detach(&txq->vxtxq_transmitdef);
2037d87a4d00Sryo 		evcnt_detach(&txq->vxtxq_watchdogto);
2038d87a4d00Sryo 		evcnt_detach(&txq->vxtxq_defragged);
2039d87a4d00Sryo 		evcnt_detach(&txq->vxtxq_defrag_failed);
2040d87a4d00Sryo 	}
2041d87a4d00Sryo 
2042d87a4d00Sryo 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
2043d87a4d00Sryo 		vmxq = &sc->vmx_queue[i];
2044d87a4d00Sryo 		rxq = &vmxq->vxq_rxqueue;
2045d87a4d00Sryo 		evcnt_detach(&rxq->vxrxq_intr);
2046d87a4d00Sryo 		evcnt_detach(&rxq->vxrxq_defer);
2047d87a4d00Sryo 		evcnt_detach(&rxq->vxrxq_deferreq);
2048d87a4d00Sryo 		evcnt_detach(&rxq->vxrxq_mgetcl_failed);
2049d87a4d00Sryo 		evcnt_detach(&rxq->vxrxq_mbuf_load_failed);
2050d87a4d00Sryo 	}
2051d87a4d00Sryo 
2052d87a4d00Sryo 	evcnt_detach(&sc->vmx_event_intr);
2053d87a4d00Sryo 	evcnt_detach(&sc->vmx_event_link);
2054d87a4d00Sryo 	evcnt_detach(&sc->vmx_event_txqerror);
2055d87a4d00Sryo 	evcnt_detach(&sc->vmx_event_rxqerror);
2056d87a4d00Sryo 	evcnt_detach(&sc->vmx_event_dic);
2057d87a4d00Sryo 	evcnt_detach(&sc->vmx_event_debug);
2058d87a4d00Sryo }
2059d87a4d00Sryo 
2060d87a4d00Sryo static void
2061d87a4d00Sryo vmxnet3_evintr(struct vmxnet3_softc *sc)
2062d87a4d00Sryo {
2063d87a4d00Sryo 	device_t dev;
2064d87a4d00Sryo 	struct vmxnet3_txq_shared *ts;
2065d87a4d00Sryo 	struct vmxnet3_rxq_shared *rs;
2066d87a4d00Sryo 	uint32_t event;
2067d87a4d00Sryo 	int reset;
2068d87a4d00Sryo 
2069d87a4d00Sryo 	dev = sc->vmx_dev;
2070d87a4d00Sryo 	reset = 0;
2071d87a4d00Sryo 
2072d87a4d00Sryo 	VMXNET3_CORE_LOCK(sc);
2073d87a4d00Sryo 
2074d87a4d00Sryo 	/* Clear events. */
2075d87a4d00Sryo 	event = sc->vmx_ds->event;
2076d87a4d00Sryo 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event);
2077d87a4d00Sryo 
2078d87a4d00Sryo 	if (event & VMXNET3_EVENT_LINK) {
2079d87a4d00Sryo 		sc->vmx_event_link.ev_count++;
2080d87a4d00Sryo 		vmxnet3_if_link_status(sc);
2081d87a4d00Sryo 		if (sc->vmx_link_active != 0)
2082d87a4d00Sryo 			if_schedule_deferred_start(&sc->vmx_ethercom.ec_if);
2083d87a4d00Sryo 	}
2084d87a4d00Sryo 
2085d87a4d00Sryo 	if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
2086d87a4d00Sryo 		if (event & VMXNET3_EVENT_TQERROR)
2087d87a4d00Sryo 			sc->vmx_event_txqerror.ev_count++;
2088d87a4d00Sryo 		if (event & VMXNET3_EVENT_RQERROR)
2089d87a4d00Sryo 			sc->vmx_event_rxqerror.ev_count++;
2090d87a4d00Sryo 
2091d87a4d00Sryo 		reset = 1;
2092d87a4d00Sryo 		vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS);
2093d87a4d00Sryo 		ts = sc->vmx_queue[0].vxq_txqueue.vxtxq_ts;
2094d87a4d00Sryo 		if (ts->stopped != 0)
2095d87a4d00Sryo 			device_printf(dev, "Tx queue error %#x\n", ts->error);
2096d87a4d00Sryo 		rs = sc->vmx_queue[0].vxq_rxqueue.vxrxq_rs;
2097d87a4d00Sryo 		if (rs->stopped != 0)
2098d87a4d00Sryo 			device_printf(dev, "Rx queue error %#x\n", rs->error);
2099d87a4d00Sryo 		device_printf(dev, "Rx/Tx queue error event ... resetting\n");
2100d87a4d00Sryo 	}
2101d87a4d00Sryo 
2102d87a4d00Sryo 	if (event & VMXNET3_EVENT_DIC) {
2103d87a4d00Sryo 		sc->vmx_event_dic.ev_count++;
2104d87a4d00Sryo 		device_printf(dev, "device implementation change event\n");
2105d87a4d00Sryo 	}
2106d87a4d00Sryo 	if (event & VMXNET3_EVENT_DEBUG) {
2107d87a4d00Sryo 		sc->vmx_event_debug.ev_count++;
2108d87a4d00Sryo 		device_printf(dev, "debug event\n");
2109d87a4d00Sryo 	}
2110d87a4d00Sryo 
2111d87a4d00Sryo 	if (reset != 0)
2112d87a4d00Sryo 		vmxnet3_init_locked(sc);
2113d87a4d00Sryo 
2114d87a4d00Sryo 	VMXNET3_CORE_UNLOCK(sc);
2115d87a4d00Sryo }
2116d87a4d00Sryo 
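/*
 * Reap completed Tx descriptors, up to "limit" entries: unload the
 * DMA maps, update the interface statistics and free the transmitted
 * mbufs.  Returns true when the limit was hit and more work remains.
 */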
2117d87a4d00Sryo static bool
2118d87a4d00Sryo vmxnet3_txq_eof(struct vmxnet3_txqueue *txq, u_int limit)
2119d87a4d00Sryo {
2120d87a4d00Sryo 	struct vmxnet3_softc *sc;
2121d87a4d00Sryo 	struct vmxnet3_txring *txr;
2122d87a4d00Sryo 	struct vmxnet3_comp_ring *txc;
2123d87a4d00Sryo 	struct vmxnet3_txcompdesc *txcd;
2124d87a4d00Sryo 	struct vmxnet3_txbuf *txb;
2125d87a4d00Sryo 	struct ifnet *ifp;
2126d87a4d00Sryo 	struct mbuf *m;
2127d87a4d00Sryo 	u_int sop;
2128d87a4d00Sryo 	bool more = false;
2129d87a4d00Sryo 
2130d87a4d00Sryo 	sc = txq->vxtxq_sc;
2131d87a4d00Sryo 	txr = &txq->vxtxq_cmd_ring;
2132d87a4d00Sryo 	txc = &txq->vxtxq_comp_ring;
2133d87a4d00Sryo 	ifp = &sc->vmx_ethercom.ec_if;
2134d87a4d00Sryo 
2135d87a4d00Sryo 	VMXNET3_TXQ_LOCK_ASSERT(txq);
2136d87a4d00Sryo 
2137d87a4d00Sryo 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2138d87a4d00Sryo 	for (;;) {
2139d87a4d00Sryo 		if (limit-- == 0) {
2140d87a4d00Sryo 			more = true;
2141d87a4d00Sryo 			break;
2142d87a4d00Sryo 		}
2143d87a4d00Sryo 
2144d87a4d00Sryo 		txcd = &txc->vxcr_u.txcd[txc->vxcr_next];
2145d87a4d00Sryo 		if (txcd->gen != txc->vxcr_gen)
2146d87a4d00Sryo 			break;
2147d87a4d00Sryo 		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
2148d87a4d00Sryo 
2149d87a4d00Sryo 		if (++txc->vxcr_next == txc->vxcr_ndesc) {
2150d87a4d00Sryo 			txc->vxcr_next = 0;
2151d87a4d00Sryo 			txc->vxcr_gen ^= 1;
2152d87a4d00Sryo 		}
2153d87a4d00Sryo 
2154d87a4d00Sryo 		sop = txr->vxtxr_next;
2155d87a4d00Sryo 		txb = &txr->vxtxr_txbuf[sop];
2156d87a4d00Sryo 
2157d87a4d00Sryo 		if ((m = txb->vtxb_m) != NULL) {
2158d87a4d00Sryo 			bus_dmamap_sync(sc->vmx_dmat, txb->vtxb_dmamap,
2159d87a4d00Sryo 			    0, txb->vtxb_dmamap->dm_mapsize,
2160d87a4d00Sryo 			    BUS_DMASYNC_POSTWRITE);
2161d87a4d00Sryo 			bus_dmamap_unload(sc->vmx_dmat, txb->vtxb_dmamap);
2162d87a4d00Sryo 
2163be6f2fceSriastradh 			if_statinc_ref(ifp, nsr, if_opackets);
2164be6f2fceSriastradh 			if_statadd_ref(ifp, nsr, if_obytes, m->m_pkthdr.len);
2165d87a4d00Sryo 			if (m->m_flags & M_MCAST)
2166be6f2fceSriastradh 				if_statinc_ref(ifp, nsr, if_omcasts);
2167d87a4d00Sryo 
2168d87a4d00Sryo 			m_freem(m);
2169d87a4d00Sryo 			txb->vtxb_m = NULL;
2170d87a4d00Sryo 		}
2171d87a4d00Sryo 
2172d87a4d00Sryo 		txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc;
2173d87a4d00Sryo 	}
2174d87a4d00Sryo 	IF_STAT_PUTREF(ifp);
2175d87a4d00Sryo 
2176d87a4d00Sryo 	if (txr->vxtxr_head == txr->vxtxr_next)
2177d87a4d00Sryo 		txq->vxtxq_watchdog = 0;
2178d87a4d00Sryo 
2179d87a4d00Sryo 	return more;
2180d87a4d00Sryo }
2181d87a4d00Sryo 
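/*
 * Attach a fresh mbuf cluster to the ring slot at vxrxr_fill: the
 * cluster is loaded on the spare DMA map, which is then swapped with
 * the slot's map, the descriptor's generation bit is set so the
 * device may use it, and the fill index is advanced.  Only ring 0
 * (head buffers) is populated for now.
 */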
2182d87a4d00Sryo static int
2183d87a4d00Sryo vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq,
2184d87a4d00Sryo     struct vmxnet3_rxring *rxr)
2185d87a4d00Sryo {
2186d87a4d00Sryo 	struct mbuf *m;
2187d87a4d00Sryo 	struct vmxnet3_rxdesc *rxd;
2188d87a4d00Sryo 	struct vmxnet3_rxbuf *rxb;
2189d87a4d00Sryo 	bus_dma_tag_t tag;
2190d87a4d00Sryo 	bus_dmamap_t dmap;
2191d87a4d00Sryo 	int idx, btype, error;
2192d87a4d00Sryo 
2193d87a4d00Sryo 	tag = sc->vmx_dmat;
2194d87a4d00Sryo 	dmap = rxr->vxrxr_spare_dmap;
2195d87a4d00Sryo 	idx = rxr->vxrxr_fill;
2196d87a4d00Sryo 	rxd = &rxr->vxrxr_rxd[idx];
2197d87a4d00Sryo 	rxb = &rxr->vxrxr_rxbuf[idx];
2198d87a4d00Sryo 
2199d87a4d00Sryo 	/* Don't allocate buffers for ring 2 for now. */
2200d87a4d00Sryo 	if (rxr->vxrxr_rid != 0)
2201d87a4d00Sryo 		return -1;
2202d87a4d00Sryo 	btype = VMXNET3_BTYPE_HEAD;
2203d87a4d00Sryo 
2204d87a4d00Sryo 	MGETHDR(m, M_DONTWAIT, MT_DATA);
2205d87a4d00Sryo 	if (m == NULL)
2206d87a4d00Sryo 		return (ENOBUFS);
2207d87a4d00Sryo 
2208d87a4d00Sryo 	MCLGET(m, M_DONTWAIT);
2209d87a4d00Sryo 	if ((m->m_flags & M_EXT) == 0) {
2210d87a4d00Sryo 		rxq->vxrxq_mgetcl_failed.ev_count++;
2211d87a4d00Sryo 		m_freem(m);
2212d87a4d00Sryo 		return (ENOBUFS);
2213d87a4d00Sryo 	}
2214d87a4d00Sryo 
2215d87a4d00Sryo 	m->m_pkthdr.len = m->m_len = JUMBO_LEN;
2216d87a4d00Sryo 	m_adj(m, ETHER_ALIGN);
2217d87a4d00Sryo 
2218d87a4d00Sryo 	error = bus_dmamap_load_mbuf(sc->vmx_dmat, dmap, m, BUS_DMA_NOWAIT);
2219d87a4d00Sryo 	if (error) {
2220d87a4d00Sryo 		m_freem(m);
2221d87a4d00Sryo 		rxq->vxrxq_mbuf_load_failed.ev_count++;
2222d87a4d00Sryo 		return (error);
2223d87a4d00Sryo 	}
2224d87a4d00Sryo 
2225d87a4d00Sryo 	if (rxb->vrxb_m != NULL) {
2226d87a4d00Sryo 		bus_dmamap_sync(tag, rxb->vrxb_dmamap,
2227d87a4d00Sryo 		    0, rxb->vrxb_dmamap->dm_mapsize,
2228d87a4d00Sryo 		    BUS_DMASYNC_POSTREAD);
2229d87a4d00Sryo 		bus_dmamap_unload(tag, rxb->vrxb_dmamap);
2230d87a4d00Sryo 	}
2231d87a4d00Sryo 
2232d87a4d00Sryo 	rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap;
2233d87a4d00Sryo 	rxb->vrxb_dmamap = dmap;
2234d87a4d00Sryo 	rxb->vrxb_m = m;
2235d87a4d00Sryo 
2236d87a4d00Sryo 	rxd->addr = DMAADDR(dmap);
2237d87a4d00Sryo 	rxd->len = m->m_pkthdr.len;
2238d87a4d00Sryo 	rxd->btype = btype;
2239d87a4d00Sryo 	rxd->gen = rxr->vxrxr_gen;
2240d87a4d00Sryo 
2241d87a4d00Sryo 	vmxnet3_rxr_increment_fill(rxr);
2242d87a4d00Sryo 	return (0);
2243d87a4d00Sryo }
2244d87a4d00Sryo 
2245d87a4d00Sryo static void
2246d87a4d00Sryo vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *rxq,
2247d87a4d00Sryo     struct vmxnet3_rxring *rxr, int idx)
2248d87a4d00Sryo {
2249d87a4d00Sryo 	struct vmxnet3_rxdesc *rxd;
2250d87a4d00Sryo 
2251d87a4d00Sryo 	rxd = &rxr->vxrxr_rxd[idx];
2252d87a4d00Sryo 	rxd->gen = rxr->vxrxr_gen;
2253d87a4d00Sryo 	vmxnet3_rxr_increment_fill(rxr);
2254d87a4d00Sryo }
2255d87a4d00Sryo 
2256d87a4d00Sryo static void
2257d87a4d00Sryo vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *rxq)
2258d87a4d00Sryo {
2259d87a4d00Sryo 	struct vmxnet3_softc *sc;
2260d87a4d00Sryo 	struct vmxnet3_rxring *rxr;
2261d87a4d00Sryo 	struct vmxnet3_comp_ring *rxc;
2262d87a4d00Sryo 	struct vmxnet3_rxcompdesc *rxcd;
2263d87a4d00Sryo 	int idx, eof;
2264d87a4d00Sryo 
2265d87a4d00Sryo 	sc = rxq->vxrxq_sc;
2266d87a4d00Sryo 	rxc = &rxq->vxrxq_comp_ring;
2267d87a4d00Sryo 
2268d87a4d00Sryo 	do {
2269d87a4d00Sryo 		rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
2270d87a4d00Sryo 		if (rxcd->gen != rxc->vxcr_gen)
2271d87a4d00Sryo 			break;		/* Not expected. */
2272d87a4d00Sryo 		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
2273d87a4d00Sryo 
2274d87a4d00Sryo 		if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
2275d87a4d00Sryo 			rxc->vxcr_next = 0;
2276d87a4d00Sryo 			rxc->vxcr_gen ^= 1;
2277d87a4d00Sryo 		}
2278d87a4d00Sryo 
2279d87a4d00Sryo 		idx = rxcd->rxd_idx;
2280d87a4d00Sryo 		eof = rxcd->eop;
2281d87a4d00Sryo 		if (rxcd->qid < sc->vmx_nrxqueues)
2282d87a4d00Sryo 			rxr = &rxq->vxrxq_cmd_ring[0];
2283d87a4d00Sryo 		else
2284d87a4d00Sryo 			rxr = &rxq->vxrxq_cmd_ring[1];
2285d87a4d00Sryo 		vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2286d87a4d00Sryo 	} while (!eof);
2287d87a4d00Sryo }
2288d87a4d00Sryo 
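/*
 * Translate the checksum bits of the Rx completion descriptor into
 * mbuf csum_flags, marking bad IPv4/TCP/UDP checksums.  Fragments
 * only get the IPv4 header checksum flags.
 */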
2289d87a4d00Sryo static void
2290d87a4d00Sryo vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
2291d87a4d00Sryo {
2292d87a4d00Sryo 	if (rxcd->no_csum)
2293d87a4d00Sryo 		return;
2294d87a4d00Sryo 
2295d87a4d00Sryo 	if (rxcd->ipv4) {
2296d87a4d00Sryo 		m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2297d87a4d00Sryo 		if (rxcd->ipcsum_ok == 0)
2298d87a4d00Sryo 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
2299d87a4d00Sryo 	}
2300d87a4d00Sryo 
2301d87a4d00Sryo 	if (rxcd->fragment)
2302d87a4d00Sryo 		return;
2303d87a4d00Sryo 
2304d87a4d00Sryo 	if (rxcd->tcp) {
2305d87a4d00Sryo 		m->m_pkthdr.csum_flags |=
2306d87a4d00Sryo 		    rxcd->ipv4 ? M_CSUM_TCPv4 : M_CSUM_TCPv6;
2307d87a4d00Sryo 		if ((rxcd->csum_ok) == 0)
2308d87a4d00Sryo 			m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
2309d87a4d00Sryo 	}
2310d87a4d00Sryo 
2311d87a4d00Sryo 	if (rxcd->udp) {
2312d87a4d00Sryo 		m->m_pkthdr.csum_flags |=
2313d87a4d00Sryo 		    rxcd->ipv4 ? M_CSUM_UDPv4 : M_CSUM_UDPv6;
2314d87a4d00Sryo 		if ((rxcd->csum_ok) == 0)
2315d87a4d00Sryo 			m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
2316d87a4d00Sryo 	}
2317d87a4d00Sryo }
2318d87a4d00Sryo 
2319d87a4d00Sryo static void
2320d87a4d00Sryo vmxnet3_rxq_input(struct vmxnet3_rxqueue *rxq,
2321d87a4d00Sryo     struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
2322d87a4d00Sryo {
2323d87a4d00Sryo 	struct vmxnet3_softc *sc;
2324d87a4d00Sryo 	struct ifnet *ifp;
2325d87a4d00Sryo 
2326d87a4d00Sryo 	sc = rxq->vxrxq_sc;
2327d87a4d00Sryo 	ifp = &sc->vmx_ethercom.ec_if;
2328d87a4d00Sryo 
2329d87a4d00Sryo 	if (rxcd->error) {
2330d87a4d00Sryo 		if_statinc(ifp, if_ierrors);
2331d87a4d00Sryo 		m_freem(m);
2332d87a4d00Sryo 		return;
2333d87a4d00Sryo 	}
2334d87a4d00Sryo 
2335d87a4d00Sryo 	if (!rxcd->no_csum)
2336d87a4d00Sryo 		vmxnet3_rx_csum(rxcd, m);
2337d87a4d00Sryo 	if (rxcd->vlan)
2338d87a4d00Sryo 		vlan_set_tag(m, rxcd->vtag);
2339d87a4d00Sryo 
2340d87a4d00Sryo 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2341be6f2fceSriastradh 	if_statinc_ref(ifp, nsr, if_ipackets);
2342be6f2fceSriastradh 	if_statadd_ref(ifp, nsr, if_ibytes, m->m_pkthdr.len);
2343d87a4d00Sryo 	IF_STAT_PUTREF(ifp);
2344d87a4d00Sryo 
2345d87a4d00Sryo 	if_percpuq_enqueue(ifp->if_percpuq, m);
2346d87a4d00Sryo }
2347d87a4d00Sryo 
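/*
 * Main Rx completion loop: consume up to "limit" completion
 * descriptors, resynchronize the fill index when the host skipped
 * slots, chain SOP/EOP buffers into a single mbuf chain, refill the
 * ring via vmxnet3_newbuf() and hand completed frames to
 * vmxnet3_rxq_input().  Returns true if more work remains.
 */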
2348d87a4d00Sryo static bool
2349d87a4d00Sryo vmxnet3_rxq_eof(struct vmxnet3_rxqueue *rxq, u_int limit)
2350d87a4d00Sryo {
2351d87a4d00Sryo 	struct vmxnet3_softc *sc;
2352d87a4d00Sryo 	struct ifnet *ifp;
2353d87a4d00Sryo 	struct vmxnet3_rxring *rxr;
2354d87a4d00Sryo 	struct vmxnet3_comp_ring *rxc;
2355d87a4d00Sryo 	struct vmxnet3_rxdesc *rxd __diagused;
2356d87a4d00Sryo 	struct vmxnet3_rxcompdesc *rxcd;
2357d87a4d00Sryo 	struct mbuf *m, *m_head, *m_tail;
235835492473Sryo 	u_int idx, length;
2359d87a4d00Sryo 	bool more = false;
2360d87a4d00Sryo 
2361d87a4d00Sryo 	sc = rxq->vxrxq_sc;
2362d87a4d00Sryo 	ifp = &sc->vmx_ethercom.ec_if;
2363d87a4d00Sryo 	rxc = &rxq->vxrxq_comp_ring;
2364d87a4d00Sryo 
2365d87a4d00Sryo 	VMXNET3_RXQ_LOCK_ASSERT(rxq);
2366d87a4d00Sryo 
2367db465cddSriastradh 	if (rxq->vxrxq_stopping)
2368d87a4d00Sryo 		return more;
2369d87a4d00Sryo 
2370d87a4d00Sryo 	m_head = rxq->vxrxq_mhead;
2371d87a4d00Sryo 	rxq->vxrxq_mhead = NULL;
2372d87a4d00Sryo 	m_tail = rxq->vxrxq_mtail;
2373d87a4d00Sryo 	rxq->vxrxq_mtail = NULL;
2374d87a4d00Sryo 	KASSERT(m_head == NULL || m_tail != NULL);
2375d87a4d00Sryo 
2376d87a4d00Sryo 	for (;;) {
2377d87a4d00Sryo 		if (limit-- == 0) {
2378d87a4d00Sryo 			more = true;
2379d87a4d00Sryo 			break;
2380d87a4d00Sryo 		}
2381d87a4d00Sryo 
2382d87a4d00Sryo 		rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
2383d87a4d00Sryo 		if (rxcd->gen != rxc->vxcr_gen) {
2384d87a4d00Sryo 			rxq->vxrxq_mhead = m_head;
2385d87a4d00Sryo 			rxq->vxrxq_mtail = m_tail;
2386d87a4d00Sryo 			break;
2387d87a4d00Sryo 		}
2388d87a4d00Sryo 		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
2389d87a4d00Sryo 
2390d87a4d00Sryo 		if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
2391d87a4d00Sryo 			rxc->vxcr_next = 0;
2392d87a4d00Sryo 			rxc->vxcr_gen ^= 1;
2393d87a4d00Sryo 		}
2394d87a4d00Sryo 
2395d87a4d00Sryo 		idx = rxcd->rxd_idx;
2396d87a4d00Sryo 		length = rxcd->len;
2397d87a4d00Sryo 		if (rxcd->qid < sc->vmx_nrxqueues)
2398d87a4d00Sryo 			rxr = &rxq->vxrxq_cmd_ring[0];
2399d87a4d00Sryo 		else
2400d87a4d00Sryo 			rxr = &rxq->vxrxq_cmd_ring[1];
2401d87a4d00Sryo 		rxd = &rxr->vxrxr_rxd[idx];
2402d87a4d00Sryo 
2403d87a4d00Sryo 		m = rxr->vxrxr_rxbuf[idx].vrxb_m;
2404d87a4d00Sryo 		KASSERT(m != NULL);
2405d87a4d00Sryo 
2406d87a4d00Sryo 		/*
2407d87a4d00Sryo 		 * The host may skip descriptors. We detect this when this
2408d87a4d00Sryo 		 * descriptor does not match the previous fill index. Catch
2409d87a4d00Sryo 		 * up with the host now.
2410d87a4d00Sryo 		 */
2411d87a4d00Sryo 		if (__predict_false(rxr->vxrxr_fill != idx)) {
2412d87a4d00Sryo 			while (rxr->vxrxr_fill != idx) {
2413d87a4d00Sryo 				rxr->vxrxr_rxd[rxr->vxrxr_fill].gen =
2414d87a4d00Sryo 				    rxr->vxrxr_gen;
2415d87a4d00Sryo 				vmxnet3_rxr_increment_fill(rxr);
2416d87a4d00Sryo 			}
2417d87a4d00Sryo 		}
2418d87a4d00Sryo 
2419d87a4d00Sryo 		if (rxcd->sop) {
2420d87a4d00Sryo 			/* start of frame w/o head buffer */
2421d87a4d00Sryo 			KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD);
2422d87a4d00Sryo 			/* start of frame not in ring 0 */
2423d87a4d00Sryo 			KASSERT(rxr == &rxq->vxrxq_cmd_ring[0]);
2424d87a4d00Sryo 			/* duplicate start of frame? */
2425d87a4d00Sryo 			KASSERT(m_head == NULL);
2426d87a4d00Sryo 
2427d87a4d00Sryo 			if (length == 0) {
2428d87a4d00Sryo 				/* Just ignore this descriptor. */
2429d87a4d00Sryo 				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2430d87a4d00Sryo 				goto nextp;
2431d87a4d00Sryo 			}
2432d87a4d00Sryo 
2433d87a4d00Sryo 			if (vmxnet3_newbuf(sc, rxq, rxr) != 0) {
2434d87a4d00Sryo 				if_statinc(ifp, if_iqdrops);
2435d87a4d00Sryo 				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2436d87a4d00Sryo 				if (!rxcd->eop)
2437d87a4d00Sryo 					vmxnet3_rxq_discard_chain(rxq);
2438d87a4d00Sryo 				goto nextp;
2439d87a4d00Sryo 			}
2440d87a4d00Sryo 
2441d87a4d00Sryo 			m_set_rcvif(m, ifp);
2442d87a4d00Sryo 			m->m_pkthdr.len = m->m_len = length;
2443d87a4d00Sryo 			m->m_pkthdr.csum_flags = 0;
2444d87a4d00Sryo 			m_head = m_tail = m;
2445d87a4d00Sryo 
2446d87a4d00Sryo 		} else {
2447d87a4d00Sryo 			/* non start of frame w/o body buffer */
2448d87a4d00Sryo 			KASSERT(rxd->btype == VMXNET3_BTYPE_BODY);
2449d87a4d00Sryo 			/* frame not started? */
2450d87a4d00Sryo 			KASSERT(m_head != NULL);
2451d87a4d00Sryo 
2452d87a4d00Sryo 			if (vmxnet3_newbuf(sc, rxq, rxr) != 0) {
2453d87a4d00Sryo 				if_statinc(ifp, if_iqdrops);
2454d87a4d00Sryo 				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2455d87a4d00Sryo 				if (!rxcd->eop)
2456d87a4d00Sryo 					vmxnet3_rxq_discard_chain(rxq);
2457d87a4d00Sryo 				m_freem(m_head);
2458d87a4d00Sryo 				m_head = m_tail = NULL;
2459d87a4d00Sryo 				goto nextp;
2460d87a4d00Sryo 			}
2461d87a4d00Sryo 
2462d87a4d00Sryo 			m->m_len = length;
2463d87a4d00Sryo 			m_head->m_pkthdr.len += length;
2464d87a4d00Sryo 			m_tail->m_next = m;
2465d87a4d00Sryo 			m_tail = m;
2466d87a4d00Sryo 		}
2467d87a4d00Sryo 
2468d87a4d00Sryo 		if (rxcd->eop) {
2469d87a4d00Sryo 			vmxnet3_rxq_input(rxq, rxcd, m_head);
2470d87a4d00Sryo 			m_head = m_tail = NULL;
2471d87a4d00Sryo 
2472d87a4d00Sryo 			/* Must recheck after dropping the Rx lock. */
2473db465cddSriastradh 			if (rxq->vxrxq_stopping)
2474d87a4d00Sryo 				break;
2475d87a4d00Sryo 		}
2476d87a4d00Sryo 
2477d87a4d00Sryo nextp:
2478d87a4d00Sryo 		if (__predict_false(rxq->vxrxq_rs->update_rxhead)) {
2479d87a4d00Sryo 			int qid = rxcd->qid;
2480d87a4d00Sryo 			bus_size_t r;
2481d87a4d00Sryo 
2482d87a4d00Sryo 			idx = (idx + 1) % rxr->vxrxr_ndesc;
2483d87a4d00Sryo 			if (qid >= sc->vmx_nrxqueues) {
2484d87a4d00Sryo 				qid -= sc->vmx_nrxqueues;
2485d87a4d00Sryo 				r = VMXNET3_BAR0_RXH2(qid);
2486d87a4d00Sryo 			} else
2487d87a4d00Sryo 				r = VMXNET3_BAR0_RXH1(qid);
2488d87a4d00Sryo 			vmxnet3_write_bar0(sc, r, idx);
2489d87a4d00Sryo 		}
2490d87a4d00Sryo 	}
2491d87a4d00Sryo 
2492d87a4d00Sryo 	return more;
2493d87a4d00Sryo }
2494d87a4d00Sryo 
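/*
 * Defer further processing of a queue: enqueue it on the device
 * workqueue when workqueue mode is selected (at most once at a time,
 * tracked by vxq_wq_enqueued), otherwise schedule the queue softint.
 */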
2495d87a4d00Sryo static inline void
2496d87a4d00Sryo vmxnet3_sched_handle_queue(struct vmxnet3_softc *sc, struct vmxnet3_queue *vmxq)
2497d87a4d00Sryo {
2498d87a4d00Sryo 
2499d87a4d00Sryo 	if (vmxq->vxq_workqueue) {
2500c3c564a7Sknakahara 		/*
2501c3c564a7Sknakahara 		 * When this function is called, "vmxq" is owned by one CPU,
2502c3c564a7Sknakahara 		 * so an atomic operation is not required here.
2503c3c564a7Sknakahara 		 */
2504c3c564a7Sknakahara 		if (!vmxq->vxq_wq_enqueued) {
2505c3c564a7Sknakahara 			vmxq->vxq_wq_enqueued = true;
2506c3c564a7Sknakahara 			workqueue_enqueue(sc->vmx_queue_wq,
2507c3c564a7Sknakahara 			    &vmxq->vxq_wq_cookie, curcpu());
2508c3c564a7Sknakahara 		}
2509d87a4d00Sryo 	} else {
2510d87a4d00Sryo 		softint_schedule(vmxq->vxq_si);
2511d87a4d00Sryo 	}
2512d87a4d00Sryo }
2513d87a4d00Sryo 
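/*
 * Single-vector (INTx/MSI) interrupt handler: handle any pending
 * events, then service the Tx and Rx rings of queue 0 with the
 * interrupt-time limits, deferring any remaining work.
 */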
2514d87a4d00Sryo static int
2515d87a4d00Sryo vmxnet3_legacy_intr(void *xsc)
2516d87a4d00Sryo {
2517d87a4d00Sryo 	struct vmxnet3_softc *sc;
25185e274e5dSmsaitoh 	struct vmxnet3_queue *vmxq;
2519d87a4d00Sryo 	struct vmxnet3_txqueue *txq;
25205e274e5dSmsaitoh 	struct vmxnet3_rxqueue *rxq;
2521d87a4d00Sryo 	u_int txlimit, rxlimit;
2522d87a4d00Sryo 	bool txmore, rxmore;
2523d87a4d00Sryo 
2524d87a4d00Sryo 	sc = xsc;
25255e274e5dSmsaitoh 	vmxq = &sc->vmx_queue[0];
25265e274e5dSmsaitoh 	txq = &vmxq->vxq_txqueue;
25275e274e5dSmsaitoh 	rxq = &vmxq->vxq_rxqueue;
2528d87a4d00Sryo 	txlimit = sc->vmx_tx_intr_process_limit;
2529d87a4d00Sryo 	rxlimit = sc->vmx_rx_intr_process_limit;
2530d87a4d00Sryo 
2531d87a4d00Sryo 	if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) {
2532d87a4d00Sryo 		if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0)
2533d87a4d00Sryo 			return (0);
2534d87a4d00Sryo 	}
2535d87a4d00Sryo 	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2536d87a4d00Sryo 		vmxnet3_disable_all_intrs(sc);
2537d87a4d00Sryo 
2538d87a4d00Sryo 	if (sc->vmx_ds->event != 0)
2539d87a4d00Sryo 		vmxnet3_evintr(sc);
2540d87a4d00Sryo 
2541d87a4d00Sryo 	VMXNET3_TXQ_LOCK(txq);
2542d87a4d00Sryo 	txmore = vmxnet3_txq_eof(txq, txlimit);
2543d87a4d00Sryo 	VMXNET3_TXQ_UNLOCK(txq);
2544d87a4d00Sryo 
25457af49426Smsaitoh 	VMXNET3_RXQ_LOCK(rxq);
25467af49426Smsaitoh 	rxmore = vmxnet3_rxq_eof(rxq, rxlimit);
25477af49426Smsaitoh 	VMXNET3_RXQ_UNLOCK(rxq);
25487af49426Smsaitoh 
25495e274e5dSmsaitoh 	if (txmore || rxmore)
25505e274e5dSmsaitoh 		vmxnet3_sched_handle_queue(sc, vmxq);
25515e274e5dSmsaitoh 	else {
2552d87a4d00Sryo 		if_schedule_deferred_start(&sc->vmx_ethercom.ec_if);
2553d87a4d00Sryo 		vmxnet3_enable_all_intrs(sc);
2554d87a4d00Sryo 	}
25555e274e5dSmsaitoh 
2556d87a4d00Sryo 	return (1);
2557d87a4d00Sryo }
2558d87a4d00Sryo 
2559d87a4d00Sryo static int
2560d87a4d00Sryo vmxnet3_txrxq_intr(void *xvmxq)
2561d87a4d00Sryo {
2562d87a4d00Sryo 	struct vmxnet3_softc *sc;
2563d87a4d00Sryo 	struct vmxnet3_queue *vmxq;
2564d87a4d00Sryo 	struct vmxnet3_txqueue *txq;
2565d87a4d00Sryo 	struct vmxnet3_rxqueue *rxq;
2566d87a4d00Sryo 	u_int txlimit, rxlimit;
2567d87a4d00Sryo 	bool txmore, rxmore;
2568d87a4d00Sryo 
2569d87a4d00Sryo 	vmxq = xvmxq;
2570d87a4d00Sryo 	txq = &vmxq->vxq_txqueue;
2571d87a4d00Sryo 	rxq = &vmxq->vxq_rxqueue;
2572d87a4d00Sryo 	sc = txq->vxtxq_sc;
2573d87a4d00Sryo 	txlimit = sc->vmx_tx_intr_process_limit;
2574d87a4d00Sryo 	rxlimit = sc->vmx_rx_intr_process_limit;
2575d87a4d00Sryo 	vmxq->vxq_workqueue = sc->vmx_txrx_workqueue;
2576d87a4d00Sryo 
2577d87a4d00Sryo 	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2578d87a4d00Sryo 		vmxnet3_disable_intr(sc, vmxq->vxq_intr_idx);
2579d87a4d00Sryo 
2580d87a4d00Sryo 	VMXNET3_TXQ_LOCK(txq);
2581d87a4d00Sryo 	txq->vxtxq_intr.ev_count++;
2582d87a4d00Sryo 	txmore = vmxnet3_txq_eof(txq, txlimit);
2583d87a4d00Sryo 	VMXNET3_TXQ_UNLOCK(txq);
2584d87a4d00Sryo 
2585d87a4d00Sryo 	VMXNET3_RXQ_LOCK(rxq);
2586d87a4d00Sryo 	rxq->vxrxq_intr.ev_count++;
2587d87a4d00Sryo 	rxmore = vmxnet3_rxq_eof(rxq, rxlimit);
2588d87a4d00Sryo 	VMXNET3_RXQ_UNLOCK(rxq);
2589d87a4d00Sryo 
25905e274e5dSmsaitoh 	if (txmore || rxmore)
2591d87a4d00Sryo 		vmxnet3_sched_handle_queue(sc, vmxq);
25925e274e5dSmsaitoh 	else {
2593d87a4d00Sryo 		/* for ALTQ */
2594d87a4d00Sryo 		if (vmxq->vxq_id == 0)
2595d87a4d00Sryo 			if_schedule_deferred_start(&sc->vmx_ethercom.ec_if);
2596d87a4d00Sryo 		softint_schedule(txq->vxtxq_si);
2597d87a4d00Sryo 
2598d87a4d00Sryo 		vmxnet3_enable_intr(sc, vmxq->vxq_intr_idx);
2599d87a4d00Sryo 	}
2600d87a4d00Sryo 
2601d87a4d00Sryo 	return (1);
2602d87a4d00Sryo }
2603d87a4d00Sryo 
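/*
 * Deferred handler, run from the per-queue softint or workqueue: continue
 * Tx/Rx completion processing with the non-interrupt process limits, kick
 * the transmit path, and either reschedule itself or re-enable the queue's
 * interrupt once no work remains.
 */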
2604d87a4d00Sryo static void
2605d87a4d00Sryo vmxnet3_handle_queue(void *xvmxq)
2606d87a4d00Sryo {
2607d87a4d00Sryo 	struct vmxnet3_softc *sc;
2608d87a4d00Sryo 	struct vmxnet3_queue *vmxq;
2609d87a4d00Sryo 	struct vmxnet3_txqueue *txq;
2610d87a4d00Sryo 	struct vmxnet3_rxqueue *rxq;
2611d87a4d00Sryo 	u_int txlimit, rxlimit;
2612d87a4d00Sryo 	bool txmore, rxmore;
2613d87a4d00Sryo 
2614d87a4d00Sryo 	vmxq = xvmxq;
2615d87a4d00Sryo 	txq = &vmxq->vxq_txqueue;
2616d87a4d00Sryo 	rxq = &vmxq->vxq_rxqueue;
2617d87a4d00Sryo 	sc = txq->vxtxq_sc;
2618d87a4d00Sryo 	txlimit = sc->vmx_tx_process_limit;
2619d87a4d00Sryo 	rxlimit = sc->vmx_rx_process_limit;
2620d87a4d00Sryo 
2621d87a4d00Sryo 	VMXNET3_TXQ_LOCK(txq);
2622d87a4d00Sryo 	txq->vxtxq_defer.ev_count++;
2623d87a4d00Sryo 	txmore = vmxnet3_txq_eof(txq, txlimit);
2624d87a4d00Sryo 	if (txmore)
2625d87a4d00Sryo 		txq->vxtxq_deferreq.ev_count++;
2626d87a4d00Sryo 	/* for ALTQ */
2627d87a4d00Sryo 	if (vmxq->vxq_id == 0)
2628d87a4d00Sryo 		if_schedule_deferred_start(&sc->vmx_ethercom.ec_if);
2629d87a4d00Sryo 	softint_schedule(txq->vxtxq_si);
2630d87a4d00Sryo 	VMXNET3_TXQ_UNLOCK(txq);
2631d87a4d00Sryo 
2632d87a4d00Sryo 	VMXNET3_RXQ_LOCK(rxq);
2633d87a4d00Sryo 	rxq->vxrxq_defer.ev_count++;
2634d87a4d00Sryo 	rxmore = vmxnet3_rxq_eof(rxq, rxlimit);
2635d87a4d00Sryo 	if (rxmore)
2636d87a4d00Sryo 		rxq->vxrxq_deferreq.ev_count++;
2637d87a4d00Sryo 	VMXNET3_RXQ_UNLOCK(rxq);
2638d87a4d00Sryo 
2639d87a4d00Sryo 	if (txmore || rxmore)
2640d87a4d00Sryo 		vmxnet3_sched_handle_queue(sc, vmxq);
2641d87a4d00Sryo 	else
2642d87a4d00Sryo 		vmxnet3_enable_intr(sc, vmxq->vxq_intr_idx);
2643d87a4d00Sryo }
2644d87a4d00Sryo 
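/*
 * Workqueue wrapper around vmxnet3_handle_queue().  The enqueued flag is
 * cleared before processing so the queue can be scheduled again.
 */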
2645d87a4d00Sryo static void
2646d87a4d00Sryo vmxnet3_handle_queue_work(struct work *wk, void *context)
2647d87a4d00Sryo {
2648d87a4d00Sryo 	struct vmxnet3_queue *vmxq;
2649d87a4d00Sryo 
2650d87a4d00Sryo 	vmxq = container_of(wk, struct vmxnet3_queue, vxq_wq_cookie);
2651c3c564a7Sknakahara 	vmxq->vxq_wq_enqueued = false;
2652d87a4d00Sryo 	vmxnet3_handle_queue(vmxq);
2653d87a4d00Sryo }
2654d87a4d00Sryo 
2655d87a4d00Sryo static int
2656d87a4d00Sryo vmxnet3_event_intr(void *xsc)
2657d87a4d00Sryo {
2658d87a4d00Sryo 	struct vmxnet3_softc *sc;
2659d87a4d00Sryo 
2660d87a4d00Sryo 	sc = xsc;
2661d87a4d00Sryo 
2662d87a4d00Sryo 	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2663d87a4d00Sryo 		vmxnet3_disable_intr(sc, sc->vmx_event_intr_idx);
2664d87a4d00Sryo 
2665d87a4d00Sryo 	sc->vmx_event_intr.ev_count++;
2666d87a4d00Sryo 
2667d87a4d00Sryo 	if (sc->vmx_ds->event != 0)
2668d87a4d00Sryo 		vmxnet3_evintr(sc);
2669d87a4d00Sryo 
2670d87a4d00Sryo 	vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx);
2671d87a4d00Sryo 
2672d87a4d00Sryo 	return (1);
2673d87a4d00Sryo }
2674d87a4d00Sryo 
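/* Unload and free any mbufs still held by the Tx command ring. */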
2675d87a4d00Sryo static void
2676d87a4d00Sryo vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
2677d87a4d00Sryo {
2678d87a4d00Sryo 	struct vmxnet3_txring *txr;
2679d87a4d00Sryo 	struct vmxnet3_txbuf *txb;
268035492473Sryo 	u_int i;
2681d87a4d00Sryo 
2682d87a4d00Sryo 	txr = &txq->vxtxq_cmd_ring;
2683d87a4d00Sryo 
2684d87a4d00Sryo 	for (i = 0; i < txr->vxtxr_ndesc; i++) {
2685d87a4d00Sryo 		txb = &txr->vxtxr_txbuf[i];
2686d87a4d00Sryo 
2687d87a4d00Sryo 		if (txb->vtxb_m == NULL)
2688d87a4d00Sryo 			continue;
2689d87a4d00Sryo 
2690d87a4d00Sryo 		bus_dmamap_sync(sc->vmx_dmat, txb->vtxb_dmamap,
2691d87a4d00Sryo 		    0, txb->vtxb_dmamap->dm_mapsize,
2692d87a4d00Sryo 		    BUS_DMASYNC_POSTWRITE);
2693d87a4d00Sryo 		bus_dmamap_unload(sc->vmx_dmat, txb->vtxb_dmamap);
2694d87a4d00Sryo 		m_freem(txb->vtxb_m);
2695d87a4d00Sryo 		txb->vtxb_m = NULL;
2696d87a4d00Sryo 	}
2697d87a4d00Sryo }
2698d87a4d00Sryo 
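/*
 * Free the partially reassembled packet chain, if any, and all mbufs
 * still loaded on the Rx command rings.
 */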
2699d87a4d00Sryo static void
2700d87a4d00Sryo vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
2701d87a4d00Sryo {
2702d87a4d00Sryo 	struct vmxnet3_rxring *rxr;
2703d87a4d00Sryo 	struct vmxnet3_rxbuf *rxb;
270435492473Sryo 	u_int i, j;
2705d87a4d00Sryo 
2706d87a4d00Sryo 	if (rxq->vxrxq_mhead != NULL) {
2707d87a4d00Sryo 		m_freem(rxq->vxrxq_mhead);
2708d87a4d00Sryo 		rxq->vxrxq_mhead = NULL;
2709d87a4d00Sryo 		rxq->vxrxq_mtail = NULL;
2710d87a4d00Sryo 	}
2711d87a4d00Sryo 
2712d87a4d00Sryo 	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
2713d87a4d00Sryo 		rxr = &rxq->vxrxq_cmd_ring[i];
2714d87a4d00Sryo 
2715d87a4d00Sryo 		for (j = 0; j < rxr->vxrxr_ndesc; j++) {
2716d87a4d00Sryo 			rxb = &rxr->vxrxr_rxbuf[j];
2717d87a4d00Sryo 
2718d87a4d00Sryo 			if (rxb->vrxb_m == NULL)
2719d87a4d00Sryo 				continue;
2720d87a4d00Sryo 
2721d87a4d00Sryo 			bus_dmamap_sync(sc->vmx_dmat, rxb->vrxb_dmamap,
2722d87a4d00Sryo 			    0, rxb->vrxb_dmamap->dm_mapsize,
2723d87a4d00Sryo 			    BUS_DMASYNC_POSTREAD);
2724d87a4d00Sryo 			bus_dmamap_unload(sc->vmx_dmat, rxb->vrxb_dmamap);
2725d87a4d00Sryo 			m_freem(rxb->vrxb_m);
2726d87a4d00Sryo 			rxb->vrxb_m = NULL;
2727d87a4d00Sryo 		}
2728d87a4d00Sryo 	}
2729d87a4d00Sryo }
2730d87a4d00Sryo 
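/*
 * Mark every Rx and Tx queue as stopping under its lock so the interrupt
 * and deferred handlers stop scheduling new work, then wait for any work
 * already enqueued on the per-queue workqueue to finish.
 */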
2731d87a4d00Sryo static void
2732d87a4d00Sryo vmxnet3_stop_rendezvous(struct vmxnet3_softc *sc)
2733d87a4d00Sryo {
2734d87a4d00Sryo 	struct vmxnet3_rxqueue *rxq;
2735d87a4d00Sryo 	struct vmxnet3_txqueue *txq;
2736be531ae4Sknakahara 	struct vmxnet3_queue *vmxq;
2737d87a4d00Sryo 	int i;
2738d87a4d00Sryo 
2739d87a4d00Sryo 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
2740d87a4d00Sryo 		rxq = &sc->vmx_queue[i].vxq_rxqueue;
2741d87a4d00Sryo 		VMXNET3_RXQ_LOCK(rxq);
2742db465cddSriastradh 		rxq->vxrxq_stopping = true;
2743d87a4d00Sryo 		VMXNET3_RXQ_UNLOCK(rxq);
2744d87a4d00Sryo 	}
2745d87a4d00Sryo 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
2746d87a4d00Sryo 		txq = &sc->vmx_queue[i].vxq_txqueue;
2747d87a4d00Sryo 		VMXNET3_TXQ_LOCK(txq);
2748db465cddSriastradh 		txq->vxtxq_stopping = true;
2749d87a4d00Sryo 		VMXNET3_TXQ_UNLOCK(txq);
2750d87a4d00Sryo 	}
2751be531ae4Sknakahara 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
2752be531ae4Sknakahara 		vmxq = &sc->vmx_queue[i];
2753be531ae4Sknakahara 		workqueue_wait(sc->vmx_queue_wq, &vmxq->vxq_wq_cookie);
2754be531ae4Sknakahara 	}
2755d87a4d00Sryo }
2756d87a4d00Sryo 
2757d87a4d00Sryo static void
2758d87a4d00Sryo vmxnet3_stop_locked(struct vmxnet3_softc *sc)
2759d87a4d00Sryo {
2760db465cddSriastradh 	struct ifnet *ifp = &sc->vmx_ethercom.ec_if;
2761d87a4d00Sryo 	int q;
2762d87a4d00Sryo 
2763d87a4d00Sryo 	VMXNET3_CORE_LOCK_ASSERT(sc);
2764db465cddSriastradh 	KASSERT(IFNET_LOCKED(ifp));
2765db465cddSriastradh 
2766db465cddSriastradh 	vmxnet3_stop_rendezvous(sc);
2767db465cddSriastradh 
2768db465cddSriastradh 	sc->vmx_mcastactive = false;
2769db465cddSriastradh 	sc->vmx_link_active = 0;
2770db465cddSriastradh 	callout_halt(&sc->vmx_tick, sc->vmx_mtx);
2771d87a4d00Sryo 
2772d87a4d00Sryo 	ifp->if_flags &= ~IFF_RUNNING;
2773d87a4d00Sryo 
2774d87a4d00Sryo 	/* Disable interrupts. */
2775d87a4d00Sryo 	vmxnet3_disable_all_intrs(sc);
2776d87a4d00Sryo 	vmxnet3_write_cmd(sc, VMXNET3_CMD_DISABLE);
2777d87a4d00Sryo 
2778d87a4d00Sryo 	for (q = 0; q < sc->vmx_ntxqueues; q++)
2779d87a4d00Sryo 		vmxnet3_txstop(sc, &sc->vmx_queue[q].vxq_txqueue);
2780d87a4d00Sryo 	for (q = 0; q < sc->vmx_nrxqueues; q++)
2781d87a4d00Sryo 		vmxnet3_rxstop(sc, &sc->vmx_queue[q].vxq_rxqueue);
2782d87a4d00Sryo 
2783d87a4d00Sryo 	vmxnet3_write_cmd(sc, VMXNET3_CMD_RESET);
2784d87a4d00Sryo }
2785d87a4d00Sryo 
2786d87a4d00Sryo static void
2787d87a4d00Sryo vmxnet3_stop(struct ifnet *ifp, int disable)
2788d87a4d00Sryo {
2789d87a4d00Sryo 	struct vmxnet3_softc *sc = ifp->if_softc;
2790d87a4d00Sryo 
2791db465cddSriastradh 	KASSERT(IFNET_LOCKED(ifp));
2792db465cddSriastradh 
2793d87a4d00Sryo 	VMXNET3_CORE_LOCK(sc);
2794d87a4d00Sryo 	vmxnet3_stop_locked(sc);
2795d87a4d00Sryo 	VMXNET3_CORE_UNLOCK(sc);
2796d87a4d00Sryo }
2797d87a4d00Sryo 
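/* Reset the Tx command and completion rings to their initial state. */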
2798d87a4d00Sryo static void
2799d87a4d00Sryo vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
2800d87a4d00Sryo {
2801d87a4d00Sryo 	struct vmxnet3_txring *txr;
2802d87a4d00Sryo 	struct vmxnet3_comp_ring *txc;
2803d87a4d00Sryo 
2804d87a4d00Sryo 	txr = &txq->vxtxq_cmd_ring;
2805d87a4d00Sryo 	txr->vxtxr_head = 0;
2806d87a4d00Sryo 	txr->vxtxr_next = 0;
2807d87a4d00Sryo 	txr->vxtxr_gen = VMXNET3_INIT_GEN;
2808d87a4d00Sryo 	memset(txr->vxtxr_txd, 0,
2809d87a4d00Sryo 	    txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc));
2810d87a4d00Sryo 
2811d87a4d00Sryo 	txc = &txq->vxtxq_comp_ring;
2812d87a4d00Sryo 	txc->vxcr_next = 0;
2813d87a4d00Sryo 	txc->vxcr_gen = VMXNET3_INIT_GEN;
2814d87a4d00Sryo 	memset(txc->vxcr_u.txcd, 0,
2815d87a4d00Sryo 	    txc->vxcr_ndesc * sizeof(struct vmxnet3_txcompdesc));
2816d87a4d00Sryo }
2817d87a4d00Sryo 
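/*
 * Reset the Rx rings for a (re)start: repopulate the first command ring
 * with fresh mbufs and clear the remaining command rings, which stay
 * unused while LRO and jumbo frames are unsupported, then reset the
 * completion ring.
 */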
2818d87a4d00Sryo static int
2819d87a4d00Sryo vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
2820d87a4d00Sryo {
2821d87a4d00Sryo 	struct vmxnet3_rxring *rxr;
2822d87a4d00Sryo 	struct vmxnet3_comp_ring *rxc;
282335492473Sryo 	u_int i, populate, idx;
282435492473Sryo 	int error;
2825d87a4d00Sryo 
2826d87a4d00Sryo 	/* LRO and jumbo frames are not supported yet */
2827d87a4d00Sryo 	populate = 1;
2828d87a4d00Sryo 
2829d87a4d00Sryo 	for (i = 0; i < populate; i++) {
2830d87a4d00Sryo 		rxr = &rxq->vxrxq_cmd_ring[i];
2831d87a4d00Sryo 		rxr->vxrxr_fill = 0;
2832d87a4d00Sryo 		rxr->vxrxr_gen = VMXNET3_INIT_GEN;
2833d87a4d00Sryo 		memset(rxr->vxrxr_rxd, 0,
2834d87a4d00Sryo 		    rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
2835d87a4d00Sryo 
2836d87a4d00Sryo 		for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) {
2837d87a4d00Sryo 			error = vmxnet3_newbuf(sc, rxq, rxr);
2838d87a4d00Sryo 			if (error)
2839d87a4d00Sryo 				return (error);
2840d87a4d00Sryo 		}
2841d87a4d00Sryo 	}
2842d87a4d00Sryo 
2843d87a4d00Sryo 	for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) {
2844d87a4d00Sryo 		rxr = &rxq->vxrxq_cmd_ring[i];
2845d87a4d00Sryo 		rxr->vxrxr_fill = 0;
2846d87a4d00Sryo 		rxr->vxrxr_gen = 0;
2847d87a4d00Sryo 		memset(rxr->vxrxr_rxd, 0,
2848d87a4d00Sryo 		    rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
2849d87a4d00Sryo 	}
2850d87a4d00Sryo 
2851d87a4d00Sryo 	rxc = &rxq->vxrxq_comp_ring;
2852d87a4d00Sryo 	rxc->vxcr_next = 0;
2853d87a4d00Sryo 	rxc->vxcr_gen = VMXNET3_INIT_GEN;
2854d87a4d00Sryo 	memset(rxc->vxcr_u.rxcd, 0,
2855d87a4d00Sryo 	    rxc->vxcr_ndesc * sizeof(struct vmxnet3_rxcompdesc));
2856d87a4d00Sryo 
2857d87a4d00Sryo 	return (0);
2858d87a4d00Sryo }
2859d87a4d00Sryo 
2860d87a4d00Sryo static int
2861d87a4d00Sryo vmxnet3_reinit_queues(struct vmxnet3_softc *sc)
2862d87a4d00Sryo {
2863d87a4d00Sryo 	device_t dev;
2864d87a4d00Sryo 	int q, error;
2865d87a4d00Sryo 	dev = sc->vmx_dev;
2866d87a4d00Sryo 
2867d87a4d00Sryo 	for (q = 0; q < sc->vmx_ntxqueues; q++)
2868d87a4d00Sryo 		vmxnet3_txinit(sc, &sc->vmx_queue[q].vxq_txqueue);
2869d87a4d00Sryo 
2870d87a4d00Sryo 	for (q = 0; q < sc->vmx_nrxqueues; q++) {
2871d87a4d00Sryo 		error = vmxnet3_rxinit(sc, &sc->vmx_queue[q].vxq_rxqueue);
2872d87a4d00Sryo 		if (error) {
2873d87a4d00Sryo 			device_printf(dev, "cannot populate Rx queue %d\n", q);
2874d87a4d00Sryo 			return (error);
2875d87a4d00Sryo 		}
2876d87a4d00Sryo 	}
2877d87a4d00Sryo 
2878d87a4d00Sryo 	return (0);
2879d87a4d00Sryo }
2880d87a4d00Sryo 
2881d87a4d00Sryo static int
2882d87a4d00Sryo vmxnet3_enable_device(struct vmxnet3_softc *sc)
2883d87a4d00Sryo {
2884d87a4d00Sryo 	int q;
2885d87a4d00Sryo 
2886d87a4d00Sryo 	if (vmxnet3_read_cmd(sc, VMXNET3_CMD_ENABLE) != 0) {
2887d87a4d00Sryo 		device_printf(sc->vmx_dev, "device enable command failed!\n");
2888d87a4d00Sryo 		return (1);
2889d87a4d00Sryo 	}
2890d87a4d00Sryo 
2891d87a4d00Sryo 	/* Reset the Rx queue heads. */
2892d87a4d00Sryo 	for (q = 0; q < sc->vmx_nrxqueues; q++) {
2893d87a4d00Sryo 		vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH1(q), 0);
2894d87a4d00Sryo 		vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH2(q), 0);
2895d87a4d00Sryo 	}
2896d87a4d00Sryo 
2897d87a4d00Sryo 	return (0);
2898d87a4d00Sryo }
2899d87a4d00Sryo 
2900d87a4d00Sryo static void
2901d87a4d00Sryo vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc)
2902d87a4d00Sryo {
2903d87a4d00Sryo 
2904d87a4d00Sryo 	vmxnet3_set_rxfilter(sc);
2905d87a4d00Sryo 
2906d87a4d00Sryo 	memset(sc->vmx_ds->vlan_filter, 0, sizeof(sc->vmx_ds->vlan_filter));
2907d87a4d00Sryo 	vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
2908d87a4d00Sryo }
2909d87a4d00Sryo 
2910d87a4d00Sryo static int
2911d87a4d00Sryo vmxnet3_reinit(struct vmxnet3_softc *sc)
2912d87a4d00Sryo {
2913d87a4d00Sryo 
2914db465cddSriastradh 	VMXNET3_CORE_LOCK_ASSERT(sc);
2915db465cddSriastradh 
2916d87a4d00Sryo 	vmxnet3_set_lladdr(sc);
2917d87a4d00Sryo 	vmxnet3_reinit_shared_data(sc);
2918d87a4d00Sryo 
2919d87a4d00Sryo 	if (vmxnet3_reinit_queues(sc) != 0)
2920d87a4d00Sryo 		return (ENXIO);
2921d87a4d00Sryo 
2922d87a4d00Sryo 	if (vmxnet3_enable_device(sc) != 0)
2923d87a4d00Sryo 		return (ENXIO);
2924d87a4d00Sryo 
2925d87a4d00Sryo 	vmxnet3_reinit_rxfilters(sc);
2926d87a4d00Sryo 
2927d87a4d00Sryo 	return (0);
2928d87a4d00Sryo }
2929d87a4d00Sryo 
2930d87a4d00Sryo static int
2931d87a4d00Sryo vmxnet3_init_locked(struct vmxnet3_softc *sc)
2932d87a4d00Sryo {
2933d87a4d00Sryo 	struct ifnet *ifp = &sc->vmx_ethercom.ec_if;
2934db465cddSriastradh 	int q;
2935d87a4d00Sryo 	int error;
2936d87a4d00Sryo 
2937db465cddSriastradh 	KASSERT(IFNET_LOCKED(ifp));
2938db465cddSriastradh 	VMXNET3_CORE_LOCK_ASSERT(sc);
2939db465cddSriastradh 
2940d87a4d00Sryo 	vmxnet3_stop_locked(sc);
2941d87a4d00Sryo 
2942d87a4d00Sryo 	error = vmxnet3_reinit(sc);
2943d87a4d00Sryo 	if (error) {
2944d87a4d00Sryo 		vmxnet3_stop_locked(sc);
2945d87a4d00Sryo 		return (error);
2946d87a4d00Sryo 	}
2947d87a4d00Sryo 
2948d87a4d00Sryo 	ifp->if_flags |= IFF_RUNNING;
294945090a6bSryo 	vmxnet3_if_link_status(sc);
2950db465cddSriastradh 	sc->vmx_mcastactive = true;
2951d87a4d00Sryo 
2952d87a4d00Sryo 	vmxnet3_enable_all_intrs(sc);
2953d87a4d00Sryo 	callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
2954d87a4d00Sryo 
2955db465cddSriastradh 	for (q = 0; q < sc->vmx_ntxqueues; q++) {
2956db465cddSriastradh 		VMXNET3_TXQ_LOCK(&sc->vmx_queue[q].vxq_txqueue);
2957db465cddSriastradh 		sc->vmx_queue[q].vxq_txqueue.vxtxq_stopping = false;
2958db465cddSriastradh 		VMXNET3_TXQ_UNLOCK(&sc->vmx_queue[q].vxq_txqueue);
2959db465cddSriastradh 	}
2960db465cddSriastradh 	for (q = 0; q < sc->vmx_nrxqueues; q++) {
2961db465cddSriastradh 		VMXNET3_RXQ_LOCK(&sc->vmx_queue[q].vxq_rxqueue);
2962db465cddSriastradh 		sc->vmx_queue[q].vxq_rxqueue.vxrxq_stopping = false;
2963db465cddSriastradh 		VMXNET3_RXQ_UNLOCK(&sc->vmx_queue[q].vxq_rxqueue);
2964db465cddSriastradh 	}
2965db465cddSriastradh 
2966d87a4d00Sryo 	return (0);
2967d87a4d00Sryo }
2968d87a4d00Sryo 
2969d87a4d00Sryo static int
2970d87a4d00Sryo vmxnet3_init(struct ifnet *ifp)
2971d87a4d00Sryo {
2972d87a4d00Sryo 	struct vmxnet3_softc *sc = ifp->if_softc;
2973d87a4d00Sryo 	int error;
2974d87a4d00Sryo 
2975db465cddSriastradh 	KASSERT(IFNET_LOCKED(ifp));
2976db465cddSriastradh 
2977d87a4d00Sryo 	VMXNET3_CORE_LOCK(sc);
2978d87a4d00Sryo 	error = vmxnet3_init_locked(sc);
2979d87a4d00Sryo 	VMXNET3_CORE_UNLOCK(sc);
2980d87a4d00Sryo 
2981d87a4d00Sryo 	return (error);
2982d87a4d00Sryo }
2983d87a4d00Sryo 
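/*
 * Compute the offsets the device needs for checksum/TSO offload: *start is
 * the header length written into the Tx descriptor (the start of the L4
 * header, extended by the TCP header length for TSO) and *csum_start is
 * the offset of the L4 checksum field.  For TSO the TCP checksum field is
 * pre-seeded with the pseudo-header checksum.  The mbuf is freed on error.
 */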
2984d87a4d00Sryo static int
2985d87a4d00Sryo vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *txq, struct mbuf *m,
2986d87a4d00Sryo     int *start, int *csum_start)
2987d87a4d00Sryo {
2988d87a4d00Sryo 	struct ether_header *eh;
2989d87a4d00Sryo 	struct mbuf *mp;
2990d87a4d00Sryo 	int offset, csum_off, iphl, offp;
2991d87a4d00Sryo 	bool v4;
2992d87a4d00Sryo 
2993d87a4d00Sryo 	eh = mtod(m, struct ether_header *);
2994d87a4d00Sryo 	switch (htons(eh->ether_type)) {
2995d87a4d00Sryo 	case ETHERTYPE_IP:
2996d87a4d00Sryo 	case ETHERTYPE_IPV6:
2997d87a4d00Sryo 		offset = ETHER_HDR_LEN;
2998d87a4d00Sryo 		break;
2999d87a4d00Sryo 	case ETHERTYPE_VLAN:
3000d87a4d00Sryo 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3001d87a4d00Sryo 		break;
3002d87a4d00Sryo 	default:
3003d87a4d00Sryo 		m_freem(m);
3004d87a4d00Sryo 		return (EINVAL);
3005d87a4d00Sryo 	}
3006d87a4d00Sryo 
3007d87a4d00Sryo 	if ((m->m_pkthdr.csum_flags &
3008d87a4d00Sryo 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
3009d87a4d00Sryo 		iphl = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
3010d87a4d00Sryo 		v4 = true;
3011d87a4d00Sryo 	} else {
3012d87a4d00Sryo 		iphl = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
3013d87a4d00Sryo 		v4 = false;
3014d87a4d00Sryo 	}
3015d87a4d00Sryo 	*start = offset + iphl;
3016d87a4d00Sryo 
3017d87a4d00Sryo 	if (m->m_pkthdr.csum_flags &
3018d87a4d00Sryo 	    (M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
3019d87a4d00Sryo 		csum_off = offsetof(struct tcphdr, th_sum);
3020d87a4d00Sryo 	} else {
3021d87a4d00Sryo 		csum_off = offsetof(struct udphdr, uh_sum);
3022d87a4d00Sryo 	}
3023d87a4d00Sryo 
3024d87a4d00Sryo 	*csum_start = *start + csum_off;
3025d87a4d00Sryo 	mp = m_pulldown(m, 0, *csum_start + 2, &offp);
3026d87a4d00Sryo 	if (!mp) {
3027d87a4d00Sryo 		/* m is already freed */
3028d87a4d00Sryo 		return ENOBUFS;
3029d87a4d00Sryo 	}
3030d87a4d00Sryo 
3031d87a4d00Sryo 	if (m->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
3032d87a4d00Sryo 		struct tcphdr *tcp;
3033d87a4d00Sryo 
3034d87a4d00Sryo 		txq->vxtxq_stats.vmtxs_tso++;
3035d87a4d00Sryo 		tcp = (void *)(mtod(mp, char *) + offp + *start);
3036d87a4d00Sryo 
3037d87a4d00Sryo 		if (v4) {
3038d87a4d00Sryo 			struct ip *ip;
3039d87a4d00Sryo 
3040d87a4d00Sryo 			ip = (void *)(mtod(mp, char *) + offp + offset);
3041d87a4d00Sryo 			tcp->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
3042d87a4d00Sryo 			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3043d87a4d00Sryo 		} else {
3044d87a4d00Sryo 			struct ip6_hdr *ip6;
3045d87a4d00Sryo 
3046d87a4d00Sryo 			ip6 = (void *)(mtod(mp, char *) + offp + offset);
3047d87a4d00Sryo 			tcp->th_sum = in6_cksum_phdr(&ip6->ip6_src,
3048d87a4d00Sryo 			    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
3049d87a4d00Sryo 		}
3050d87a4d00Sryo 
3051d87a4d00Sryo 		/*
3052d87a4d00Sryo 		 * For TSO, the size of the protocol header is also
3053d87a4d00Sryo 		 * included in the descriptor header size.
3054d87a4d00Sryo 		 */
3055d87a4d00Sryo 		*start += (tcp->th_off << 2);
3056d87a4d00Sryo 	} else
3057d87a4d00Sryo 		txq->vxtxq_stats.vmtxs_csum++;
3058d87a4d00Sryo 
3059d87a4d00Sryo 	return (0);
3060d87a4d00Sryo }
3061d87a4d00Sryo 
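/*
 * DMA-load the mbuf chain.  If it has too many segments (EFBIG), try a
 * single m_defrag() pass and reload; the mbuf is freed if that fails.
 */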
3062d87a4d00Sryo static int
3063d87a4d00Sryo vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *txq, struct mbuf **m0,
3064d87a4d00Sryo     bus_dmamap_t dmap)
3065d87a4d00Sryo {
3066d87a4d00Sryo 	struct mbuf *m;
3067d87a4d00Sryo 	bus_dma_tag_t tag;
3068d87a4d00Sryo 	int error;
3069d87a4d00Sryo 
3070d87a4d00Sryo 	m = *m0;
3071d87a4d00Sryo 	tag = txq->vxtxq_sc->vmx_dmat;
3072d87a4d00Sryo 
3073d87a4d00Sryo 	error = bus_dmamap_load_mbuf(tag, dmap, m, BUS_DMA_NOWAIT);
3074d87a4d00Sryo 	if (error == 0 || error != EFBIG)
3075d87a4d00Sryo 		return (error);
3076d87a4d00Sryo 
3077d87a4d00Sryo 	m = m_defrag(m, M_NOWAIT);
3078d87a4d00Sryo 	if (m != NULL) {
3079d87a4d00Sryo 		*m0 = m;
3080d87a4d00Sryo 		error = bus_dmamap_load_mbuf(tag, dmap, m, BUS_DMA_NOWAIT);
3081d87a4d00Sryo 	} else
3082d87a4d00Sryo 		error = ENOBUFS;
3083d87a4d00Sryo 
3084d87a4d00Sryo 	if (error) {
3085d87a4d00Sryo 		m_freem(*m0);
3086d87a4d00Sryo 		*m0 = NULL;
3087d87a4d00Sryo 		txq->vxtxq_defrag_failed.ev_count++;
3088d87a4d00Sryo 	} else
3089d87a4d00Sryo 		txq->vxtxq_defragged.ev_count++;
3090d87a4d00Sryo 
3091d87a4d00Sryo 	return (error);
3092d87a4d00Sryo }
3093d87a4d00Sryo 
3094d87a4d00Sryo static void
3095d87a4d00Sryo vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap)
3096d87a4d00Sryo {
3097d87a4d00Sryo 
3098d87a4d00Sryo 	bus_dmamap_unload(txq->vxtxq_sc->vmx_dmat, dmap);
3099d87a4d00Sryo }
3100d87a4d00Sryo 
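/*
 * Build the Tx descriptors for one packet.  VLAN and offload information
 * goes into the start-of-packet descriptor, whose generation bit is
 * flipped last so the device never sees a partially filled packet.  The
 * Tx head register is written once enough descriptors are pending.
 */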
3101d87a4d00Sryo static int
3102d87a4d00Sryo vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0)
3103d87a4d00Sryo {
3104d87a4d00Sryo 	struct vmxnet3_softc *sc;
3105d87a4d00Sryo 	struct vmxnet3_txring *txr;
3106d87a4d00Sryo 	struct vmxnet3_txdesc *txd, *sop;
3107d87a4d00Sryo 	struct mbuf *m;
3108d87a4d00Sryo 	bus_dmamap_t dmap;
3109d87a4d00Sryo 	bus_dma_segment_t *segs;
3110d87a4d00Sryo 	int i, gen, start, csum_start, nsegs, error;
3111d87a4d00Sryo 
3112d87a4d00Sryo 	sc = txq->vxtxq_sc;
3113d87a4d00Sryo 	start = 0;
3114d87a4d00Sryo 	txd = NULL;
3115d87a4d00Sryo 	txr = &txq->vxtxq_cmd_ring;
3116d87a4d00Sryo 	dmap = txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_dmamap;
3117d87a4d00Sryo 	csum_start = 0; /* GCC */
3118d87a4d00Sryo 
3119d87a4d00Sryo 	error = vmxnet3_txq_load_mbuf(txq, m0, dmap);
3120d87a4d00Sryo 	if (error)
3121d87a4d00Sryo 		return (error);
3122d87a4d00Sryo 
3123d87a4d00Sryo 	nsegs = dmap->dm_nsegs;
3124d87a4d00Sryo 	segs = dmap->dm_segs;
3125d87a4d00Sryo 
3126d87a4d00Sryo 	m = *m0;
3127d87a4d00Sryo 	KASSERT(m->m_flags & M_PKTHDR);
3128d87a4d00Sryo 	KASSERT(nsegs <= VMXNET3_TX_MAXSEGS);
3129d87a4d00Sryo 
3130d87a4d00Sryo 	if (vmxnet3_txring_avail(txr) < nsegs) {
3131d87a4d00Sryo 		txq->vxtxq_stats.vmtxs_full++;
3132d87a4d00Sryo 		vmxnet3_txq_unload_mbuf(txq, dmap);
3133d87a4d00Sryo 		return (ENOSPC);
3134d87a4d00Sryo 	} else if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD) {
3135d87a4d00Sryo 		error = vmxnet3_txq_offload_ctx(txq, m, &start, &csum_start);
3136d87a4d00Sryo 		if (error) {
3137d87a4d00Sryo 			/* m is already freed */
3138d87a4d00Sryo 			txq->vxtxq_stats.vmtxs_offload_failed++;
3139d87a4d00Sryo 			vmxnet3_txq_unload_mbuf(txq, dmap);
3140d87a4d00Sryo 			*m0 = NULL;
3141d87a4d00Sryo 			return (error);
3142d87a4d00Sryo 		}
3143d87a4d00Sryo 	}
3144d87a4d00Sryo 
3145d87a4d00Sryo 	txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_m = m;
3146d87a4d00Sryo 	sop = &txr->vxtxr_txd[txr->vxtxr_head];
3147d87a4d00Sryo 	gen = txr->vxtxr_gen ^ 1;	/* Still owned by the CPU for now */
3148d87a4d00Sryo 
3149d87a4d00Sryo 	for (i = 0; i < nsegs; i++) {
3150d87a4d00Sryo 		txd = &txr->vxtxr_txd[txr->vxtxr_head];
3151d87a4d00Sryo 
3152d87a4d00Sryo 		txd->addr = segs[i].ds_addr;
3153d87a4d00Sryo 		txd->len = segs[i].ds_len;
3154d87a4d00Sryo 		txd->gen = gen;
3155d87a4d00Sryo 		txd->dtype = 0;
3156d87a4d00Sryo 		txd->offload_mode = VMXNET3_OM_NONE;
3157d87a4d00Sryo 		txd->offload_pos = 0;
3158d87a4d00Sryo 		txd->hlen = 0;
3159d87a4d00Sryo 		txd->eop = 0;
3160d87a4d00Sryo 		txd->compreq = 0;
3161d87a4d00Sryo 		txd->vtag_mode = 0;
3162d87a4d00Sryo 		txd->vtag = 0;
3163d87a4d00Sryo 
3164d87a4d00Sryo 		if (++txr->vxtxr_head == txr->vxtxr_ndesc) {
3165d87a4d00Sryo 			txr->vxtxr_head = 0;
3166d87a4d00Sryo 			txr->vxtxr_gen ^= 1;
3167d87a4d00Sryo 		}
3168d87a4d00Sryo 		gen = txr->vxtxr_gen;
3169d87a4d00Sryo 	}
3170d87a4d00Sryo 	txd->eop = 1;
3171d87a4d00Sryo 	txd->compreq = 1;
3172d87a4d00Sryo 
3173d87a4d00Sryo 	if (vlan_has_tag(m)) {
3174d87a4d00Sryo 		sop->vtag_mode = 1;
3175d87a4d00Sryo 		sop->vtag = vlan_get_tag(m);
3176d87a4d00Sryo 	}
3177d87a4d00Sryo 
3178d87a4d00Sryo 	if (m->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
3179d87a4d00Sryo 		sop->offload_mode = VMXNET3_OM_TSO;
3180d87a4d00Sryo 		sop->hlen = start;
3181d87a4d00Sryo 		sop->offload_pos = m->m_pkthdr.segsz;
3182d87a4d00Sryo 	} else if (m->m_pkthdr.csum_flags & (VMXNET3_CSUM_OFFLOAD |
3183d87a4d00Sryo 	    VMXNET3_CSUM_OFFLOAD_IPV6)) {
3184d87a4d00Sryo 		sop->offload_mode = VMXNET3_OM_CSUM;
3185d87a4d00Sryo 		sop->hlen = start;
3186d87a4d00Sryo 		sop->offload_pos = csum_start;
3187d87a4d00Sryo 	}
3188d87a4d00Sryo 
3189d87a4d00Sryo 	/* Finally, change the ownership. */
3190d87a4d00Sryo 	vmxnet3_barrier(sc, VMXNET3_BARRIER_WR);
3191d87a4d00Sryo 	sop->gen ^= 1;
3192d87a4d00Sryo 
3193d87a4d00Sryo 	txq->vxtxq_ts->npending += nsegs;
3194d87a4d00Sryo 	if (txq->vxtxq_ts->npending >= txq->vxtxq_ts->intr_threshold) {
3195d87a4d00Sryo 		struct vmxnet3_queue *vmxq;
3196d87a4d00Sryo 		vmxq = container_of(txq, struct vmxnet3_queue, vxq_txqueue);
3197d87a4d00Sryo 		txq->vxtxq_ts->npending = 0;
3198d87a4d00Sryo 		vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(vmxq->vxq_id),
3199d87a4d00Sryo 		    txr->vxtxr_head);
3200d87a4d00Sryo 	}
3201d87a4d00Sryo 
3202d87a4d00Sryo 	return (0);
3203d87a4d00Sryo }
3204d87a4d00Sryo 
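/*
 * Common transmit loop shared by the if_start (VMXNET3_TX_START) and
 * if_transmit (VMXNET3_TX_TRANSMIT) paths: dequeue packets while enough
 * descriptors remain and arm the Tx watchdog if anything was queued.
 */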
3205d87a4d00Sryo #define VMXNET3_TX_START 1
3206d87a4d00Sryo #define VMXNET3_TX_TRANSMIT 2
3207d87a4d00Sryo static inline void
3208d87a4d00Sryo vmxnet3_tx_common_locked(struct ifnet *ifp, struct vmxnet3_txqueue *txq, int txtype)
3209d87a4d00Sryo {
3210d87a4d00Sryo 	struct vmxnet3_softc *sc;
3211d87a4d00Sryo 	struct vmxnet3_txring *txr;
3212d87a4d00Sryo 	struct mbuf *m_head;
3213d87a4d00Sryo 	int tx;
3214d87a4d00Sryo 
3215d87a4d00Sryo 	sc = ifp->if_softc;
3216d87a4d00Sryo 	txr = &txq->vxtxq_cmd_ring;
3217d87a4d00Sryo 	tx = 0;
3218d87a4d00Sryo 
3219d87a4d00Sryo 	VMXNET3_TXQ_LOCK_ASSERT(txq);
3220d87a4d00Sryo 
3221db465cddSriastradh 	if (txq->vxtxq_stopping || sc->vmx_link_active == 0)
3222d87a4d00Sryo 		return;
3223d87a4d00Sryo 
3224d87a4d00Sryo 	for (;;) {
3225d87a4d00Sryo 		if (txtype == VMXNET3_TX_START)
3226d87a4d00Sryo 			IFQ_POLL(&ifp->if_snd, m_head);
3227d87a4d00Sryo 		else
3228d87a4d00Sryo 			m_head = pcq_peek(txq->vxtxq_interq);
3229d87a4d00Sryo 		if (m_head == NULL)
3230d87a4d00Sryo 			break;
3231d87a4d00Sryo 
3232d87a4d00Sryo 		if (vmxnet3_txring_avail(txr) < VMXNET3_TX_MAXSEGS)
3233d87a4d00Sryo 			break;
3234d87a4d00Sryo 
3235d87a4d00Sryo 		if (txtype == VMXNET3_TX_START)
3236d87a4d00Sryo 			IFQ_DEQUEUE(&ifp->if_snd, m_head);
3237d87a4d00Sryo 		else
3238d87a4d00Sryo 			m_head = pcq_get(txq->vxtxq_interq);
3239d87a4d00Sryo 		if (m_head == NULL)
3240d87a4d00Sryo 			break;
3241d87a4d00Sryo 
3242d87a4d00Sryo 		if (vmxnet3_txq_encap(txq, &m_head) != 0) {
3243d87a4d00Sryo 			m_freem(m_head);
3244d87a4d00Sryo 			break;
3245d87a4d00Sryo 		}
3246d87a4d00Sryo 
3247d87a4d00Sryo 		tx++;
3248d87a4d00Sryo 		bpf_mtap(ifp, m_head, BPF_D_OUT);
3249d87a4d00Sryo 	}
3250d87a4d00Sryo 
3251d87a4d00Sryo 	if (tx > 0)
3252d87a4d00Sryo 		txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT;
3253d87a4d00Sryo }
3254d87a4d00Sryo 
3255d87a4d00Sryo static void
3256d87a4d00Sryo vmxnet3_start_locked(struct ifnet *ifp)
3257d87a4d00Sryo {
3258d87a4d00Sryo 	struct vmxnet3_softc *sc;
3259d87a4d00Sryo 	struct vmxnet3_txqueue *txq;
3260d87a4d00Sryo 
3261d87a4d00Sryo 	sc = ifp->if_softc;
3262d87a4d00Sryo 	txq = &sc->vmx_queue[0].vxq_txqueue;
3263d87a4d00Sryo 
3264d87a4d00Sryo 	vmxnet3_tx_common_locked(ifp, txq, VMXNET3_TX_START);
3265d87a4d00Sryo }
3266d87a4d00Sryo 
3267d87a4d00Sryo void
3268d87a4d00Sryo vmxnet3_start(struct ifnet *ifp)
3269d87a4d00Sryo {
3270d87a4d00Sryo 	struct vmxnet3_softc *sc;
3271d87a4d00Sryo 	struct vmxnet3_txqueue *txq;
3272d87a4d00Sryo 
3273d87a4d00Sryo 	sc = ifp->if_softc;
3274d87a4d00Sryo 	txq = &sc->vmx_queue[0].vxq_txqueue;
3275d87a4d00Sryo 
3276d87a4d00Sryo 	VMXNET3_TXQ_LOCK(txq);
3277d87a4d00Sryo 	vmxnet3_start_locked(ifp);
3278d87a4d00Sryo 	VMXNET3_TXQ_UNLOCK(txq);
3279d87a4d00Sryo }
3280d87a4d00Sryo 
3281d87a4d00Sryo static int
3282d87a4d00Sryo vmxnet3_select_txqueue(struct ifnet *ifp, struct mbuf *m __unused)
3283d87a4d00Sryo {
3284d87a4d00Sryo 	struct vmxnet3_softc *sc;
3285d87a4d00Sryo 	u_int cpuid;
3286d87a4d00Sryo 
3287d87a4d00Sryo 	sc = ifp->if_softc;
3288d87a4d00Sryo 	cpuid = cpu_index(curcpu());
3289d87a4d00Sryo 	/*
32901c79931eSandvar 	 * Future work
3291d87a4d00Sryo 	 * We should select txqueue to even up the load even if ncpu is
3292d87a4d00Sryo 	 * different from sc->vmx_ntxqueues. Currently, the load is not
3293d87a4d00Sryo 	 * even, that is, when ncpu is six and ntxqueues is four, the load
3294d87a4d00Sryo 	 * of vmx_queue[0] and vmx_queue[1] is higher than vmx_queue[2] and
3295d87a4d00Sryo 	 * vmx_queue[3] because CPU#4 always uses vmx_queue[0] and CPU#5 always
3296d87a4d00Sryo 	 * uses vmx_queue[1].
3297d87a4d00Sryo 	 * Furthermore, to avoid reordering we should not use a random value
3298d87a4d00Sryo 	 * to select the txqueue; we should use the mbuf's flow information.
3299d87a4d00Sryo 	 */
3300d87a4d00Sryo 	return cpuid % sc->vmx_ntxqueues;
3301d87a4d00Sryo }
3302d87a4d00Sryo 
3303d87a4d00Sryo static void
3304d87a4d00Sryo vmxnet3_transmit_locked(struct ifnet *ifp, struct vmxnet3_txqueue *txq)
3305d87a4d00Sryo {
3306d87a4d00Sryo 
3307d87a4d00Sryo 	vmxnet3_tx_common_locked(ifp, txq, VMXNET3_TX_TRANSMIT);
3308d87a4d00Sryo }
3309d87a4d00Sryo 
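/*
 * if_transmit entry point: select a queue from the current CPU, enqueue
 * the packet on that queue's pcq, then either run the transmit loop
 * directly if the queue lock is uncontended or defer it to the per-queue
 * softint (always deferred when VMXNET3_ALWAYS_TXDEFER is defined).
 */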
3310d87a4d00Sryo static int
3311d87a4d00Sryo vmxnet3_transmit(struct ifnet *ifp, struct mbuf *m)
3312d87a4d00Sryo {
3313d87a4d00Sryo 	struct vmxnet3_softc *sc;
3314d87a4d00Sryo 	struct vmxnet3_txqueue *txq;
3315d87a4d00Sryo 	int qid;
3316d87a4d00Sryo 
3317d87a4d00Sryo 	qid = vmxnet3_select_txqueue(ifp, m);
3318d87a4d00Sryo 	sc = ifp->if_softc;
3319d87a4d00Sryo 	txq = &sc->vmx_queue[qid].vxq_txqueue;
3320d87a4d00Sryo 
3321d87a4d00Sryo 	if (__predict_false(!pcq_put(txq->vxtxq_interq, m))) {
3322d87a4d00Sryo 		VMXNET3_TXQ_LOCK(txq);
3323d87a4d00Sryo 		txq->vxtxq_pcqdrop.ev_count++;
3324d87a4d00Sryo 		VMXNET3_TXQ_UNLOCK(txq);
3325d87a4d00Sryo 		m_freem(m);
3326d87a4d00Sryo 		return ENOBUFS;
3327d87a4d00Sryo 	}
3328d87a4d00Sryo 
33292e9219acSknakahara #ifdef VMXNET3_ALWAYS_TXDEFER
33302e9219acSknakahara 	kpreempt_disable();
33312e9219acSknakahara 	softint_schedule(txq->vxtxq_si);
33322e9219acSknakahara 	kpreempt_enable();
33332e9219acSknakahara #else
3334d87a4d00Sryo 	if (VMXNET3_TXQ_TRYLOCK(txq)) {
3335d87a4d00Sryo 		vmxnet3_transmit_locked(ifp, txq);
3336d87a4d00Sryo 		VMXNET3_TXQ_UNLOCK(txq);
3337d87a4d00Sryo 	} else {
3338d87a4d00Sryo 		kpreempt_disable();
3339d87a4d00Sryo 		softint_schedule(txq->vxtxq_si);
3340d87a4d00Sryo 		kpreempt_enable();
3341d87a4d00Sryo 	}
33422e9219acSknakahara #endif
3343d87a4d00Sryo 
3344d87a4d00Sryo 	return 0;
3345d87a4d00Sryo }
3346d87a4d00Sryo 
3347d87a4d00Sryo static void
3348d87a4d00Sryo vmxnet3_deferred_transmit(void *arg)
3349d87a4d00Sryo {
3350d87a4d00Sryo 	struct vmxnet3_txqueue *txq = arg;
3351d87a4d00Sryo 	struct vmxnet3_softc *sc = txq->vxtxq_sc;
3352d87a4d00Sryo 	struct ifnet *ifp = &sc->vmx_ethercom.ec_if;
3353d87a4d00Sryo 
3354d87a4d00Sryo 	VMXNET3_TXQ_LOCK(txq);
3355d87a4d00Sryo 	txq->vxtxq_transmitdef.ev_count++;
3356d87a4d00Sryo 	if (pcq_peek(txq->vxtxq_interq) != NULL)
3357d87a4d00Sryo 		vmxnet3_transmit_locked(ifp, txq);
3358d87a4d00Sryo 	VMXNET3_TXQ_UNLOCK(txq);
3359d87a4d00Sryo }
3360d87a4d00Sryo 
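/*
 * Program the Rx filter.  Unicast to our station address and broadcast are
 * always accepted; the multicast table is built from the ethercom list
 * unless promiscuous mode, a multicast address range, or too many entries
 * force ALLMULTI.
 */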
3361d87a4d00Sryo static void
3362d87a4d00Sryo vmxnet3_set_rxfilter(struct vmxnet3_softc *sc)
3363d87a4d00Sryo {
3364d87a4d00Sryo 	struct ethercom *ec = &sc->vmx_ethercom;
3365d87a4d00Sryo 	struct vmxnet3_driver_shared *ds = sc->vmx_ds;
3366d87a4d00Sryo 	struct ether_multi *enm;
3367d87a4d00Sryo 	struct ether_multistep step;
3368d87a4d00Sryo 	u_int mode;
3369d87a4d00Sryo 	uint8_t *p;
3370d87a4d00Sryo 
3371db465cddSriastradh 	VMXNET3_CORE_LOCK_ASSERT(sc);
3372db465cddSriastradh 
3373d87a4d00Sryo 	ds->mcast_tablelen = 0;
3374d87a4d00Sryo 	ETHER_LOCK(ec);
3375d87a4d00Sryo 	CLR(ec->ec_flags, ETHER_F_ALLMULTI);
3376d87a4d00Sryo 	ETHER_UNLOCK(ec);
3377d87a4d00Sryo 
3378d87a4d00Sryo 	/*
3379d87a4d00Sryo 	 * Always accept broadcast frames.
3380d87a4d00Sryo 	 * Always accept frames destined to our station address.
3381d87a4d00Sryo 	 */
3382d87a4d00Sryo 	mode = VMXNET3_RXMODE_BCAST | VMXNET3_RXMODE_UCAST;
3383d87a4d00Sryo 
3384d87a4d00Sryo 	ETHER_LOCK(ec);
3385db465cddSriastradh 	if (sc->vmx_promisc ||
3386d87a4d00Sryo 	    ec->ec_multicnt > VMXNET3_MULTICAST_MAX)
3387d87a4d00Sryo 		goto allmulti;
3388d87a4d00Sryo 
3389d87a4d00Sryo 	p = sc->vmx_mcast;
3390d87a4d00Sryo 	ETHER_FIRST_MULTI(step, ec, enm);
3391d87a4d00Sryo 	while (enm != NULL) {
3392d87a4d00Sryo 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3393d87a4d00Sryo 			/*
3394d87a4d00Sryo 			 * We must listen to a range of multicast addresses.
3395d87a4d00Sryo 			 * For now, just accept all multicasts, rather than
3396d87a4d00Sryo 			 * trying to set only those filter bits needed to match
3397d87a4d00Sryo 			 * the range.  (At this time, the only use of address
3398d87a4d00Sryo 			 * ranges is for IP multicast routing, for which the
3399d87a4d00Sryo 			 * range is big enough to require all bits set.)
3400d87a4d00Sryo 			 */
3401d87a4d00Sryo 			goto allmulti;
3402d87a4d00Sryo 		}
3403d87a4d00Sryo 		memcpy(p, enm->enm_addrlo, ETHER_ADDR_LEN);
3404d87a4d00Sryo 
3405d87a4d00Sryo 		p += ETHER_ADDR_LEN;
3406d87a4d00Sryo 
3407d87a4d00Sryo 		ETHER_NEXT_MULTI(step, enm);
3408d87a4d00Sryo 	}
3409d87a4d00Sryo 
3410d87a4d00Sryo 	if (ec->ec_multicnt > 0) {
3411d87a4d00Sryo 		SET(mode, VMXNET3_RXMODE_MCAST);
3412d87a4d00Sryo 		ds->mcast_tablelen = p - sc->vmx_mcast;
3413d87a4d00Sryo 	}
3414d87a4d00Sryo 	ETHER_UNLOCK(ec);
3415d87a4d00Sryo 
3416d87a4d00Sryo 	goto setit;
3417d87a4d00Sryo 
3418d87a4d00Sryo allmulti:
3419d87a4d00Sryo 	SET(ec->ec_flags, ETHER_F_ALLMULTI);
3420d87a4d00Sryo 	ETHER_UNLOCK(ec);
3421d87a4d00Sryo 	SET(mode, (VMXNET3_RXMODE_ALLMULTI | VMXNET3_RXMODE_MCAST));
3422db465cddSriastradh 	if (sc->vmx_promisc)
3423d87a4d00Sryo 		SET(mode, VMXNET3_RXMODE_PROMISC);
3424d87a4d00Sryo 
3425d87a4d00Sryo setit:
3426d87a4d00Sryo 	vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_FILTER);
3427d87a4d00Sryo 	ds->rxmode = mode;
3428d87a4d00Sryo 	vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_RXMODE);
3429d87a4d00Sryo }
3430d87a4d00Sryo 
3431d87a4d00Sryo static int
3432d87a4d00Sryo vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3433d87a4d00Sryo {
3434d87a4d00Sryo 	struct vmxnet3_softc *sc = ifp->if_softc;
3435d87a4d00Sryo 	struct ifreq *ifr = (struct ifreq *)data;
3436d87a4d00Sryo 	int s, error = 0;
3437d87a4d00Sryo 
3438d87a4d00Sryo 	switch (cmd) {
3439db465cddSriastradh 	case SIOCADDMULTI:
3440db465cddSriastradh 	case SIOCDELMULTI:
3441db465cddSriastradh 		break;
3442db465cddSriastradh 	default:
3443db465cddSriastradh 		KASSERT(IFNET_LOCKED(ifp));
3444db465cddSriastradh 	}
3445db465cddSriastradh 
3446db465cddSriastradh 	switch (cmd) {
3447d87a4d00Sryo 	case SIOCSIFMTU: {
3448d87a4d00Sryo 		int nmtu = ifr->ifr_mtu;
3449d87a4d00Sryo 
3450d87a4d00Sryo 		if (nmtu < VMXNET3_MIN_MTU || nmtu > VMXNET3_MAX_MTU) {
3451d87a4d00Sryo 			error = EINVAL;
3452d87a4d00Sryo 			break;
3453d87a4d00Sryo 		}
345435492473Sryo 		if (ifp->if_mtu != (uint64_t)nmtu) {
3455d87a4d00Sryo 			s = splnet();
3456d87a4d00Sryo 			error = ether_ioctl(ifp, cmd, data);
3457d87a4d00Sryo 			splx(s);
3458d87a4d00Sryo 			if (error == ENETRESET)
3459d87a4d00Sryo 				error = vmxnet3_init(ifp);
3460d87a4d00Sryo 		}
3461d87a4d00Sryo 		break;
3462d87a4d00Sryo 	}
3463d87a4d00Sryo 
3464d87a4d00Sryo 	default:
3465d87a4d00Sryo 		s = splnet();
3466d87a4d00Sryo 		error = ether_ioctl(ifp, cmd, data);
3467d87a4d00Sryo 		splx(s);
3468d87a4d00Sryo 	}
3469d87a4d00Sryo 
3470d87a4d00Sryo 	if (error == ENETRESET) {
3471d87a4d00Sryo 		VMXNET3_CORE_LOCK(sc);
3472db465cddSriastradh 		if (sc->vmx_mcastactive)
3473d87a4d00Sryo 			vmxnet3_set_rxfilter(sc);
3474d87a4d00Sryo 		VMXNET3_CORE_UNLOCK(sc);
3475d87a4d00Sryo 		error = 0;
3476d87a4d00Sryo 	}
3477d87a4d00Sryo 
3478d87a4d00Sryo 	return error;
3479d87a4d00Sryo }
3480d87a4d00Sryo 
3481d87a4d00Sryo static int
3482d87a4d00Sryo vmxnet3_ifflags_cb(struct ethercom *ec)
3483d87a4d00Sryo {
3484db465cddSriastradh 	struct ifnet *ifp = &ec->ec_if;
3485db465cddSriastradh 	struct vmxnet3_softc *sc = ifp->if_softc;
3486db465cddSriastradh 	int error = 0;
3487d87a4d00Sryo 
3488db465cddSriastradh 	KASSERT(IFNET_LOCKED(ifp));
3489d87a4d00Sryo 
3490d87a4d00Sryo 	VMXNET3_CORE_LOCK(sc);
3491db465cddSriastradh 	const unsigned short changed = ifp->if_flags ^ sc->vmx_if_flags;
3492db465cddSriastradh 	if ((changed & ~(IFF_CANTCHANGE | IFF_DEBUG)) == 0) {
3493db465cddSriastradh 		sc->vmx_if_flags = ifp->if_flags;
3494db465cddSriastradh 		if (changed & IFF_PROMISC) {
3495db465cddSriastradh 			sc->vmx_promisc = ifp->if_flags & IFF_PROMISC;
3496db465cddSriastradh 			error = ENETRESET;
3497db465cddSriastradh 		}
3498db465cddSriastradh 	} else {
3499db465cddSriastradh 		error = ENETRESET;
3500db465cddSriastradh 	}
3501d87a4d00Sryo 	VMXNET3_CORE_UNLOCK(sc);
3502d87a4d00Sryo 
3503d87a4d00Sryo 	vmxnet3_if_link_status(sc);
3504d87a4d00Sryo 
3505db465cddSriastradh 	return error;
3506d87a4d00Sryo }
3507d87a4d00Sryo 
3508d87a4d00Sryo static int
3509d87a4d00Sryo vmxnet3_watchdog(struct vmxnet3_txqueue *txq)
3510d87a4d00Sryo {
3511d87a4d00Sryo 	struct vmxnet3_softc *sc;
3512d87a4d00Sryo 	struct vmxnet3_queue *vmxq;
3513d87a4d00Sryo 
3514d87a4d00Sryo 	sc = txq->vxtxq_sc;
3515d87a4d00Sryo 	vmxq = container_of(txq, struct vmxnet3_queue, vxq_txqueue);
3516d87a4d00Sryo 
3517d87a4d00Sryo 	VMXNET3_TXQ_LOCK(txq);
3518d87a4d00Sryo 	if (txq->vxtxq_watchdog == 0 || --txq->vxtxq_watchdog) {
3519d87a4d00Sryo 		VMXNET3_TXQ_UNLOCK(txq);
3520d87a4d00Sryo 		return (0);
3521d87a4d00Sryo 	}
3522d87a4d00Sryo 	txq->vxtxq_watchdogto.ev_count++;
3523d87a4d00Sryo 	VMXNET3_TXQ_UNLOCK(txq);
3524d87a4d00Sryo 
3525d87a4d00Sryo 	device_printf(sc->vmx_dev, "watchdog timeout on queue %d\n",
3526d87a4d00Sryo 	    vmxq->vxq_id);
3527d87a4d00Sryo 	return (1);
3528d87a4d00Sryo }
3529d87a4d00Sryo 
3530d87a4d00Sryo static void
3531d87a4d00Sryo vmxnet3_refresh_host_stats(struct vmxnet3_softc *sc)
3532d87a4d00Sryo {
3533d87a4d00Sryo 
3534d87a4d00Sryo 	vmxnet3_write_cmd(sc, VMXNET3_CMD_GET_STATS);
3535d87a4d00Sryo }
3536d87a4d00Sryo 
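/*
 * Once-per-second callout: refresh the host statistics and run the
 * per-queue Tx watchdogs.  On a watchdog timeout the reset workqueue is
 * scheduled instead of rearming the callout.
 */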
3537d87a4d00Sryo static void
3538d87a4d00Sryo vmxnet3_tick(void *xsc)
3539d87a4d00Sryo {
3540d87a4d00Sryo 	struct vmxnet3_softc *sc;
3541d87a4d00Sryo 	int i, timedout;
3542d87a4d00Sryo 
3543d87a4d00Sryo 	sc = xsc;
3544d87a4d00Sryo 	timedout = 0;
3545d87a4d00Sryo 
3546d87a4d00Sryo 	VMXNET3_CORE_LOCK(sc);
3547d87a4d00Sryo 
3548d87a4d00Sryo 	vmxnet3_refresh_host_stats(sc);
3549d87a4d00Sryo 
3550d87a4d00Sryo 	for (i = 0; i < sc->vmx_ntxqueues; i++)
3551d87a4d00Sryo 		timedout |= vmxnet3_watchdog(&sc->vmx_queue[i].vxq_txqueue);
3552d87a4d00Sryo 
3553db465cddSriastradh 	if (timedout != 0) {
3554db465cddSriastradh 		if (!sc->vmx_reset_pending) {
3555db465cddSriastradh 			sc->vmx_reset_pending = true;
3556db465cddSriastradh 			workqueue_enqueue(sc->vmx_reset_wq,
3557db465cddSriastradh 			    &sc->vmx_reset_work, NULL);
3558db465cddSriastradh 		}
3559db465cddSriastradh 	} else {
3560d87a4d00Sryo 		callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
3561db465cddSriastradh 	}
3562d87a4d00Sryo 
3563d87a4d00Sryo 	VMXNET3_CORE_UNLOCK(sc);
3564d87a4d00Sryo }
3565d87a4d00Sryo 
3566db465cddSriastradh static void
3567db465cddSriastradh vmxnet3_reset_work(struct work *work, void *arg)
3568db465cddSriastradh {
3569db465cddSriastradh 	struct vmxnet3_softc *sc = arg;
3570db465cddSriastradh 	struct ifnet *ifp = &sc->vmx_ethercom.ec_if;
3571db465cddSriastradh 
3572db465cddSriastradh 	VMXNET3_CORE_LOCK(sc);
3573db465cddSriastradh 	KASSERT(sc->vmx_reset_pending);
3574db465cddSriastradh 	sc->vmx_reset_pending = false;
3575db465cddSriastradh 	VMXNET3_CORE_UNLOCK(sc);
3576db465cddSriastradh 
3577db465cddSriastradh 	IFNET_LOCK(ifp);
3578db465cddSriastradh 	(void)vmxnet3_init(ifp);
3579db465cddSriastradh 	IFNET_UNLOCK(ifp);
3580db465cddSriastradh }
3581db465cddSriastradh 
3582d87a4d00Sryo /*
3583d87a4d00Sryo  * update link state of ifnet and softc
3584d87a4d00Sryo  */
3585d87a4d00Sryo static void
3586d87a4d00Sryo vmxnet3_if_link_status(struct vmxnet3_softc *sc)
3587d87a4d00Sryo {
3588d87a4d00Sryo 	struct ifnet *ifp = &sc->vmx_ethercom.ec_if;
3589a099e3b4Sryo 	u_int link;
3590a099e3b4Sryo 	bool up;
3591d87a4d00Sryo 
3592a099e3b4Sryo 	up = vmxnet3_cmd_link_status(ifp);
3593a099e3b4Sryo 	if (up) {
3594d87a4d00Sryo 		sc->vmx_link_active = 1;
3595d87a4d00Sryo 		link = LINK_STATE_UP;
3596d87a4d00Sryo 	} else {
3597d87a4d00Sryo 		sc->vmx_link_active = 0;
3598d87a4d00Sryo 		link = LINK_STATE_DOWN;
3599d87a4d00Sryo 	}
3600d87a4d00Sryo 
3601d87a4d00Sryo 	if_link_state_change(ifp, link);
3602d87a4d00Sryo }
3603d87a4d00Sryo 
3604d87a4d00Sryo /*
3605d87a4d00Sryo  * check the vmx(4) link state via VMXNET3_CMD_GET_LINK and update ifp->if_baudrate
3606d87a4d00Sryo  *   returns
3607d87a4d00Sryo  *       - true:  link up
36081c79931eSandvar  *       - false: link down
3609d87a4d00Sryo  */
3610d87a4d00Sryo static bool
3611d87a4d00Sryo vmxnet3_cmd_link_status(struct ifnet *ifp)
3612d87a4d00Sryo {
3613d87a4d00Sryo 	struct vmxnet3_softc *sc = ifp->if_softc;
3614d87a4d00Sryo 	u_int x, speed;
3615d87a4d00Sryo 
3616d87a4d00Sryo 	x = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK);
3617d87a4d00Sryo 	if ((x & 1) == 0)
3618d87a4d00Sryo 		return false;
3619d87a4d00Sryo 
3620d87a4d00Sryo 	speed = x >> 16;
3621d87a4d00Sryo 	ifp->if_baudrate = IF_Mbps(speed);
3622d87a4d00Sryo 	return true;
3623d87a4d00Sryo }
3624d87a4d00Sryo 
3625d87a4d00Sryo static void
3626d87a4d00Sryo vmxnet3_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
3627d87a4d00Sryo {
3628d87a4d00Sryo 	bool up;
3629d87a4d00Sryo 
3630d87a4d00Sryo 	ifmr->ifm_status = IFM_AVALID;
3631d87a4d00Sryo 	ifmr->ifm_active = IFM_ETHER;
3632d87a4d00Sryo 
3633d87a4d00Sryo 	up = vmxnet3_cmd_link_status(ifp);
3634d87a4d00Sryo 	if (!up)
3635d87a4d00Sryo 		return;
3636d87a4d00Sryo 
3637d87a4d00Sryo 	ifmr->ifm_status |= IFM_ACTIVE;
3638d87a4d00Sryo 
3639d87a4d00Sryo 	if (ifp->if_baudrate >= IF_Gbps(10ULL))
36401ff1d567Smsaitoh 		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
3641d87a4d00Sryo }
3642d87a4d00Sryo 
3643d87a4d00Sryo static int
3644d87a4d00Sryo vmxnet3_ifmedia_change(struct ifnet *ifp)
3645d87a4d00Sryo {
3646d87a4d00Sryo 	return 0;
3647d87a4d00Sryo }
3648d87a4d00Sryo 
3649d87a4d00Sryo static void
3650d87a4d00Sryo vmxnet3_set_lladdr(struct vmxnet3_softc *sc)
3651d87a4d00Sryo {
3652d87a4d00Sryo 	uint32_t ml, mh;
3653d87a4d00Sryo 
3654d87a4d00Sryo 	ml  = sc->vmx_lladdr[0];
3655d87a4d00Sryo 	ml |= sc->vmx_lladdr[1] << 8;
3656d87a4d00Sryo 	ml |= sc->vmx_lladdr[2] << 16;
3657d87a4d00Sryo 	ml |= sc->vmx_lladdr[3] << 24;
3658d87a4d00Sryo 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACL, ml);
3659d87a4d00Sryo 
3660d87a4d00Sryo 	mh  = sc->vmx_lladdr[4];
3661d87a4d00Sryo 	mh |= sc->vmx_lladdr[5] << 8;
3662d87a4d00Sryo 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACH, mh);
3663d87a4d00Sryo }
3664d87a4d00Sryo 
3665d87a4d00Sryo static void
3666d87a4d00Sryo vmxnet3_get_lladdr(struct vmxnet3_softc *sc)
3667d87a4d00Sryo {
3668d87a4d00Sryo 	uint32_t ml, mh;
3669d87a4d00Sryo 
3670d87a4d00Sryo 	ml = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACL);
3671d87a4d00Sryo 	mh = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACH);
3672d87a4d00Sryo 
3673d87a4d00Sryo 	sc->vmx_lladdr[0] = ml;
3674d87a4d00Sryo 	sc->vmx_lladdr[1] = ml >> 8;
3675d87a4d00Sryo 	sc->vmx_lladdr[2] = ml >> 16;
3676d87a4d00Sryo 	sc->vmx_lladdr[3] = ml >> 24;
3677d87a4d00Sryo 	sc->vmx_lladdr[4] = mh;
3678d87a4d00Sryo 	sc->vmx_lladdr[5] = mh >> 8;
3679d87a4d00Sryo }
3680d87a4d00Sryo 
3681d87a4d00Sryo static void
3682d87a4d00Sryo vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc)
3683d87a4d00Sryo {
3684d87a4d00Sryo 	int i;
3685d87a4d00Sryo 
3686d87a4d00Sryo 	sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL;
3687d87a4d00Sryo 	for (i = 0; i < sc->vmx_nintrs; i++)
3688d87a4d00Sryo 		vmxnet3_enable_intr(sc, i);
3689d87a4d00Sryo }
3690d87a4d00Sryo 
3691d87a4d00Sryo static void
3692d87a4d00Sryo vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc)
3693d87a4d00Sryo {
3694d87a4d00Sryo 	int i;
3695d87a4d00Sryo 
3696d87a4d00Sryo 	sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL;
3697d87a4d00Sryo 	for (i = 0; i < sc->vmx_nintrs; i++)
3698d87a4d00Sryo 		vmxnet3_disable_intr(sc, i);
3699d87a4d00Sryo }
3700d87a4d00Sryo 
3701d87a4d00Sryo static int
3702d87a4d00Sryo vmxnet3_dma_malloc(struct vmxnet3_softc *sc, bus_size_t size, bus_size_t align,
3703d87a4d00Sryo     struct vmxnet3_dma_alloc *dma)
3704d87a4d00Sryo {
3705d87a4d00Sryo 	bus_dma_tag_t t = sc->vmx_dmat;
3706d87a4d00Sryo 	bus_dma_segment_t *segs = dma->dma_segs;
3707d87a4d00Sryo 	int n, error;
3708d87a4d00Sryo 
3709d87a4d00Sryo 	memset(dma, 0, sizeof(*dma));
3710d87a4d00Sryo 
3711d87a4d00Sryo 	error = bus_dmamem_alloc(t, size, align, 0, segs, 1, &n, BUS_DMA_NOWAIT);
3712d87a4d00Sryo 	if (error) {
3713d87a4d00Sryo 		aprint_error_dev(sc->vmx_dev, "bus_dmamem_alloc failed: %d\n", error);
3714d87a4d00Sryo 		goto fail1;
3715d87a4d00Sryo 	}
3716d87a4d00Sryo 	KASSERT(n == 1);
3717d87a4d00Sryo 
3718d87a4d00Sryo 	error = bus_dmamem_map(t, segs, 1, size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
3719d87a4d00Sryo 	if (error) {
3720d87a4d00Sryo 		aprint_error_dev(sc->vmx_dev, "bus_dmamem_map failed: %d\n", error);
3721d87a4d00Sryo 		goto fail2;
3722d87a4d00Sryo 	}
3723d87a4d00Sryo 
3724d87a4d00Sryo 	error = bus_dmamap_create(t, size, 1, size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
3725d87a4d00Sryo 	if (error) {
3726d87a4d00Sryo 		aprint_error_dev(sc->vmx_dev, "bus_dmamap_create failed: %d\n", error);
3727d87a4d00Sryo 		goto fail3;
3728d87a4d00Sryo 	}
3729d87a4d00Sryo 
3730d87a4d00Sryo 	error = bus_dmamap_load(t, dma->dma_map, dma->dma_vaddr, size, NULL,
3731d87a4d00Sryo 	    BUS_DMA_NOWAIT);
3732d87a4d00Sryo 	if (error) {
3733d87a4d00Sryo 		aprint_error_dev(sc->vmx_dev, "bus_dmamap_load failed: %d\n", error);
3734d87a4d00Sryo 		goto fail4;
3735d87a4d00Sryo 	}
3736d87a4d00Sryo 
3737d87a4d00Sryo 	memset(dma->dma_vaddr, 0, size);
3738d87a4d00Sryo 	dma->dma_paddr = DMAADDR(dma->dma_map);
3739d87a4d00Sryo 	dma->dma_size = size;
3740d87a4d00Sryo 
3741d87a4d00Sryo 	return (0);
3742d87a4d00Sryo fail4:
3743d87a4d00Sryo 	bus_dmamap_destroy(t, dma->dma_map);
3744d87a4d00Sryo fail3:
3745d87a4d00Sryo 	bus_dmamem_unmap(t, dma->dma_vaddr, size);
3746d87a4d00Sryo fail2:
3747d87a4d00Sryo 	bus_dmamem_free(t, segs, 1);
3748d87a4d00Sryo fail1:
3749d87a4d00Sryo 	return (error);
3750d87a4d00Sryo }
3751d87a4d00Sryo 
3752d87a4d00Sryo static void
3753d87a4d00Sryo vmxnet3_dma_free(struct vmxnet3_softc *sc, struct vmxnet3_dma_alloc *dma)
3754d87a4d00Sryo {
3755d87a4d00Sryo 	bus_dma_tag_t t = sc->vmx_dmat;
3756d87a4d00Sryo 
3757d87a4d00Sryo 	bus_dmamap_unload(t, dma->dma_map);
3758d87a4d00Sryo 	bus_dmamap_destroy(t, dma->dma_map);
3759d87a4d00Sryo 	bus_dmamem_unmap(t, dma->dma_vaddr, dma->dma_size);
3760d87a4d00Sryo 	bus_dmamem_free(t, dma->dma_segs, 1);
3761d87a4d00Sryo 
3762d87a4d00Sryo 	memset(dma, 0, sizeof(*dma));
3763d87a4d00Sryo }
376435492473Sryo 
376535492473Sryo MODULE(MODULE_CLASS_DRIVER, if_vmx, "pci");
376635492473Sryo 
376735492473Sryo #ifdef _MODULE
376835492473Sryo #include "ioconf.c"
376935492473Sryo #endif
377035492473Sryo 
377135492473Sryo static int
377235492473Sryo if_vmx_modcmd(modcmd_t cmd, void *opaque)
377335492473Sryo {
377435492473Sryo 	int error = 0;
377535492473Sryo 
377635492473Sryo 	switch (cmd) {
377735492473Sryo 	case MODULE_CMD_INIT:
377835492473Sryo #ifdef _MODULE
377935492473Sryo 		error = config_init_component(cfdriver_ioconf_if_vmx,
378035492473Sryo 		    cfattach_ioconf_if_vmx, cfdata_ioconf_if_vmx);
378135492473Sryo #endif
378235492473Sryo 		return error;
378335492473Sryo 	case MODULE_CMD_FINI:
378435492473Sryo #ifdef _MODULE
378535492473Sryo 		error = config_fini_component(cfdriver_ioconf_if_vmx,
378635492473Sryo 		    cfattach_ioconf_if_vmx, cfdata_ioconf_if_vmx);
378735492473Sryo #endif
378835492473Sryo 		return error;
378935492473Sryo 	default:
379035492473Sryo 		return ENOTTY;
379135492473Sryo 	}
379235492473Sryo }
379335492473Sryo 
3794