xref: /openbsd-src/sys/dev/pci/if_oce.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /*	$OpenBSD: if_oce.c,v 1.76 2014/07/12 18:48:52 tedu Exp $	*/
2 
3 /*
4  * Copyright (c) 2012 Mike Belopuhov
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*-
20  * Copyright (C) 2012 Emulex
21  * All rights reserved.
22  *
23  * Redistribution and use in source and binary forms, with or without
24  * modification, are permitted provided that the following conditions are met:
25  *
26  * 1. Redistributions of source code must retain the above copyright notice,
27  *    this list of conditions and the following disclaimer.
28  *
29  * 2. Redistributions in binary form must reproduce the above copyright
30  *    notice, this list of conditions and the following disclaimer in the
31  *    documentation and/or other materials provided with the distribution.
32  *
33  * 3. Neither the name of the Emulex Corporation nor the names of its
34  *    contributors may be used to endorse or promote products derived from
35  *    this software without specific prior written permission.
36  *
37  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
38  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
39  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
40  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
41  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
42  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
43  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
44  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
45  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
46  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
47  * POSSIBILITY OF SUCH DAMAGE.
48  *
49  * Contact Information:
50  * freebsd-drivers@emulex.com
51  *
52  * Emulex
53  * 3333 Susan Street
54  * Costa Mesa, CA 92626
55  */
56 
57 #include "bpfilter.h"
58 #include "vlan.h"
59 
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/sockio.h>
63 #include <sys/mbuf.h>
64 #include <sys/malloc.h>
65 #include <sys/kernel.h>
66 #include <sys/device.h>
67 #include <sys/socket.h>
68 #include <sys/queue.h>
69 #include <sys/timeout.h>
70 #include <sys/pool.h>
71 
72 #include <net/if.h>
73 #include <net/if_dl.h>
74 #include <net/if_media.h>
75 
76 #ifdef INET
77 #include <netinet/in.h>
78 #include <netinet/in_systm.h>
79 #include <netinet/ip.h>
80 #include <netinet/if_ether.h>
81 #endif
82 
83 #ifdef INET6
84 #include <netinet/ip6.h>
85 #endif
86 
87 #if NBPFILTER > 0
88 #include <net/bpf.h>
89 #endif
90 
91 #if NVLAN > 0
92 #include <net/if_types.h>
93 #include <net/if_vlan_var.h>
94 #endif
95 
96 #include <dev/pci/pcireg.h>
97 #include <dev/pci/pcivar.h>
98 #include <dev/pci/pcidevs.h>
99 
100 #include <dev/pci/if_ocereg.h>
101 
102 #ifndef TRUE
103 #define TRUE			1
104 #endif
105 #ifndef FALSE
106 #define FALSE			0
107 #endif
108 
109 #define OCE_MBX_TIMEOUT		5
110 
111 #define OCE_MAX_PAYLOAD		65536
112 
113 #define OCE_TX_RING_SIZE	512
114 #define OCE_RX_RING_SIZE	1024
115 
116 /* This should be powers of 2. Like 2,4,8 & 16 */
117 #define OCE_MAX_RSS		4 /* TODO: 8 */
118 #define OCE_MAX_RQ		OCE_MAX_RSS + 1 /* one default queue */
119 #define OCE_MAX_WQ		8
120 
121 #define OCE_MAX_EQ		32
122 #define OCE_MAX_CQ		OCE_MAX_RQ + OCE_MAX_WQ + 1 /* one MCC queue */
123 #define OCE_MAX_CQ_EQ		8 /* Max CQ that can attached to an EQ */
124 
125 #define OCE_DEFAULT_EQD		80
126 
127 #define OCE_MIN_MTU		256
128 #define OCE_MAX_MTU		9000
129 
130 #define OCE_MAX_RQ_COMPL	64
131 #define OCE_MAX_RQ_POSTS	255
132 #define OCE_RX_BUF_SIZE		2048
133 
134 #define OCE_MAX_TX_ELEMENTS	29
135 #define OCE_MAX_TX_DESC		1024
136 #define OCE_MAX_TX_SIZE		65535
137 
138 #define OCE_MEM_KVA(_m)		((void *)((_m)->vaddr))
139 #define OCE_MEM_DVA(_m)		((_m)->paddr)
140 
141 #define OCE_WQ_FOREACH(sc, wq, i) 	\
142 	for (i = 0, wq = sc->sc_wq[0]; i < sc->sc_nwq; i++, wq = sc->sc_wq[i])
143 #define OCE_RQ_FOREACH(sc, rq, i) 	\
144 	for (i = 0, rq = sc->sc_rq[0]; i < sc->sc_nrq; i++, rq = sc->sc_rq[i])
145 #define OCE_EQ_FOREACH(sc, eq, i) 	\
146 	for (i = 0, eq = sc->sc_eq[0]; i < sc->sc_neq; i++, eq = sc->sc_eq[i])
147 #define OCE_CQ_FOREACH(sc, cq, i) 	\
148 	for (i = 0, cq = sc->sc_cq[0]; i < sc->sc_ncq; i++, cq = sc->sc_cq[i])
149 #define OCE_RING_FOREACH(_r, _v, _c)	\
150 	for ((_v) = oce_ring_first(_r); _c; (_v) = oce_ring_next(_r))
151 
152 static inline int
153 ilog2(unsigned int v)
154 {
155 	int r = 0;
156 
157 	while (v >>= 1)
158 		r++;
159 	return (r);
160 }
161 
162 struct oce_pkt {
163 	struct mbuf *		mbuf;
164 	bus_dmamap_t		map;
165 	int			nsegs;
166 	SIMPLEQ_ENTRY(oce_pkt)	entry;
167 };
168 SIMPLEQ_HEAD(oce_pkt_list, oce_pkt);
169 
170 struct oce_dma_mem {
171 	bus_dma_tag_t		tag;
172 	bus_dmamap_t		map;
173 	bus_dma_segment_t	segs;
174 	int			nsegs;
175 	bus_size_t		size;
176 	caddr_t			vaddr;
177 	bus_addr_t		paddr;
178 };
179 
180 struct oce_ring {
181 	int			index;
182 	int			nitems;
183 	int			nused;
184 	int			isize;
185 	struct oce_dma_mem	dma;
186 };
187 
188 struct oce_softc;
189 
190 enum cq_len {
191 	CQ_LEN_256  = 256,
192 	CQ_LEN_512  = 512,
193 	CQ_LEN_1024 = 1024
194 };
195 
196 enum eq_len {
197 	EQ_LEN_256  = 256,
198 	EQ_LEN_512  = 512,
199 	EQ_LEN_1024 = 1024,
200 	EQ_LEN_2048 = 2048,
201 	EQ_LEN_4096 = 4096
202 };
203 
204 enum eqe_size {
205 	EQE_SIZE_4  = 4,
206 	EQE_SIZE_16 = 16
207 };
208 
209 enum qtype {
210 	QTYPE_EQ,
211 	QTYPE_MQ,
212 	QTYPE_WQ,
213 	QTYPE_RQ,
214 	QTYPE_CQ,
215 	QTYPE_RSS
216 };
217 
218 struct oce_eq {
219 	struct oce_softc *	sc;
220 	struct oce_ring *	ring;
221 	enum qtype		type;
222 	int			id;
223 
224 	struct oce_cq *		cq[OCE_MAX_CQ_EQ];
225 	int			cq_valid;
226 
227 	int			nitems;
228 	int			isize;
229 	int			delay;
230 };
231 
232 struct oce_cq {
233 	struct oce_softc *	sc;
234 	struct oce_ring *	ring;
235 	enum qtype		type;
236 	int			id;
237 
238 	struct oce_eq *		eq;
239 
240 	void			(*cq_intr)(void *);
241 	void *			cb_arg;
242 
243 	int			nitems;
244 	int			nodelay;
245 	int			eventable;
246 	int			ncoalesce;
247 };
248 
249 struct oce_mq {
250 	struct oce_softc *	sc;
251 	struct oce_ring *	ring;
252 	enum qtype		type;
253 	int			id;
254 
255 	struct oce_cq *		cq;
256 
257 	int			nitems;
258 };
259 
260 struct oce_wq {
261 	struct oce_softc *	sc;
262 	struct oce_ring *	ring;
263 	enum qtype		type;
264 	int			id;
265 
266 	struct oce_cq *		cq;
267 
268 	struct oce_pkt_list	pkt_list;
269 	struct oce_pkt_list	pkt_free;
270 
271 	int			nitems;
272 };
273 
274 struct oce_rq {
275 	struct oce_softc *	sc;
276 	struct oce_ring *	ring;
277 	enum qtype		type;
278 	int			id;
279 
280 	struct oce_cq *		cq;
281 
282 	struct if_rxring	rxring;
283 	struct oce_pkt_list	pkt_list;
284 	struct oce_pkt_list	pkt_free;
285 
286 	uint32_t		rss_cpuid;
287 
288 #ifdef OCE_LRO
289 	struct lro_ctrl		lro;
290 	int			lro_pkts_queued;
291 #endif
292 
293 	int			nitems;
294 	int			fragsize;
295 	int			mtu;
296 	int			rss;
297 };
298 
299 struct oce_softc {
300 	struct device		sc_dev;
301 
302 	uint			sc_flags;
303 #define  OCE_F_BE2		 0x00000001
304 #define  OCE_F_BE3		 0x00000002
305 #define  OCE_F_XE201		 0x00000008
306 #define  OCE_F_BE3_NATIVE	 0x00000100
307 #define  OCE_F_RESET_RQD	 0x00001000
308 #define  OCE_F_MBOX_ENDIAN_RQD	 0x00002000
309 
310 	bus_dma_tag_t		sc_dmat;
311 
312 	bus_space_tag_t		sc_cfg_iot;
313 	bus_space_handle_t	sc_cfg_ioh;
314 	bus_size_t		sc_cfg_size;
315 
316 	bus_space_tag_t		sc_csr_iot;
317 	bus_space_handle_t	sc_csr_ioh;
318 	bus_size_t		sc_csr_size;
319 
320 	bus_space_tag_t		sc_db_iot;
321 	bus_space_handle_t	sc_db_ioh;
322 	bus_size_t		sc_db_size;
323 
324 	void *			sc_ih;
325 
326 	struct arpcom		sc_ac;
327 	struct ifmedia		sc_media;
328 	ushort			sc_link_up;
329 	ushort			sc_link_speed;
330 	uint			sc_fc;
331 
332 	struct oce_dma_mem	sc_mbx;
333 	struct oce_dma_mem	sc_pld;
334 
335 	uint			sc_port;
336 	uint			sc_fmode;
337 
338 	struct oce_wq *		sc_wq[OCE_MAX_WQ];	/* TX work queues */
339 	struct oce_rq *		sc_rq[OCE_MAX_RQ];	/* RX work queues */
340 	struct oce_cq *		sc_cq[OCE_MAX_CQ];	/* Completion queues */
341 	struct oce_eq *		sc_eq[OCE_MAX_EQ];	/* Event queues */
342 	struct oce_mq *		sc_mq;			/* Mailbox queue */
343 
344 	ushort			sc_neq;
345 	ushort			sc_ncq;
346 	ushort			sc_nrq;
347 	ushort			sc_nwq;
348 	ushort			sc_nintr;
349 
350 	ushort			sc_tx_ring_size;
351 	ushort			sc_rx_ring_size;
352 	ushort			sc_rss_enable;
353 
354 	uint32_t		sc_if_id;	/* interface ID */
355 	uint32_t		sc_pmac_id;	/* PMAC id */
356 	char			sc_macaddr[ETHER_ADDR_LEN];
357 
358 	uint32_t		sc_pvid;
359 
360 	uint64_t		sc_rx_errors;
361 	uint64_t		sc_tx_errors;
362 
363 	struct timeout		sc_tick;
364 	struct timeout		sc_rxrefill;
365 };
366 
367 #define IS_BE(sc)		ISSET((sc)->sc_flags, OCE_F_BE2 | OCE_F_BE3)
368 #define IS_XE201(sc)		ISSET((sc)->sc_flags, OCE_F_XE201)
369 
370 #define ADDR_HI(x)		((uint32_t)((uint64_t)(x) >> 32))
371 #define ADDR_LO(x)		((uint32_t)((uint64_t)(x) & 0xffffffff))
372 
373 #define IF_LRO_ENABLED(ifp)	ISSET((ifp)->if_capabilities, IFCAP_LRO)
374 
375 int 	oce_match(struct device *, void *, void *);
376 void	oce_attach(struct device *, struct device *, void *);
377 int 	oce_pci_alloc(struct oce_softc *, struct pci_attach_args *);
378 void	oce_attachhook(void *);
379 void	oce_attach_ifp(struct oce_softc *);
380 int 	oce_ioctl(struct ifnet *, u_long, caddr_t);
381 void	oce_iff(struct oce_softc *);
382 void	oce_link_status(struct oce_softc *);
383 void	oce_media_status(struct ifnet *, struct ifmediareq *);
384 int 	oce_media_change(struct ifnet *);
385 void	oce_tick(void *);
386 void	oce_init(void *);
387 void	oce_stop(struct oce_softc *);
388 void	oce_watchdog(struct ifnet *);
389 void	oce_start(struct ifnet *);
390 int	oce_encap(struct oce_softc *, struct mbuf **, int wqidx);
391 #ifdef OCE_TSO
392 struct mbuf *
393 	oce_tso(struct oce_softc *, struct mbuf **);
394 #endif
395 int 	oce_intr(void *);
396 void	oce_intr_wq(void *);
397 void	oce_txeof(struct oce_wq *);
398 void	oce_intr_rq(void *);
399 void	oce_rxeof(struct oce_rq *, struct oce_nic_rx_cqe *);
400 void	oce_rxeoc(struct oce_rq *, struct oce_nic_rx_cqe *);
401 int 	oce_vtp_valid(struct oce_softc *, struct oce_nic_rx_cqe *);
402 int 	oce_port_valid(struct oce_softc *, struct oce_nic_rx_cqe *);
403 #ifdef OCE_LRO
404 void	oce_flush_lro(struct oce_rq *);
405 int 	oce_init_lro(struct oce_softc *);
406 void	oce_free_lro(struct oce_softc *);
407 #endif
408 int	oce_get_buf(struct oce_rq *);
409 int	oce_alloc_rx_bufs(struct oce_rq *);
410 void	oce_refill_rx(void *);
411 void	oce_free_posted_rxbuf(struct oce_rq *);
412 void	oce_intr_mq(void *);
413 void	oce_link_event(struct oce_softc *,
414 	    struct oce_async_cqe_link_state *);
415 
416 int 	oce_init_queues(struct oce_softc *);
417 void	oce_release_queues(struct oce_softc *);
418 struct oce_wq *oce_create_wq(struct oce_softc *, struct oce_eq *);
419 void	oce_drain_wq(struct oce_wq *);
420 void	oce_destroy_wq(struct oce_wq *);
421 struct oce_rq *
422 	oce_create_rq(struct oce_softc *, struct oce_eq *, int rss);
423 void	oce_drain_rq(struct oce_rq *);
424 void	oce_destroy_rq(struct oce_rq *);
425 struct oce_eq *
426 	oce_create_eq(struct oce_softc *);
427 static inline void
428 	oce_arm_eq(struct oce_eq *, int neqe, int rearm, int clearint);
429 void	oce_drain_eq(struct oce_eq *);
430 void	oce_destroy_eq(struct oce_eq *);
431 struct oce_mq *
432 	oce_create_mq(struct oce_softc *, struct oce_eq *);
433 void	oce_drain_mq(struct oce_mq *);
434 void	oce_destroy_mq(struct oce_mq *);
435 struct oce_cq *
436 	oce_create_cq(struct oce_softc *, struct oce_eq *, int nitems,
437 	    int isize, int eventable, int nodelay, int ncoalesce);
438 static inline void
439 	oce_arm_cq(struct oce_cq *, int ncqe, int rearm);
440 void	oce_destroy_cq(struct oce_cq *);
441 
442 int	oce_dma_alloc(struct oce_softc *, bus_size_t, struct oce_dma_mem *);
443 void	oce_dma_free(struct oce_softc *, struct oce_dma_mem *);
444 #define	oce_dma_sync(d, f) \
445 	    bus_dmamap_sync((d)->tag, (d)->map, 0, (d)->map->dm_mapsize, f)
446 
447 struct oce_ring *
448 	oce_create_ring(struct oce_softc *, int nitems, int isize, int maxseg);
449 void	oce_destroy_ring(struct oce_softc *, struct oce_ring *);
450 int	oce_load_ring(struct oce_softc *, struct oce_ring *,
451 	    struct oce_pa *, int max_segs);
452 static inline void *
453 	oce_ring_get(struct oce_ring *);
454 static inline void *
455 	oce_ring_first(struct oce_ring *);
456 static inline void *
457 	oce_ring_next(struct oce_ring *);
458 struct oce_pkt *
459 	oce_pkt_alloc(struct oce_softc *, size_t size, int nsegs,
460 	    int maxsegsz);
461 void	oce_pkt_free(struct oce_softc *, struct oce_pkt *);
462 static inline struct oce_pkt *
463 	oce_pkt_get(struct oce_pkt_list *);
464 static inline void
465 	oce_pkt_put(struct oce_pkt_list *, struct oce_pkt *);
466 
467 int	oce_init_fw(struct oce_softc *);
468 int	oce_mbox_init(struct oce_softc *);
469 int	oce_mbox_dispatch(struct oce_softc *);
470 int	oce_cmd(struct oce_softc *, int subsys, int opcode, int version,
471 	    void *payload, int length);
472 void	oce_first_mcc(struct oce_softc *);
473 
474 int	oce_get_fw_config(struct oce_softc *);
475 int	oce_check_native_mode(struct oce_softc *);
476 int	oce_create_iface(struct oce_softc *, uint8_t *macaddr);
477 int	oce_config_vlan(struct oce_softc *, struct normal_vlan *vtags,
478 	    int nvtags, int untagged, int promisc);
479 int	oce_set_flow_control(struct oce_softc *, uint flags);
480 int	oce_config_rss(struct oce_softc *, int enable);
481 int	oce_update_mcast(struct oce_softc *, uint8_t multi[][ETHER_ADDR_LEN],
482 	    int naddr);
483 int	oce_set_promisc(struct oce_softc *, int enable);
484 int	oce_get_link_status(struct oce_softc *);
485 
486 void	oce_macaddr_set(struct oce_softc *);
487 int	oce_macaddr_get(struct oce_softc *, uint8_t *macaddr);
488 int	oce_macaddr_add(struct oce_softc *, uint8_t *macaddr, uint32_t *pmac);
489 int	oce_macaddr_del(struct oce_softc *, uint32_t pmac);
490 
491 int	oce_new_rq(struct oce_softc *, struct oce_rq *);
492 int	oce_new_wq(struct oce_softc *, struct oce_wq *);
493 int	oce_new_mq(struct oce_softc *, struct oce_mq *);
494 int	oce_new_eq(struct oce_softc *, struct oce_eq *);
495 int	oce_new_cq(struct oce_softc *, struct oce_cq *);
496 
497 static inline int
498 	oce_update_stats(struct oce_softc *);
499 int	oce_stats_be2(struct oce_softc *, uint64_t *, uint64_t *);
500 int	oce_stats_be3(struct oce_softc *, uint64_t *, uint64_t *);
501 int	oce_stats_xe(struct oce_softc *, uint64_t *, uint64_t *);
502 
503 struct pool *oce_pkt_pool;
504 
505 struct cfdriver oce_cd = {
506 	NULL, "oce", DV_IFNET
507 };
508 
509 struct cfattach oce_ca = {
510 	sizeof(struct oce_softc), oce_match, oce_attach, NULL, NULL
511 };
512 
513 const struct pci_matchid oce_devices[] = {
514 	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE2 },
515 	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE3 },
516 	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE2 },
517 	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE3 },
518 	{ PCI_VENDOR_EMULEX, PCI_PRODUCT_EMULEX_XE201 },
519 };
520 
521 int
522 oce_match(struct device *parent, void *match, void *aux)
523 {
524 	return (pci_matchbyid(aux, oce_devices, nitems(oce_devices)));
525 }
526 
527 void
528 oce_attach(struct device *parent, struct device *self, void *aux)
529 {
530 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
531 	struct oce_softc *sc = (struct oce_softc *)self;
532 	const char *intrstr = NULL;
533 	pci_intr_handle_t ih;
534 
535 	switch (PCI_PRODUCT(pa->pa_id)) {
536 	case PCI_PRODUCT_SERVERENGINES_BE2:
537 	case PCI_PRODUCT_SERVERENGINES_OCBE2:
538 		SET(sc->sc_flags, OCE_F_BE2);
539 		break;
540 	case PCI_PRODUCT_SERVERENGINES_BE3:
541 	case PCI_PRODUCT_SERVERENGINES_OCBE3:
542 		SET(sc->sc_flags, OCE_F_BE3);
543 		break;
544 	case PCI_PRODUCT_EMULEX_XE201:
545 		SET(sc->sc_flags, OCE_F_XE201);
546 		break;
547 	}
548 
549 	sc->sc_dmat = pa->pa_dmat;
550 	if (oce_pci_alloc(sc, pa))
551 		return;
552 
553 	sc->sc_tx_ring_size = OCE_TX_RING_SIZE;
554 	sc->sc_rx_ring_size = OCE_RX_RING_SIZE;
555 
556 	/* create the bootstrap mailbox */
557 	if (oce_dma_alloc(sc, sizeof(struct oce_bmbx), &sc->sc_mbx)) {
558 		printf(": failed to allocate mailbox memory\n");
559 		return;
560 	}
561 	if (oce_dma_alloc(sc, OCE_MAX_PAYLOAD, &sc->sc_pld)) {
562 		printf(": failed to allocate payload memory\n");
563 		goto fail_1;
564 	}
565 
566 	if (oce_init_fw(sc))
567 		goto fail_2;
568 
569 	if (oce_mbox_init(sc)) {
570 		printf(": failed to initialize mailbox\n");
571 		goto fail_2;
572 	}
573 
574 	if (oce_get_fw_config(sc)) {
575 		printf(": failed to get firmware configuration\n");
576 		goto fail_2;
577 	}
578 
579 	if (ISSET(sc->sc_flags, OCE_F_BE3)) {
580 		if (oce_check_native_mode(sc))
581 			goto fail_2;
582 	}
583 
584 	if (oce_macaddr_get(sc, sc->sc_macaddr)) {
585 		printf(": failed to fetch MAC address\n");
586 		goto fail_2;
587 	}
588 	memcpy(sc->sc_ac.ac_enaddr, sc->sc_macaddr, ETHER_ADDR_LEN);
589 
590 	if (oce_pkt_pool == NULL) {
591 		oce_pkt_pool = malloc(sizeof(struct pool), M_DEVBUF, M_NOWAIT);
592 		if (oce_pkt_pool == NULL) {
593 			printf(": unable to allocate descriptor pool\n");
594 			goto fail_2;
595 		}
596 		pool_init(oce_pkt_pool, sizeof(struct oce_pkt), 0, 0, 0,
597 		    "ocepkts", NULL);
598 	}
599 
600 	/* We allocate a single interrupt resource */
601 	sc->sc_nintr = 1;
602 	if (pci_intr_map_msi(pa, &ih) != 0 &&
603 	    pci_intr_map(pa, &ih) != 0) {
604 		printf(": couldn't map interrupt\n");
605 		goto fail_2;
606 	}
607 
608 	intrstr = pci_intr_string(pa->pa_pc, ih);
609 	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, oce_intr, sc,
610 	    sc->sc_dev.dv_xname);
611 	if (sc->sc_ih == NULL) {
612 		printf(": couldn't establish interrupt\n");
613 		if (intrstr != NULL)
614 			printf(" at %s", intrstr);
615 		printf("\n");
616 		goto fail_2;
617 	}
618 	printf(": %s", intrstr);
619 
620 	if (oce_init_queues(sc))
621 		goto fail_3;
622 
623 	oce_attach_ifp(sc);
624 
625 #ifdef OCE_LRO
626 	if (oce_init_lro(sc))
627 		goto fail_4;
628 #endif
629 
630 	timeout_set(&sc->sc_tick, oce_tick, sc);
631 	timeout_set(&sc->sc_rxrefill, oce_refill_rx, sc);
632 
633 	mountroothook_establish(oce_attachhook, sc);
634 
635 	printf(", address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));
636 
637 	return;
638 
639 #ifdef OCE_LRO
640 fail_4:
641 	oce_free_lro(sc);
642 	ether_ifdetach(&sc->sc_ac.ac_if);
643 	if_detach(&sc->sc_ac.ac_if);
644 	oce_release_queues(sc);
645 #endif
646 fail_3:
647 	pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
648 fail_2:
649 	oce_dma_free(sc, &sc->sc_pld);
650 fail_1:
651 	oce_dma_free(sc, &sc->sc_mbx);
652 }
653 
654 int
655 oce_pci_alloc(struct oce_softc *sc, struct pci_attach_args *pa)
656 {
657 	pcireg_t memtype, reg;
658 
659 	/* setup the device config region */
660 	if (ISSET(sc->sc_flags, OCE_F_BE2))
661 		reg = OCE_BAR_CFG_BE2;
662 	else
663 		reg = OCE_BAR_CFG;
664 
665 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
666 	if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_cfg_iot,
667 	    &sc->sc_cfg_ioh, NULL, &sc->sc_cfg_size,
668 	    IS_BE(sc) ? 0 : 32768)) {
669 		printf(": can't find cfg mem space\n");
670 		return (ENXIO);
671 	}
672 
673 	/*
674 	 * Read the SLI_INTF register and determine whether we
675 	 * can use this port and its features
676 	 */
677 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, OCE_INTF_REG_OFFSET);
678 	if (OCE_SLI_SIGNATURE(reg) != OCE_INTF_VALID_SIG) {
679 		printf(": invalid signature\n");
680 		goto fail_1;
681 	}
682 	if (OCE_SLI_REVISION(reg) != OCE_INTF_SLI_REV4) {
683 		printf(": unsupported SLI revision\n");
684 		goto fail_1;
685 	}
686 	if (OCE_SLI_IFTYPE(reg) == OCE_INTF_IF_TYPE_1)
687 		SET(sc->sc_flags, OCE_F_MBOX_ENDIAN_RQD);
688 	if (OCE_SLI_HINT1(reg) == OCE_INTF_FUNC_RESET_REQD)
689 		SET(sc->sc_flags, OCE_F_RESET_RQD);
690 
691 	/* Lancer has one BAR (CFG) but BE3 has three (CFG, CSR, DB) */
692 	if (IS_BE(sc)) {
693 		/* set up CSR region */
694 		reg = OCE_BAR_CSR;
695 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
696 		if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_csr_iot,
697 		    &sc->sc_csr_ioh, NULL, &sc->sc_csr_size, 0)) {
698 			printf(": can't find csr mem space\n");
699 			goto fail_1;
700 		}
701 
702 		/* set up DB doorbell region */
703 		reg = OCE_BAR_DB;
704 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
705 		if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_db_iot,
706 		    &sc->sc_db_ioh, NULL, &sc->sc_db_size, 0)) {
707 			printf(": can't find csr mem space\n");
708 			goto fail_2;
709 		}
710 	} else {
711 		sc->sc_csr_iot = sc->sc_db_iot = sc->sc_cfg_iot;
712 		sc->sc_csr_ioh = sc->sc_db_ioh = sc->sc_cfg_ioh;
713 	}
714 
715 	return (0);
716 
717 fail_2:
718 	bus_space_unmap(sc->sc_csr_iot, sc->sc_csr_ioh, sc->sc_csr_size);
719 fail_1:
720 	bus_space_unmap(sc->sc_cfg_iot, sc->sc_cfg_ioh, sc->sc_cfg_size);
721 	return (ENXIO);
722 }
723 
724 static inline uint32_t
725 oce_read_cfg(struct oce_softc *sc, bus_size_t off)
726 {
727 	bus_space_barrier(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, 4,
728 	    BUS_SPACE_BARRIER_READ);
729 	return (bus_space_read_4(sc->sc_cfg_iot, sc->sc_cfg_ioh, off));
730 }
731 
732 static inline uint32_t
733 oce_read_csr(struct oce_softc *sc, bus_size_t off)
734 {
735 	bus_space_barrier(sc->sc_csr_iot, sc->sc_csr_ioh, off, 4,
736 	    BUS_SPACE_BARRIER_READ);
737 	return (bus_space_read_4(sc->sc_csr_iot, sc->sc_csr_ioh, off));
738 }
739 
740 static inline uint32_t
741 oce_read_db(struct oce_softc *sc, bus_size_t off)
742 {
743 	bus_space_barrier(sc->sc_db_iot, sc->sc_db_ioh, off, 4,
744 	    BUS_SPACE_BARRIER_READ);
745 	return (bus_space_read_4(sc->sc_db_iot, sc->sc_db_ioh, off));
746 }
747 
748 static inline void
749 oce_write_cfg(struct oce_softc *sc, bus_size_t off, uint32_t val)
750 {
751 	bus_space_write_4(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, val);
752 	bus_space_barrier(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, 4,
753 	    BUS_SPACE_BARRIER_WRITE);
754 }
755 
756 static inline void
757 oce_write_csr(struct oce_softc *sc, bus_size_t off, uint32_t val)
758 {
759 	bus_space_write_4(sc->sc_csr_iot, sc->sc_csr_ioh, off, val);
760 	bus_space_barrier(sc->sc_csr_iot, sc->sc_csr_ioh, off, 4,
761 	    BUS_SPACE_BARRIER_WRITE);
762 }
763 
764 static inline void
765 oce_write_db(struct oce_softc *sc, bus_size_t off, uint32_t val)
766 {
767 	bus_space_write_4(sc->sc_db_iot, sc->sc_db_ioh, off, val);
768 	bus_space_barrier(sc->sc_db_iot, sc->sc_db_ioh, off, 4,
769 	    BUS_SPACE_BARRIER_WRITE);
770 }
771 
772 static inline void
773 oce_intr_enable(struct oce_softc *sc)
774 {
775 	uint32_t reg;
776 
777 	reg = oce_read_cfg(sc, PCI_INTR_CTRL);
778 	oce_write_cfg(sc, PCI_INTR_CTRL, reg | HOSTINTR_MASK);
779 }
780 
781 static inline void
782 oce_intr_disable(struct oce_softc *sc)
783 {
784 	uint32_t reg;
785 
786 	reg = oce_read_cfg(sc, PCI_INTR_CTRL);
787 	oce_write_cfg(sc, PCI_INTR_CTRL, reg & ~HOSTINTR_MASK);
788 }
789 
790 void
791 oce_attachhook(void *arg)
792 {
793 	struct oce_softc *sc = arg;
794 
795 	oce_get_link_status(sc);
796 
797 	oce_arm_cq(sc->sc_mq->cq, 0, TRUE);
798 
799 	/*
800 	 * We need to get MCC async events. So enable intrs and arm
801 	 * first EQ, Other EQs will be armed after interface is UP
802 	 */
803 	oce_intr_enable(sc);
804 	oce_arm_eq(sc->sc_eq[0], 0, TRUE, FALSE);
805 
806 	/*
807 	 * Send first mcc cmd and after that we get gracious
808 	 * MCC notifications from FW
809 	 */
810 	oce_first_mcc(sc);
811 }
812 
813 void
814 oce_attach_ifp(struct oce_softc *sc)
815 {
816 	struct ifnet *ifp = &sc->sc_ac.ac_if;
817 
818 	ifmedia_init(&sc->sc_media, IFM_IMASK, oce_media_change,
819 	    oce_media_status);
820 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
821 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
822 
823 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
824 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
825 	ifp->if_ioctl = oce_ioctl;
826 	ifp->if_start = oce_start;
827 	ifp->if_watchdog = oce_watchdog;
828 	ifp->if_hardmtu = OCE_MAX_MTU;
829 	ifp->if_softc = sc;
830 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_size - 1);
831 	IFQ_SET_READY(&ifp->if_snd);
832 
833 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
834 	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
835 
836 #if NVLAN > 0
837 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
838 #endif
839 
840 #ifdef OCE_TSO
841 	ifp->if_capabilities |= IFCAP_TSO;
842 	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
843 #endif
844 #ifdef OCE_LRO
845 	ifp->if_capabilities |= IFCAP_LRO;
846 #endif
847 
848 	if_attach(ifp);
849 	ether_ifattach(ifp);
850 }
851 
852 int
853 oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
854 {
855 	struct oce_softc *sc = ifp->if_softc;
856 	struct ifaddr *ifa = (struct ifaddr *)data;
857 	struct ifreq *ifr = (struct ifreq *)data;
858 	int s, error = 0;
859 
860 	s = splnet();
861 
862 	switch (command) {
863 	case SIOCSIFADDR:
864 		ifp->if_flags |= IFF_UP;
865 		if (!(ifp->if_flags & IFF_RUNNING))
866 			oce_init(sc);
867 #ifdef INET
868 		if (ifa->ifa_addr->sa_family == AF_INET)
869 			arp_ifinit(&sc->sc_ac, ifa);
870 #endif
871 		break;
872 	case SIOCSIFFLAGS:
873 		if (ifp->if_flags & IFF_UP) {
874 			if (ifp->if_flags & IFF_RUNNING)
875 				error = ENETRESET;
876 			else
877 				oce_init(sc);
878 		} else {
879 			if (ifp->if_flags & IFF_RUNNING)
880 				oce_stop(sc);
881 		}
882 		break;
883 	case SIOCSIFMTU:
884 		if (ifr->ifr_mtu < OCE_MIN_MTU || ifr->ifr_mtu > OCE_MAX_MTU)
885 			error = EINVAL;
886 		else if (ifp->if_mtu != ifr->ifr_mtu) {
887 			ifp->if_mtu = ifr->ifr_mtu;
888 			oce_init(sc);
889 		}
890 		break;
891 	case SIOCGIFMEDIA:
892 	case SIOCSIFMEDIA:
893 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
894 		break;
895 	default:
896 		error = ether_ioctl(ifp, &sc->sc_ac, command, data);
897 		break;
898 	}
899 
900 	if (error == ENETRESET) {
901 		if (ifp->if_flags & IFF_RUNNING)
902 			oce_iff(sc);
903 		error = 0;
904 	}
905 
906 	splx(s);
907 
908 	return (error);
909 }
910 
911 void
912 oce_iff(struct oce_softc *sc)
913 {
914 	uint8_t multi[OCE_MAX_MC_FILTER_SIZE][ETHER_ADDR_LEN];
915 	struct arpcom *ac = &sc->sc_ac;
916 	struct ifnet *ifp = &ac->ac_if;
917 	struct ether_multi *enm;
918 	struct ether_multistep step;
919 	int naddr = 0, promisc = 0;
920 
921 	ifp->if_flags &= ~IFF_ALLMULTI;
922 
923 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
924 	    ac->ac_multicnt >= OCE_MAX_MC_FILTER_SIZE) {
925 		ifp->if_flags |= IFF_ALLMULTI;
926 		promisc = 1;
927 	} else {
928 		ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
929 		while (enm != NULL) {
930 			memcpy(multi[naddr++], enm->enm_addrlo, ETHER_ADDR_LEN);
931 			ETHER_NEXT_MULTI(step, enm);
932 		}
933 		oce_update_mcast(sc, multi, naddr);
934 	}
935 
936 	oce_set_promisc(sc, promisc);
937 }
938 
939 void
940 oce_link_status(struct oce_softc *sc)
941 {
942 	struct ifnet *ifp = &sc->sc_ac.ac_if;
943 	int link_state = LINK_STATE_DOWN;
944 
945 	ifp->if_baudrate = 0;
946 	if (sc->sc_link_up) {
947 		link_state = LINK_STATE_FULL_DUPLEX;
948 
949 		switch (sc->sc_link_speed) {
950 		case 1:
951 			ifp->if_baudrate = IF_Mbps(10);
952 			break;
953 		case 2:
954 			ifp->if_baudrate = IF_Mbps(100);
955 			break;
956 		case 3:
957 			ifp->if_baudrate = IF_Gbps(1);
958 			break;
959 		case 4:
960 			ifp->if_baudrate = IF_Gbps(10);
961 			break;
962 		}
963 	}
964 	if (ifp->if_link_state != link_state) {
965 		ifp->if_link_state = link_state;
966 		if_link_state_change(ifp);
967 	}
968 }
969 
970 void
971 oce_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
972 {
973 	struct oce_softc *sc = ifp->if_softc;
974 
975 	ifmr->ifm_status = IFM_AVALID;
976 	ifmr->ifm_active = IFM_ETHER;
977 
978 	if (oce_get_link_status(sc) == 0)
979 		oce_link_status(sc);
980 
981 	if (!sc->sc_link_up) {
982 		ifmr->ifm_active |= IFM_NONE;
983 		return;
984 	}
985 
986 	ifmr->ifm_status |= IFM_ACTIVE;
987 
988 	switch (sc->sc_link_speed) {
989 	case 1: /* 10 Mbps */
990 		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
991 		break;
992 	case 2: /* 100 Mbps */
993 		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
994 		break;
995 	case 3: /* 1 Gbps */
996 		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
997 		break;
998 	case 4: /* 10 Gbps */
999 		ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1000 		break;
1001 	}
1002 
1003 	if (sc->sc_fc & IFM_ETH_RXPAUSE)
1004 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
1005 	if (sc->sc_fc & IFM_ETH_TXPAUSE)
1006 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
1007 }
1008 
1009 int
1010 oce_media_change(struct ifnet *ifp)
1011 {
1012 	return (0);
1013 }
1014 
1015 void
1016 oce_tick(void *arg)
1017 {
1018 	struct oce_softc *sc = arg;
1019 	int s;
1020 
1021 	s = splnet();
1022 
1023 	if (oce_update_stats(sc) == 0)
1024 		timeout_add_sec(&sc->sc_tick, 1);
1025 
1026 	splx(s);
1027 }
1028 
1029 void
1030 oce_init(void *arg)
1031 {
1032 	struct oce_softc *sc = arg;
1033 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1034 	struct oce_eq *eq;
1035 	struct oce_rq *rq;
1036 	struct oce_wq *wq;
1037 	int i;
1038 
1039 	oce_stop(sc);
1040 
1041 	DELAY(10);
1042 
1043 	oce_macaddr_set(sc);
1044 
1045 	oce_iff(sc);
1046 
1047 	/* Enable VLAN promiscuous mode */
1048 	if (oce_config_vlan(sc, NULL, 0, 1, 1))
1049 		goto error;
1050 
1051 	if (oce_set_flow_control(sc, IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE))
1052 		goto error;
1053 
1054 	OCE_RQ_FOREACH(sc, rq, i) {
1055 		rq->mtu = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1056 		    ETHER_VLAN_ENCAP_LEN;
1057 		if (oce_new_rq(sc, rq)) {
1058 			printf("%s: failed to create rq\n",
1059 			    sc->sc_dev.dv_xname);
1060 			goto error;
1061 		}
1062 		rq->ring->index	 = 0;
1063 
1064 		/* oce splits jumbos into 2k chunks... */
1065 		if_rxr_init(&rq->rxring, 8, rq->nitems);
1066 
1067 		if (!oce_alloc_rx_bufs(rq)) {
1068 			printf("%s: failed to allocate rx buffers\n",
1069 			    sc->sc_dev.dv_xname);
1070 			goto error;
1071 		}
1072 	}
1073 
1074 #ifdef OCE_RSS
1075 	/* RSS config */
1076 	if (sc->sc_rss_enable) {
1077 		if (oce_config_rss(sc, (uint8_t)sc->sc_if_id, 1)) {
1078 			printf("%s: failed to configure RSS\n",
1079 			    sc->sc_dev.dv_xname);
1080 			goto error;
1081 		}
1082 	}
1083 #endif
1084 
1085 	OCE_RQ_FOREACH(sc, rq, i)
1086 		oce_arm_cq(rq->cq, 0, TRUE);
1087 
1088 	OCE_WQ_FOREACH(sc, wq, i)
1089 		oce_arm_cq(wq->cq, 0, TRUE);
1090 
1091 	oce_arm_cq(sc->sc_mq->cq, 0, TRUE);
1092 
1093 	OCE_EQ_FOREACH(sc, eq, i)
1094 		oce_arm_eq(eq, 0, TRUE, FALSE);
1095 
1096 	if (oce_get_link_status(sc) == 0)
1097 		oce_link_status(sc);
1098 
1099 	ifp->if_flags |= IFF_RUNNING;
1100 	ifp->if_flags &= ~IFF_OACTIVE;
1101 
1102 	timeout_add_sec(&sc->sc_tick, 1);
1103 
1104 	oce_intr_enable(sc);
1105 
1106 	return;
1107 error:
1108 	oce_stop(sc);
1109 }
1110 
1111 void
1112 oce_stop(struct oce_softc *sc)
1113 {
1114 	struct mbx_delete_nic_rq cmd;
1115 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1116 	struct oce_rq *rq;
1117 	struct oce_wq *wq;
1118 	struct oce_eq *eq;
1119 	int i;
1120 
1121 	timeout_del(&sc->sc_tick);
1122 	timeout_del(&sc->sc_rxrefill);
1123 
1124 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1125 
1126 	/* Stop intrs and finish any bottom halves pending */
1127 	oce_intr_disable(sc);
1128 
1129 	/* Invalidate any pending cq and eq entries */
1130 	OCE_EQ_FOREACH(sc, eq, i)
1131 		oce_drain_eq(eq);
1132 	OCE_RQ_FOREACH(sc, rq, i) {
1133 		/* destroy the work queue in the firmware */
1134 		memset(&cmd, 0, sizeof(cmd));
1135 		cmd.params.req.rq_id = htole16(rq->id);
1136 		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_RQ,
1137 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
1138 		DELAY(1000);
1139 		oce_drain_rq(rq);
1140 		oce_free_posted_rxbuf(rq);
1141 	}
1142 	OCE_WQ_FOREACH(sc, wq, i)
1143 		oce_drain_wq(wq);
1144 }
1145 
1146 void
1147 oce_watchdog(struct ifnet *ifp)
1148 {
1149 	printf("%s: watchdog timeout -- resetting\n", ifp->if_xname);
1150 
1151 	oce_init(ifp->if_softc);
1152 
1153 	ifp->if_oerrors++;
1154 }
1155 
1156 void
1157 oce_start(struct ifnet *ifp)
1158 {
1159 	struct oce_softc *sc = ifp->if_softc;
1160 	struct mbuf *m;
1161 	int pkts = 0;
1162 
1163 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1164 		return;
1165 
1166 	for (;;) {
1167 		IFQ_DEQUEUE(&ifp->if_snd, m);
1168 		if (m == NULL)
1169 			break;
1170 
1171 		if (oce_encap(sc, &m, 0)) {
1172 			ifp->if_flags |= IFF_OACTIVE;
1173 			break;
1174 		}
1175 
1176 #if NBPFILTER > 0
1177 		if (ifp->if_bpf)
1178 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1179 #endif
1180 		pkts++;
1181 	}
1182 
1183 	/* Set a timeout in case the chip goes out to lunch */
1184 	if (pkts)
1185 		ifp->if_timer = 5;
1186 }
1187 
1188 int
1189 oce_encap(struct oce_softc *sc, struct mbuf **mpp, int wqidx)
1190 {
1191 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1192 	struct mbuf *m = *mpp;
1193 	struct oce_wq *wq = sc->sc_wq[wqidx];
1194 	struct oce_pkt *pkt = NULL;
1195 	struct oce_nic_hdr_wqe *nhe;
1196 	struct oce_nic_frag_wqe *nfe;
1197 	int i, nwqe, err;
1198 
1199 #ifdef OCE_TSO
1200 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1201 		/* consolidate packet buffers for TSO/LSO segment offload */
1202 		m = oce_tso(sc, mpp);
1203 		if (m == NULL)
1204 			goto error;
1205 	}
1206 #endif
1207 
1208 	if ((pkt = oce_pkt_get(&wq->pkt_free)) == NULL)
1209 		goto error;
1210 
1211 	err = bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, m, BUS_DMA_NOWAIT);
1212 	if (err == EFBIG) {
1213 		if (m_defrag(m, M_DONTWAIT) ||
1214 		    bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, m,
1215 			BUS_DMA_NOWAIT))
1216 			goto error;
1217 		*mpp = m;
1218 	} else if (err != 0)
1219 		goto error;
1220 
1221 	pkt->nsegs = pkt->map->dm_nsegs;
1222 
1223 	nwqe = pkt->nsegs + 1;
1224 	if (IS_BE(sc)) {
1225 		/* BE2 and BE3 require even number of WQEs */
1226 		if (nwqe & 1)
1227 			nwqe++;
1228 	}
1229 
1230 	/* Fail if there's not enough free WQEs */
1231 	if (nwqe >= wq->ring->nitems - wq->ring->nused) {
1232 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
1233 		goto error;
1234 	}
1235 
1236 	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1237 	    BUS_DMASYNC_PREWRITE);
1238 	pkt->mbuf = m;
1239 
1240 	/* TX work queue entry for the header */
1241 	nhe = oce_ring_get(wq->ring);
1242 	memset(nhe, 0, sizeof(*nhe));
1243 
1244 	nhe->u0.s.complete = 1;
1245 	nhe->u0.s.event = 1;
1246 	nhe->u0.s.crc = 1;
1247 	nhe->u0.s.forward = 0;
1248 	nhe->u0.s.ipcs = (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) ? 1 : 0;
1249 	nhe->u0.s.udpcs = (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) ? 1 : 0;
1250 	nhe->u0.s.tcpcs = (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) ? 1 : 0;
1251 	nhe->u0.s.num_wqe = nwqe;
1252 	nhe->u0.s.total_length = m->m_pkthdr.len;
1253 
1254 #if NVLAN > 0
1255 	if (m->m_flags & M_VLANTAG) {
1256 		nhe->u0.s.vlan = 1; /* Vlan present */
1257 		nhe->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
1258 	}
1259 #endif
1260 
1261 #ifdef OCE_TSO
1262 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1263 		if (m->m_pkthdr.tso_segsz) {
1264 			nhe->u0.s.lso = 1;
1265 			nhe->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
1266 		}
1267 		if (!IS_BE(sc))
1268 			nhe->u0.s.ipcs = 1;
1269 	}
1270 #endif
1271 
1272 	oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_PREREAD |
1273 	    BUS_DMASYNC_PREWRITE);
1274 
1275 	wq->ring->nused++;
1276 
1277 	/* TX work queue entries for data chunks */
1278 	for (i = 0; i < pkt->nsegs; i++) {
1279 		nfe = oce_ring_get(wq->ring);
1280 		memset(nfe, 0, sizeof(*nfe));
1281 		nfe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[i].ds_addr);
1282 		nfe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[i].ds_addr);
1283 		nfe->u0.s.frag_len = pkt->map->dm_segs[i].ds_len;
1284 		wq->ring->nused++;
1285 	}
1286 	if (nwqe > (pkt->nsegs + 1)) {
1287 		nfe = oce_ring_get(wq->ring);
1288 		memset(nfe, 0, sizeof(*nfe));
1289 		wq->ring->nused++;
1290 		pkt->nsegs++;
1291 	}
1292 
1293 	oce_pkt_put(&wq->pkt_list, pkt);
1294 
1295 	ifp->if_opackets++;
1296 
1297 	oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_POSTREAD |
1298 	    BUS_DMASYNC_POSTWRITE);
1299 
1300 	oce_write_db(sc, PD_TXULP_DB, wq->id | (nwqe << 16));
1301 
1302 	return (0);
1303 
1304 error:
1305 	if (pkt)
1306 		oce_pkt_put(&wq->pkt_free, pkt);
1307 	m_freem(*mpp);
1308 	*mpp = NULL;
1309 	return (1);
1310 }
1311 
1312 #ifdef OCE_TSO
1313 struct mbuf *
1314 oce_tso(struct oce_softc *sc, struct mbuf **mpp)
1315 {
1316 	struct mbuf *m;
1317 #ifdef INET
1318 	struct ip *ip;
1319 #endif
1320 #ifdef INET6
1321 	struct ip6_hdr *ip6;
1322 #endif
1323 	struct ether_vlan_header *eh;
1324 	struct tcphdr *th;
1325 	uint16_t etype;
1326 	int total_len = 0, ehdrlen = 0;
1327 
1328 	m = *mpp;
1329 
1330 	if (M_WRITABLE(m) == 0) {
1331 		m = m_dup(*mpp, M_DONTWAIT);
1332 		if (!m)
1333 			return (NULL);
1334 		m_freem(*mpp);
1335 		*mpp = m;
1336 	}
1337 
1338 	eh = mtod(m, struct ether_vlan_header *);
1339 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1340 		etype = ntohs(eh->evl_proto);
1341 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1342 	} else {
1343 		etype = ntohs(eh->evl_encap_proto);
1344 		ehdrlen = ETHER_HDR_LEN;
1345 	}
1346 
1347 	switch (etype) {
1348 #ifdef INET
1349 	case ETHERTYPE_IP:
1350 		ip = (struct ip *)(m->m_data + ehdrlen);
1351 		if (ip->ip_p != IPPROTO_TCP)
1352 			return (NULL);
1353 		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1354 
1355 		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1356 		break;
1357 #endif
1358 #ifdef INET6
1359 	case ETHERTYPE_IPV6:
1360 		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1361 		if (ip6->ip6_nxt != IPPROTO_TCP)
1362 			return NULL;
1363 		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1364 
1365 		total_len = ehdrlen + sizeof(struct ip6_hdr) +
1366 		    (th->th_off << 2);
1367 		break;
1368 #endif
1369 	default:
1370 		return (NULL);
1371 	}
1372 
1373 	m = m_pullup(m, total_len);
1374 	if (!m)
1375 		return (NULL);
1376 	*mpp = m;
1377 	return (m);
1378 
1379 }
1380 #endif /* OCE_TSO */
1381 
1382 int
1383 oce_intr(void *arg)
1384 {
1385 	struct oce_softc *sc = arg;
1386 	struct oce_eq *eq = sc->sc_eq[0];
1387 	struct oce_eqe *eqe;
1388 	struct oce_cq *cq = NULL;
1389 	int i, neqe = 0;
1390 
1391 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTREAD);
1392 
1393 	OCE_RING_FOREACH(eq->ring, eqe, eqe->evnt != 0) {
1394 		eqe->evnt = 0;
1395 		neqe++;
1396 	}
1397 
1398 	/* Spurious? */
1399 	if (!neqe) {
1400 		oce_arm_eq(eq, 0, TRUE, FALSE);
1401 		return (0);
1402 	}
1403 
1404 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_PREWRITE);
1405 
1406  	/* Clear EQ entries, but dont arm */
1407 	oce_arm_eq(eq, neqe, FALSE, TRUE);
1408 
1409 	/* Process TX, RX and MCC completion queues */
1410 	for (i = 0; i < eq->cq_valid; i++) {
1411 		cq = eq->cq[i];
1412 		(*cq->cq_intr)(cq->cb_arg);
1413 		oce_arm_cq(cq, 0, TRUE);
1414 	}
1415 
1416 	oce_arm_eq(eq, 0, TRUE, FALSE);
1417 	return (1);
1418 }
1419 
1420 /* Handle the Completion Queue for transmit */
1421 void
1422 oce_intr_wq(void *arg)
1423 {
1424 	struct oce_wq *wq = (struct oce_wq *)arg;
1425 	struct oce_cq *cq = wq->cq;
1426 	struct oce_nic_tx_cqe *cqe;
1427 	struct oce_softc *sc = wq->sc;
1428 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1429 	int ncqe = 0;
1430 
1431 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
1432 	OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
1433 		oce_txeof(wq);
1434 		WQ_CQE_INVALIDATE(cqe);
1435 		ncqe++;
1436 	}
1437 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
1438 
1439 	if (ifp->if_flags & IFF_OACTIVE) {
1440 		if (wq->ring->nused < (wq->ring->nitems / 2)) {
1441 			ifp->if_flags &= ~IFF_OACTIVE;
1442 			oce_start(ifp);
1443 		}
1444 	}
1445 	if (wq->ring->nused == 0)
1446 		ifp->if_timer = 0;
1447 
1448 	if (ncqe)
1449 		oce_arm_cq(cq, ncqe, FALSE);
1450 }
1451 
1452 void
1453 oce_txeof(struct oce_wq *wq)
1454 {
1455 	struct oce_softc *sc = wq->sc;
1456 	struct oce_pkt *pkt;
1457 	struct mbuf *m;
1458 
1459 	if ((pkt = oce_pkt_get(&wq->pkt_list)) == NULL) {
1460 		printf("%s: missing descriptor in txeof\n",
1461 		    sc->sc_dev.dv_xname);
1462 		return;
1463 	}
1464 
1465 	wq->ring->nused -= pkt->nsegs + 1;
1466 	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1467 	    BUS_DMASYNC_POSTWRITE);
1468 	bus_dmamap_unload(sc->sc_dmat, pkt->map);
1469 
1470 	m = pkt->mbuf;
1471 	m_freem(m);
1472 	pkt->mbuf = NULL;
1473 	oce_pkt_put(&wq->pkt_free, pkt);
1474 }
1475 
1476 /* Handle the Completion Queue for receive */
1477 void
1478 oce_intr_rq(void *arg)
1479 {
1480 	struct oce_rq *rq = (struct oce_rq *)arg;
1481 	struct oce_cq *cq = rq->cq;
1482 	struct oce_softc *sc = rq->sc;
1483 	struct oce_nic_rx_cqe *cqe;
1484 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1485 	int maxrx, ncqe = 0;
1486 
1487 	maxrx = IS_XE201(sc) ? 8 : OCE_MAX_RQ_COMPL;
1488 
1489 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
1490 
1491 	OCE_RING_FOREACH(cq->ring, cqe, RQ_CQE_VALID(cqe) && ncqe <= maxrx) {
1492 		if (cqe->u0.s.error == 0) {
1493 			if (cqe->u0.s.pkt_size == 0)
1494 				/* partial DMA workaround for Lancer */
1495 				oce_rxeoc(rq, cqe);
1496 			else
1497 				oce_rxeof(rq, cqe);
1498 		} else {
1499 			ifp->if_ierrors++;
1500 			if (IS_XE201(sc))
1501 				/* Lancer A0 no buffer workaround */
1502 				oce_rxeoc(rq, cqe);
1503 			else
1504 				/* Post L3/L4 errors to stack.*/
1505 				oce_rxeof(rq, cqe);
1506 		}
1507 #ifdef OCE_LRO
1508 		if (IF_LRO_ENABLED(ifp) && rq->lro_pkts_queued >= 16)
1509 			oce_flush_lro(rq);
1510 #endif
1511 		RQ_CQE_INVALIDATE(cqe);
1512 		ncqe++;
1513 	}
1514 
1515 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
1516 
1517 #ifdef OCE_LRO
1518 	if (IF_LRO_ENABLED(ifp))
1519 		oce_flush_lro(rq);
1520 #endif
1521 
1522 	if (ncqe) {
1523 		oce_arm_cq(cq, ncqe, FALSE);
1524 		if (!oce_alloc_rx_bufs(rq))
1525 			timeout_add(&sc->sc_rxrefill, 1);
1526 	}
1527 }
1528 
1529 void
1530 oce_rxeof(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1531 {
1532 	struct oce_softc *sc = rq->sc;
1533 	struct oce_pkt *pkt = NULL;
1534 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1535 	struct mbuf *m = NULL, *tail = NULL;
1536 	int i, len, frag_len;
1537 	uint16_t vtag;
1538 
1539 	len = cqe->u0.s.pkt_size;
1540 
1541 	 /* Get vlan_tag value */
1542 	if (IS_BE(sc))
1543 		vtag = ntohs(cqe->u0.s.vlan_tag);
1544 	else
1545 		vtag = cqe->u0.s.vlan_tag;
1546 
1547 	for (i = 0; i < cqe->u0.s.num_fragments; i++) {
1548 		if ((pkt = oce_pkt_get(&rq->pkt_list)) == NULL) {
1549 			printf("%s: missing descriptor in rxeof\n",
1550 			    sc->sc_dev.dv_xname);
1551 			goto exit;
1552 		}
1553 
1554 		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1555 		    BUS_DMASYNC_POSTREAD);
1556 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
1557 		if_rxr_put(&rq->rxring, 1);
1558 
1559 		frag_len = (len > rq->fragsize) ? rq->fragsize : len;
1560 		pkt->mbuf->m_len = frag_len;
1561 
1562 		if (tail != NULL) {
1563 			/* additional fragments */
1564 			pkt->mbuf->m_flags &= ~M_PKTHDR;
1565 			tail->m_next = pkt->mbuf;
1566 			tail = pkt->mbuf;
1567 		} else {
1568 			/* first fragment, fill out most of the header */
1569 			pkt->mbuf->m_pkthdr.len = len;
1570 			pkt->mbuf->m_pkthdr.csum_flags = 0;
1571 			if (cqe->u0.s.ip_cksum_pass) {
1572 				if (!cqe->u0.s.ip_ver) { /* IPV4 */
1573 					pkt->mbuf->m_pkthdr.csum_flags =
1574 					    M_IPV4_CSUM_IN_OK;
1575 				}
1576 			}
1577 			if (cqe->u0.s.l4_cksum_pass) {
1578 				pkt->mbuf->m_pkthdr.csum_flags |=
1579 				    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
1580 			}
1581 			m = tail = pkt->mbuf;
1582 		}
1583 		pkt->mbuf = NULL;
1584 		oce_pkt_put(&rq->pkt_free, pkt);
1585 		len -= frag_len;
1586 	}
1587 
1588 	if (m) {
1589 		if (!oce_port_valid(sc, cqe)) {
1590 			 m_freem(m);
1591 			 goto exit;
1592 		}
1593 
1594 		m->m_pkthdr.rcvif = ifp;
1595 
1596 #if NVLAN > 0
1597 		/* This determines if vlan tag is valid */
1598 		if (oce_vtp_valid(sc, cqe)) {
1599 			if (sc->sc_fmode & FNM_FLEX10_MODE) {
1600 				/* FLEX10. If QnQ is not set, neglect VLAN */
1601 				if (cqe->u0.s.qnq) {
1602 					m->m_pkthdr.ether_vtag = vtag;
1603 					m->m_flags |= M_VLANTAG;
1604 				}
1605 			} else if (sc->sc_pvid != (vtag & VLAN_VID_MASK))  {
1606 				/*
1607 				 * In UMC mode generally pvid will be striped.
1608 				 * But in some cases we have seen it comes
1609 				 * with pvid. So if pvid == vlan, neglect vlan.
1610 				 */
1611 				m->m_pkthdr.ether_vtag = vtag;
1612 				m->m_flags |= M_VLANTAG;
1613 			}
1614 		}
1615 #endif
1616 
1617 		ifp->if_ipackets++;
1618 
1619 #ifdef OCE_LRO
1620 		/* Try to queue to LRO */
1621 		if (IF_LRO_ENABLED(ifp) && !(m->m_flags & M_VLANTAG) &&
1622 		    cqe->u0.s.ip_cksum_pass && cqe->u0.s.l4_cksum_pass &&
1623 		    !cqe->u0.s.ip_ver && rq->lro.lro_cnt != 0) {
1624 
1625 			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1626 				rq->lro_pkts_queued ++;
1627 				goto exit;
1628 			}
1629 			/* If LRO posting fails then try to post to STACK */
1630 		}
1631 #endif
1632 
1633 #if NBPFILTER > 0
1634 		if (ifp->if_bpf)
1635 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
1636 #endif
1637 
1638 		ether_input_mbuf(ifp, m);
1639 	}
1640 exit:
1641 	return;
1642 }
1643 
1644 void
1645 oce_rxeoc(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1646 {
1647 	struct oce_softc *sc = rq->sc;
1648 	struct oce_pkt *pkt;
1649 	int i, num_frags = cqe->u0.s.num_fragments;
1650 
1651 	if (IS_XE201(sc) && cqe->u0.s.error) {
1652 		/*
1653 		 * Lancer A0 workaround:
1654 		 * num_frags will be 1 more than actual in case of error
1655 		 */
1656 		if (num_frags)
1657 			num_frags--;
1658 	}
1659 	for (i = 0; i < num_frags; i++) {
1660 		if ((pkt = oce_pkt_get(&rq->pkt_list)) == NULL) {
1661 			printf("%s: missing descriptor in rxeoc\n",
1662 			    sc->sc_dev.dv_xname);
1663 			return;
1664 		}
1665 		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1666 		    BUS_DMASYNC_POSTREAD);
1667 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
1668 		if_rxr_put(&rq->rxring, 1);
1669 		m_freem(pkt->mbuf);
1670 		oce_pkt_put(&rq->pkt_free, pkt);
1671 	}
1672 }
1673 
1674 int
1675 oce_vtp_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
1676 {
1677 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1678 
1679 	if (IS_BE(sc) && ISSET(sc->sc_flags, OCE_F_BE3_NATIVE)) {
1680 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1681 		return (cqe_v1->u0.s.vlan_tag_present);
1682 	}
1683 	return (cqe->u0.s.vlan_tag_present);
1684 }
1685 
1686 int
1687 oce_port_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
1688 {
1689 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1690 
1691 	if (IS_BE(sc) && ISSET(sc->sc_flags, OCE_F_BE3_NATIVE)) {
1692 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1693 		if (sc->sc_port != cqe_v1->u0.s.port)
1694 			return (0);
1695 	}
1696 	return (1);
1697 }
1698 
1699 #ifdef OCE_LRO
1700 void
1701 oce_flush_lro(struct oce_rq *rq)
1702 {
1703 	struct oce_softc *sc = rq->sc;
1704 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1705 	struct lro_ctrl	*lro = &rq->lro;
1706 	struct lro_entry *queued;
1707 
1708 	if (!IF_LRO_ENABLED(ifp))
1709 		return;
1710 
1711 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
1712 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
1713 		tcp_lro_flush(lro, queued);
1714 	}
1715 	rq->lro_pkts_queued = 0;
1716 }
1717 
1718 int
1719 oce_init_lro(struct oce_softc *sc)
1720 {
1721 	struct lro_ctrl *lro = NULL;
1722 	int i = 0, rc = 0;
1723 
1724 	for (i = 0; i < sc->sc_nrq; i++) {
1725 		lro = &sc->sc_rq[i]->lro;
1726 		rc = tcp_lro_init(lro);
1727 		if (rc != 0) {
1728 			printf("%s: LRO init failed\n",
1729 			    sc->sc_dev.dv_xname);
1730 			return rc;
1731 		}
1732 		lro->ifp = &sc->sc_ac.ac_if;
1733 	}
1734 
1735 	return (rc);
1736 }
1737 
1738 void
1739 oce_free_lro(struct oce_softc *sc)
1740 {
1741 	struct lro_ctrl *lro = NULL;
1742 	int i = 0;
1743 
1744 	for (i = 0; i < sc->sc_nrq; i++) {
1745 		lro = &sc->sc_rq[i]->lro;
1746 		if (lro)
1747 			tcp_lro_free(lro);
1748 	}
1749 }
1750 #endif /* OCE_LRO */
1751 
1752 int
1753 oce_get_buf(struct oce_rq *rq)
1754 {
1755 	struct oce_softc *sc = rq->sc;
1756 	struct oce_pkt *pkt;
1757 	struct oce_nic_rqe *rqe;
1758 
1759 	if ((pkt = oce_pkt_get(&rq->pkt_free)) == NULL)
1760 		return (0);
1761 
1762 	pkt->mbuf = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
1763 	if (pkt->mbuf == NULL) {
1764 		oce_pkt_put(&rq->pkt_free, pkt);
1765 		return (0);
1766 	}
1767 
1768 	pkt->mbuf->m_len = pkt->mbuf->m_pkthdr.len = MCLBYTES;
1769 	m_adj(pkt->mbuf, ETHER_ALIGN);
1770 
1771 	if (bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, pkt->mbuf,
1772 	    BUS_DMA_NOWAIT)) {
1773 		m_freem(pkt->mbuf);
1774 		pkt->mbuf = NULL;
1775 		oce_pkt_put(&rq->pkt_free, pkt);
1776 		return (0);
1777 	}
1778 
1779 	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1780 	    BUS_DMASYNC_PREREAD);
1781 
1782 	oce_dma_sync(&rq->ring->dma, BUS_DMASYNC_PREREAD |
1783 	    BUS_DMASYNC_PREWRITE);
1784 
1785 	rqe = oce_ring_get(rq->ring);
1786 	rqe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[0].ds_addr);
1787 	rqe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[0].ds_addr);
1788 
1789 	oce_dma_sync(&rq->ring->dma, BUS_DMASYNC_POSTREAD |
1790 	    BUS_DMASYNC_POSTWRITE);
1791 
1792 	oce_pkt_put(&rq->pkt_list, pkt);
1793 
1794 	return (1);
1795 }
1796 
1797 int
1798 oce_alloc_rx_bufs(struct oce_rq *rq)
1799 {
1800 	struct oce_softc *sc = rq->sc;
1801 	int i, nbufs = 0;
1802 	u_int slots;
1803 
1804 	for (slots = if_rxr_get(&rq->rxring, rq->nitems); slots > 0; slots--) {
1805 		if (oce_get_buf(rq) == 0)
1806 			break;
1807 
1808 		nbufs++;
1809 	}
1810 	if_rxr_put(&rq->rxring, slots);
1811 
1812 	if (!nbufs)
1813 		return (0);
1814 	for (i = nbufs / OCE_MAX_RQ_POSTS; i > 0; i--) {
1815 		oce_write_db(sc, PD_RXULP_DB, rq->id |
1816 		    (OCE_MAX_RQ_POSTS << 24));
1817 		nbufs -= OCE_MAX_RQ_POSTS;
1818 	}
1819 	if (nbufs > 0)
1820 		oce_write_db(sc, PD_RXULP_DB, rq->id | (nbufs << 24));
1821 	return (1);
1822 }
1823 
1824 void
1825 oce_refill_rx(void *arg)
1826 {
1827 	struct oce_softc *sc = arg;
1828 	struct oce_rq *rq;
1829 	int i, s;
1830 
1831 	s = splnet();
1832 	OCE_RQ_FOREACH(sc, rq, i) {
1833 		if (!oce_alloc_rx_bufs(rq))
1834 			timeout_add(&sc->sc_rxrefill, 5);
1835 	}
1836 	splx(s);
1837 }
1838 
1839 /* Handle the Completion Queue for the Mailbox/Async notifications */
1840 void
1841 oce_intr_mq(void *arg)
1842 {
1843 	struct oce_mq *mq = (struct oce_mq *)arg;
1844 	struct oce_softc *sc = mq->sc;
1845 	struct oce_cq *cq = mq->cq;
1846 	struct oce_mq_cqe *cqe;
1847 	struct oce_async_cqe_link_state *acqe;
1848 	struct oce_async_event_grp5_pvid_state *gcqe;
1849 	int evtype, optype, ncqe = 0;
1850 
1851 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
1852 
1853 	OCE_RING_FOREACH(cq->ring, cqe, MQ_CQE_VALID(cqe)) {
1854 		if (cqe->u0.s.async_event) {
1855 			evtype = cqe->u0.s.event_type;
1856 			optype = cqe->u0.s.async_type;
1857 			if (evtype  == ASYNC_EVENT_CODE_LINK_STATE) {
1858 				/* Link status evt */
1859 				acqe = (struct oce_async_cqe_link_state *)cqe;
1860 				oce_link_event(sc, acqe);
1861 			} else if ((evtype == ASYNC_EVENT_GRP5) &&
1862 				   (optype == ASYNC_EVENT_PVID_STATE)) {
1863 				/* GRP5 PVID */
1864 				gcqe =
1865 				(struct oce_async_event_grp5_pvid_state *)cqe;
1866 				if (gcqe->enabled)
1867 					sc->sc_pvid =
1868 					    gcqe->tag & VLAN_VID_MASK;
1869 				else
1870 					sc->sc_pvid = 0;
1871 			}
1872 		}
1873 		MQ_CQE_INVALIDATE(cqe);
1874 		ncqe++;
1875 	}
1876 
1877 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
1878 
1879 	if (ncqe)
1880 		oce_arm_cq(cq, ncqe, FALSE);
1881 }
1882 
1883 void
1884 oce_link_event(struct oce_softc *sc, struct oce_async_cqe_link_state *acqe)
1885 {
1886 	/* Update Link status */
1887 	sc->sc_link_up = ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
1888 	    ASYNC_EVENT_LINK_UP);
1889 	/* Update speed */
1890 	sc->sc_link_speed = acqe->u0.s.speed;
1891 	oce_link_status(sc);
1892 }
1893 
1894 int
1895 oce_init_queues(struct oce_softc *sc)
1896 {
1897 	struct oce_wq *wq;
1898 	struct oce_rq *rq;
1899 	int i;
1900 
1901 	sc->sc_nrq = 1;
1902 	sc->sc_nwq = 1;
1903 
1904 	/* Create network interface on card */
1905 	if (oce_create_iface(sc, sc->sc_macaddr))
1906 		goto error;
1907 
1908 	/* create all of the event queues */
1909 	for (i = 0; i < sc->sc_nintr; i++) {
1910 		sc->sc_eq[i] = oce_create_eq(sc);
1911 		if (!sc->sc_eq[i])
1912 			goto error;
1913 	}
1914 
1915 	/* alloc tx queues */
1916 	OCE_WQ_FOREACH(sc, wq, i) {
1917 		sc->sc_wq[i] = oce_create_wq(sc, sc->sc_eq[i]);
1918 		if (!sc->sc_wq[i])
1919 			goto error;
1920 	}
1921 
1922 	/* alloc rx queues */
1923 	OCE_RQ_FOREACH(sc, rq, i) {
1924 		sc->sc_rq[i] = oce_create_rq(sc, sc->sc_eq[i > 0 ? i - 1 : 0],
1925 		    i > 0 ? sc->sc_rss_enable : 0);
1926 		if (!sc->sc_rq[i])
1927 			goto error;
1928 	}
1929 
1930 	/* alloc mailbox queue */
1931 	sc->sc_mq = oce_create_mq(sc, sc->sc_eq[0]);
1932 	if (!sc->sc_mq)
1933 		goto error;
1934 
1935 	return (0);
1936 error:
1937 	oce_release_queues(sc);
1938 	return (1);
1939 }
1940 
1941 void
1942 oce_release_queues(struct oce_softc *sc)
1943 {
1944 	struct oce_wq *wq;
1945 	struct oce_rq *rq;
1946 	struct oce_eq *eq;
1947 	int i;
1948 
1949 	OCE_RQ_FOREACH(sc, rq, i) {
1950 		if (rq)
1951 			oce_destroy_rq(sc->sc_rq[i]);
1952 	}
1953 
1954 	OCE_WQ_FOREACH(sc, wq, i) {
1955 		if (wq)
1956 			oce_destroy_wq(sc->sc_wq[i]);
1957 	}
1958 
1959 	if (sc->sc_mq)
1960 		oce_destroy_mq(sc->sc_mq);
1961 
1962 	OCE_EQ_FOREACH(sc, eq, i) {
1963 		if (eq)
1964 			oce_destroy_eq(sc->sc_eq[i]);
1965 	}
1966 }
1967 
1968 /**
1969  * @brief 		Function to create a WQ for NIC Tx
1970  * @param sc 		software handle to the device
1971  * @returns		the pointer to the WQ created or NULL on failure
1972  */
1973 struct oce_wq *
1974 oce_create_wq(struct oce_softc *sc, struct oce_eq *eq)
1975 {
1976 	struct oce_wq *wq;
1977 	struct oce_cq *cq;
1978 	struct oce_pkt *pkt;
1979 	int i;
1980 
1981 	if (sc->sc_tx_ring_size < 256 || sc->sc_tx_ring_size > 2048)
1982 		return (NULL);
1983 
1984 	wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
1985 	if (!wq)
1986 		return (NULL);
1987 
1988 	wq->ring = oce_create_ring(sc, sc->sc_tx_ring_size, NIC_WQE_SIZE, 8);
1989 	if (!wq->ring) {
1990 		free(wq, M_DEVBUF, 0);
1991 		return (NULL);
1992 	}
1993 
1994 	cq = oce_create_cq(sc, eq, CQ_LEN_512, sizeof(struct oce_nic_tx_cqe),
1995 	    1, 0, 3);
1996 	if (!cq) {
1997 		oce_destroy_ring(sc, wq->ring);
1998 		free(wq, M_DEVBUF, 0);
1999 		return (NULL);
2000 	}
2001 
2002 	wq->id = -1;
2003 	wq->sc = sc;
2004 
2005 	wq->cq = cq;
2006 	wq->nitems = sc->sc_tx_ring_size;
2007 
2008 	SIMPLEQ_INIT(&wq->pkt_free);
2009 	SIMPLEQ_INIT(&wq->pkt_list);
2010 
2011 	for (i = 0; i < sc->sc_tx_ring_size / 2; i++) {
2012 		pkt = oce_pkt_alloc(sc, OCE_MAX_TX_SIZE, OCE_MAX_TX_ELEMENTS,
2013 		    PAGE_SIZE);
2014 		if (pkt == NULL) {
2015 			oce_destroy_wq(wq);
2016 			return (NULL);
2017 		}
2018 		oce_pkt_put(&wq->pkt_free, pkt);
2019 	}
2020 
2021 	if (oce_new_wq(sc, wq)) {
2022 		oce_destroy_wq(wq);
2023 		return (NULL);
2024 	}
2025 
2026 	eq->cq[eq->cq_valid] = cq;
2027 	eq->cq_valid++;
2028 	cq->cb_arg = wq;
2029 	cq->cq_intr = oce_intr_wq;
2030 
2031 	return (wq);
2032 }
2033 
2034 void
2035 oce_drain_wq(struct oce_wq *wq)
2036 {
2037 	struct oce_cq *cq = wq->cq;
2038 	struct oce_nic_tx_cqe *cqe;
2039 	int ncqe = 0;
2040 
2041 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2042 	OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
2043 		WQ_CQE_INVALIDATE(cqe);
2044 		ncqe++;
2045 	}
2046 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2047 	oce_arm_cq(cq, ncqe, FALSE);
2048 }
2049 
2050 void
2051 oce_destroy_wq(struct oce_wq *wq)
2052 {
2053 	struct mbx_delete_nic_wq cmd;
2054 	struct oce_softc *sc = wq->sc;
2055 	struct oce_pkt *pkt;
2056 
2057 	if (wq->id >= 0) {
2058 		memset(&cmd, 0, sizeof(cmd));
2059 		cmd.params.req.wq_id = htole16(wq->id);
2060 		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_WQ, OCE_MBX_VER_V0,
2061 		    &cmd, sizeof(cmd));
2062 	}
2063 	if (wq->cq != NULL)
2064 		oce_destroy_cq(wq->cq);
2065 	if (wq->ring != NULL)
2066 		oce_destroy_ring(sc, wq->ring);
2067 	while ((pkt = oce_pkt_get(&wq->pkt_free)) != NULL)
2068 		oce_pkt_free(sc, pkt);
2069 	free(wq, M_DEVBUF, 0);
2070 }
2071 
2072 /**
2073  * @brief 		function to allocate receive queue resources
2074  * @param sc		software handle to the device
2075  * @param eq		pointer to associated event queue
2076  * @param rss		is-rss-queue flag
2077  * @returns		the pointer to the RQ created or NULL on failure
2078  */
2079 struct oce_rq *
2080 oce_create_rq(struct oce_softc *sc, struct oce_eq *eq, int rss)
2081 {
2082 	struct oce_rq *rq;
2083 	struct oce_cq *cq;
2084 	struct oce_pkt *pkt;
2085 	int i;
2086 
2087 	/* Hardware doesn't support any other value */
2088 	if (sc->sc_rx_ring_size != 1024)
2089 		return (NULL);
2090 
2091 	rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
2092 	if (!rq)
2093 		return (NULL);
2094 
2095 	rq->ring = oce_create_ring(sc, sc->sc_rx_ring_size,
2096 	    sizeof(struct oce_nic_rqe), 2);
2097 	if (!rq->ring) {
2098 		free(rq, M_DEVBUF, 0);
2099 		return (NULL);
2100 	}
2101 
2102 	cq = oce_create_cq(sc, eq, CQ_LEN_1024, sizeof(struct oce_nic_rx_cqe),
2103 	    1, 0, 3);
2104 	if (!cq) {
2105 		oce_destroy_ring(sc, rq->ring);
2106 		free(rq, M_DEVBUF, 0);
2107 		return (NULL);
2108 	}
2109 
2110 	rq->id = -1;
2111 	rq->sc = sc;
2112 
2113 	rq->nitems = sc->sc_rx_ring_size;
2114 	rq->fragsize = OCE_RX_BUF_SIZE;
2115 	rq->rss = rss;
2116 
2117 	SIMPLEQ_INIT(&rq->pkt_free);
2118 	SIMPLEQ_INIT(&rq->pkt_list);
2119 
2120 	for (i = 0; i < sc->sc_rx_ring_size; i++) {
2121 		pkt = oce_pkt_alloc(sc, OCE_RX_BUF_SIZE, 1, OCE_RX_BUF_SIZE);
2122 		if (pkt == NULL) {
2123 			oce_destroy_rq(rq);
2124 			return (NULL);
2125 		}
2126 		oce_pkt_put(&rq->pkt_free, pkt);
2127 	}
2128 
2129 	rq->cq = cq;
2130 	eq->cq[eq->cq_valid] = cq;
2131 	eq->cq_valid++;
2132 	cq->cb_arg = rq;
2133 	cq->cq_intr = oce_intr_rq;
2134 
2135 	/* RX queue is created in oce_init */
2136 
2137 	return (rq);
2138 }
2139 
2140 void
2141 oce_drain_rq(struct oce_rq *rq)
2142 {
2143 	struct oce_nic_rx_cqe *cqe;
2144 	struct oce_cq *cq = rq->cq;
2145 	int ncqe = 0;
2146 
2147 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2148 	OCE_RING_FOREACH(cq->ring, cqe, RQ_CQE_VALID(cqe)) {
2149 		RQ_CQE_INVALIDATE(cqe);
2150 		ncqe++;
2151 	}
2152 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2153 	oce_arm_cq(cq, ncqe, FALSE);
2154 }
2155 
2156 void
2157 oce_destroy_rq(struct oce_rq *rq)
2158 {
2159 	struct mbx_delete_nic_rq cmd;
2160 	struct oce_softc *sc = rq->sc;
2161 	struct oce_pkt *pkt;
2162 
2163 	if (rq->id >= 0) {
2164 		memset(&cmd, 0, sizeof(cmd));
2165 		cmd.params.req.rq_id = htole16(rq->id);
2166 		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_RQ, OCE_MBX_VER_V0,
2167 		    &cmd, sizeof(cmd));
2168 	}
2169 	if (rq->cq != NULL)
2170 		oce_destroy_cq(rq->cq);
2171 	if (rq->ring != NULL)
2172 		oce_destroy_ring(sc, rq->ring);
2173 	while ((pkt = oce_pkt_get(&rq->pkt_free)) != NULL)
2174 		oce_pkt_free(sc, pkt);
2175 	free(rq, M_DEVBUF, 0);
2176 }
2177 
2178 struct oce_eq *
2179 oce_create_eq(struct oce_softc *sc)
2180 {
2181 	struct oce_eq *eq;
2182 
2183 	/* allocate an eq */
2184 	eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
2185 	if (eq == NULL)
2186 		return (NULL);
2187 
2188 	eq->ring = oce_create_ring(sc, EQ_LEN_1024, EQE_SIZE_4, 8);
2189 	if (!eq->ring) {
2190 		free(eq, M_DEVBUF, 0);
2191 		return (NULL);
2192 	}
2193 
2194 	eq->id = -1;
2195 	eq->sc = sc;
2196 	eq->nitems = EQ_LEN_1024;	/* length of event queue */
2197 	eq->isize = EQE_SIZE_4; 	/* size of a queue item */
2198 	eq->delay = OCE_DEFAULT_EQD;	/* event queue delay */
2199 
2200 	if (oce_new_eq(sc, eq)) {
2201 		oce_destroy_ring(sc, eq->ring);
2202 		free(eq, M_DEVBUF, 0);
2203 		return (NULL);
2204 	}
2205 
2206 	return (eq);
2207 }
2208 
2209 /**
2210  * @brief		Function to arm an EQ so that it can generate events
2211  * @param eq		pointer to event queue structure
2212  * @param neqe		number of EQEs to arm
2213  * @param rearm		rearm bit enable/disable
2214  * @param clearint	bit to clear the interrupt condition because of which
2215  *			EQEs are generated
2216  */
2217 static inline void
2218 oce_arm_eq(struct oce_eq *eq, int neqe, int rearm, int clearint)
2219 {
2220 	oce_write_db(eq->sc, PD_EQ_DB, eq->id | PD_EQ_DB_EVENT |
2221 	    (clearint << 9) | (neqe << 16) | (rearm << 29));
2222 }
2223 
2224 void
2225 oce_drain_eq(struct oce_eq *eq)
2226 {
2227 	struct oce_eqe *eqe;
2228 	int neqe = 0;
2229 
2230 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTREAD);
2231 	OCE_RING_FOREACH(eq->ring, eqe, eqe->evnt != 0) {
2232 		eqe->evnt = 0;
2233 		neqe++;
2234 	}
2235 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_PREWRITE);
2236 	oce_arm_eq(eq, neqe, FALSE, TRUE);
2237 }
2238 
2239 void
2240 oce_destroy_eq(struct oce_eq *eq)
2241 {
2242 	struct mbx_destroy_common_eq cmd;
2243 	struct oce_softc *sc = eq->sc;
2244 
2245 	if (eq->id >= 0) {
2246 		memset(&cmd, 0, sizeof(cmd));
2247 		cmd.params.req.id = htole16(eq->id);
2248 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_EQ,
2249 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2250 	}
2251 	if (eq->ring != NULL)
2252 		oce_destroy_ring(sc, eq->ring);
2253 	free(eq, M_DEVBUF, 0);
2254 }
2255 
2256 struct oce_mq *
2257 oce_create_mq(struct oce_softc *sc, struct oce_eq *eq)
2258 {
2259 	struct oce_mq *mq = NULL;
2260 	struct oce_cq *cq;
2261 
2262 	/* allocate the mq */
2263 	mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
2264 	if (!mq)
2265 		return (NULL);
2266 
2267 	mq->ring = oce_create_ring(sc, 128, sizeof(struct oce_mbx), 8);
2268 	if (!mq->ring) {
2269 		free(mq, M_DEVBUF, 0);
2270 		return (NULL);
2271 	}
2272 
2273 	cq = oce_create_cq(sc, eq, CQ_LEN_256, sizeof(struct oce_mq_cqe),
2274 	    1, 0, 0);
2275 	if (!cq) {
2276 		oce_destroy_ring(sc, mq->ring);
2277 		free(mq, M_DEVBUF, 0);
2278 		return (NULL);
2279 	}
2280 
2281 	mq->id = -1;
2282 	mq->sc = sc;
2283 	mq->cq = cq;
2284 
2285 	mq->nitems = 128;
2286 
2287 	if (oce_new_mq(sc, mq)) {
2288 		oce_destroy_cq(mq->cq);
2289 		oce_destroy_ring(sc, mq->ring);
2290 		free(mq, M_DEVBUF, 0);
2291 		return (NULL);
2292 	}
2293 
2294 	eq->cq[eq->cq_valid] = cq;
2295 	eq->cq_valid++;
2296 	mq->cq->eq = eq;
2297 	mq->cq->cb_arg = mq;
2298 	mq->cq->cq_intr = oce_intr_mq;
2299 
2300 	return (mq);
2301 }
2302 
2303 void
2304 oce_drain_mq(struct oce_mq *mq)
2305 {
2306 	struct oce_cq *cq = mq->cq;
2307 	struct oce_mq_cqe *cqe;
2308 	int ncqe = 0;
2309 
2310 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2311 	OCE_RING_FOREACH(cq->ring, cqe, MQ_CQE_VALID(cqe)) {
2312 		MQ_CQE_INVALIDATE(cqe);
2313 		ncqe++;
2314 	}
2315 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2316 	oce_arm_cq(cq, ncqe, FALSE);
2317 }
2318 
2319 void
2320 oce_destroy_mq(struct oce_mq *mq)
2321 {
2322 	struct mbx_destroy_common_mq cmd;
2323 	struct oce_softc *sc = mq->sc;
2324 
2325 	if (mq->id >= 0) {
2326 		memset(&cmd, 0, sizeof(cmd));
2327 		cmd.params.req.id = htole16(mq->id);
2328 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_MQ,
2329 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2330 	}
2331 	if (mq->ring != NULL)
2332 		oce_destroy_ring(sc, mq->ring);
2333 	if (mq->cq != NULL)
2334 		oce_destroy_cq(mq->cq);
2335 	free(mq, M_DEVBUF, 0);
2336 }
2337 
2338 /**
2339  * @brief		Function to create a completion queue
2340  * @param sc		software handle to the device
2341  * @param eq		optional eq to be associated with to the cq
2342  * @param nitems	length of completion queue
2343  * @param isize		size of completion queue items
2344  * @param eventable	event table
2345  * @param nodelay	no delay flag
2346  * @param ncoalesce	no coalescence flag
2347  * @returns 		pointer to the cq created, NULL on failure
2348  */
2349 struct oce_cq *
2350 oce_create_cq(struct oce_softc *sc, struct oce_eq *eq, int nitems, int isize,
2351     int eventable, int nodelay, int ncoalesce)
2352 {
2353 	struct oce_cq *cq = NULL;
2354 
2355 	cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
2356 	if (!cq)
2357 		return (NULL);
2358 
2359 	cq->ring = oce_create_ring(sc, nitems, isize, 4);
2360 	if (!cq->ring) {
2361 		free(cq, M_DEVBUF, 0);
2362 		return (NULL);
2363 	}
2364 
2365 	cq->sc = sc;
2366 	cq->eq = eq;
2367 	cq->nitems = nitems;
2368 	cq->nodelay = nodelay;
2369 	cq->ncoalesce = ncoalesce;
2370 	cq->eventable = eventable;
2371 
2372 	if (oce_new_cq(sc, cq)) {
2373 		oce_destroy_ring(sc, cq->ring);
2374 		free(cq, M_DEVBUF, 0);
2375 		return (NULL);
2376 	}
2377 
2378 	sc->sc_cq[sc->sc_ncq++] = cq;
2379 
2380 	return (cq);
2381 }
2382 
2383 void
2384 oce_destroy_cq(struct oce_cq *cq)
2385 {
2386 	struct mbx_destroy_common_cq cmd;
2387 	struct oce_softc *sc = cq->sc;
2388 
2389 	if (cq->id >= 0) {
2390 		memset(&cmd, 0, sizeof(cmd));
2391 		cmd.params.req.id = htole16(cq->id);
2392 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_CQ,
2393 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2394 	}
2395 	if (cq->ring != NULL)
2396 		oce_destroy_ring(sc, cq->ring);
2397 	free(cq, M_DEVBUF, 0);
2398 }
2399 
2400 /**
2401  * @brief		Function to arm a CQ with CQEs
2402  * @param cq		pointer to the completion queue structure
2403  * @param ncqe		number of CQEs to arm
2404  * @param rearm		rearm bit enable/disable
2405  */
2406 static inline void
2407 oce_arm_cq(struct oce_cq *cq, int ncqe, int rearm)
2408 {
2409 	oce_write_db(cq->sc, PD_CQ_DB, cq->id | (ncqe << 16) | (rearm << 29));
2410 }
2411 
2412 void
2413 oce_free_posted_rxbuf(struct oce_rq *rq)
2414 {
2415 	struct oce_softc *sc = rq->sc;
2416 	struct oce_pkt *pkt;
2417 
2418 	while ((pkt = oce_pkt_get(&rq->pkt_list)) != NULL) {
2419 		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
2420 		    BUS_DMASYNC_POSTREAD);
2421 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
2422 		if (pkt->mbuf != NULL) {
2423 			m_freem(pkt->mbuf);
2424 			pkt->mbuf = NULL;
2425 		}
2426 		oce_pkt_put(&rq->pkt_free, pkt);
2427 		if_rxr_put(&rq->rxring, 1);
2428 	}
2429 }
2430 
2431 int
2432 oce_dma_alloc(struct oce_softc *sc, bus_size_t size, struct oce_dma_mem *dma)
2433 {
2434 	int rc;
2435 
2436 	memset(dma, 0, sizeof(struct oce_dma_mem));
2437 
2438 	dma->tag = sc->sc_dmat;
2439 	rc = bus_dmamap_create(dma->tag, size, 1, size, 0, BUS_DMA_NOWAIT,
2440 	    &dma->map);
2441 	if (rc != 0) {
2442 		printf("%s: failed to allocate DMA handle",
2443 		    sc->sc_dev.dv_xname);
2444 		goto fail_0;
2445 	}
2446 
2447 	rc = bus_dmamem_alloc(dma->tag, size, PAGE_SIZE, 0, &dma->segs, 1,
2448 	    &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
2449 	if (rc != 0) {
2450 		printf("%s: failed to allocate DMA memory",
2451 		    sc->sc_dev.dv_xname);
2452 		goto fail_1;
2453 	}
2454 
2455 	rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
2456 	    &dma->vaddr, BUS_DMA_NOWAIT);
2457 	if (rc != 0) {
2458 		printf("%s: failed to map DMA memory", sc->sc_dev.dv_xname);
2459 		goto fail_2;
2460 	}
2461 
2462 	rc = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, NULL,
2463 	    BUS_DMA_NOWAIT);
2464 	if (rc != 0) {
2465 		printf("%s: failed to load DMA memory", sc->sc_dev.dv_xname);
2466 		goto fail_3;
2467 	}
2468 
2469 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2470 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2471 
2472 	dma->paddr = dma->map->dm_segs[0].ds_addr;
2473 	dma->size = size;
2474 
2475 	return (0);
2476 
2477 fail_3:
2478 	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
2479 fail_2:
2480 	bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2481 fail_1:
2482 	bus_dmamap_destroy(dma->tag, dma->map);
2483 fail_0:
2484 	return (rc);
2485 }
2486 
2487 void
2488 oce_dma_free(struct oce_softc *sc, struct oce_dma_mem *dma)
2489 {
2490 	if (dma->tag == NULL)
2491 		return;
2492 
2493 	if (dma->map != NULL) {
2494 		oce_dma_sync(dma, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2495 		bus_dmamap_unload(dma->tag, dma->map);
2496 
2497 		if (dma->vaddr != 0) {
2498 			bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2499 			dma->vaddr = 0;
2500 		}
2501 
2502 		bus_dmamap_destroy(dma->tag, dma->map);
2503 		dma->map = NULL;
2504 		dma->tag = NULL;
2505 	}
2506 }
2507 
2508 struct oce_ring *
2509 oce_create_ring(struct oce_softc *sc, int nitems, int isize, int maxsegs)
2510 {
2511 	struct oce_dma_mem *dma;
2512 	struct oce_ring *ring;
2513 	bus_size_t size = nitems * isize;
2514 	int rc;
2515 
2516 	if (size > maxsegs * PAGE_SIZE)
2517 		return (NULL);
2518 
2519 	ring = malloc(sizeof(struct oce_ring), M_DEVBUF, M_NOWAIT | M_ZERO);
2520 	if (ring == NULL)
2521 		return (NULL);
2522 
2523 	ring->isize = isize;
2524 	ring->nitems = nitems;
2525 
2526 	dma = &ring->dma;
2527 	dma->tag = sc->sc_dmat;
2528 	rc = bus_dmamap_create(dma->tag, size, maxsegs, PAGE_SIZE, 0,
2529 	    BUS_DMA_NOWAIT, &dma->map);
2530 	if (rc != 0) {
2531 		printf("%s: failed to allocate DMA handle",
2532 		    sc->sc_dev.dv_xname);
2533 		goto fail_0;
2534 	}
2535 
2536 	rc = bus_dmamem_alloc(dma->tag, size, 0, 0, &dma->segs, maxsegs,
2537 	    &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
2538 	if (rc != 0) {
2539 		printf("%s: failed to allocate DMA memory",
2540 		    sc->sc_dev.dv_xname);
2541 		goto fail_1;
2542 	}
2543 
2544 	rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
2545 	    &dma->vaddr, BUS_DMA_NOWAIT);
2546 	if (rc != 0) {
2547 		printf("%s: failed to map DMA memory", sc->sc_dev.dv_xname);
2548 		goto fail_2;
2549 	}
2550 
2551 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2552 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2553 
2554 	dma->paddr = 0;
2555 	dma->size = size;
2556 
2557 	return (ring);
2558 
2559 fail_2:
2560 	bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2561 fail_1:
2562 	bus_dmamap_destroy(dma->tag, dma->map);
2563 fail_0:
2564 	free(ring, M_DEVBUF, 0);
2565 	return (NULL);
2566 }
2567 
2568 void
2569 oce_destroy_ring(struct oce_softc *sc, struct oce_ring *ring)
2570 {
2571 	oce_dma_free(sc, &ring->dma);
2572 	free(ring, M_DEVBUF, 0);
2573 }
2574 
2575 int
2576 oce_load_ring(struct oce_softc *sc, struct oce_ring *ring,
2577     struct oce_pa *pa, int maxsegs)
2578 {
2579 	struct oce_dma_mem *dma = &ring->dma;
2580 	int i;
2581 
2582 	if (bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
2583 	    ring->isize * ring->nitems, NULL, BUS_DMA_NOWAIT)) {
2584 		printf("%s: failed to load a ring map\n", sc->sc_dev.dv_xname);
2585 		return (0);
2586 	}
2587 
2588 	if (dma->map->dm_nsegs > maxsegs) {
2589 		printf("%s: too many segments\n", sc->sc_dev.dv_xname);
2590 		return (0);
2591 	}
2592 
2593 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2594 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2595 
2596 	for (i = 0; i < dma->map->dm_nsegs; i++)
2597 		pa[i].addr = dma->map->dm_segs[i].ds_addr;
2598 
2599 	return (dma->map->dm_nsegs);
2600 }
2601 
2602 static inline void *
2603 oce_ring_get(struct oce_ring *ring)
2604 {
2605 	int index = ring->index;
2606 
2607 	if (++ring->index == ring->nitems)
2608 		ring->index = 0;
2609 	return ((void *)(ring->dma.vaddr + index * ring->isize));
2610 }
2611 
2612 static inline void *
2613 oce_ring_first(struct oce_ring *ring)
2614 {
2615 	return ((void *)(ring->dma.vaddr + ring->index * ring->isize));
2616 }
2617 
2618 static inline void *
2619 oce_ring_next(struct oce_ring *ring)
2620 {
2621 	if (++ring->index == ring->nitems)
2622 		ring->index = 0;
2623 	return ((void *)(ring->dma.vaddr + ring->index * ring->isize));
2624 }
2625 
2626 struct oce_pkt *
2627 oce_pkt_alloc(struct oce_softc *sc, size_t size, int nsegs, int maxsegsz)
2628 {
2629 	struct oce_pkt *pkt;
2630 
2631 	if ((pkt = pool_get(oce_pkt_pool, PR_NOWAIT | PR_ZERO)) == NULL)
2632 		return (NULL);
2633 
2634 	if (bus_dmamap_create(sc->sc_dmat, size, nsegs, maxsegsz, 0,
2635 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &pkt->map)) {
2636 		pool_put(oce_pkt_pool, pkt);
2637 		return (NULL);
2638 	}
2639 
2640 	return (pkt);
2641 }
2642 
2643 void
2644 oce_pkt_free(struct oce_softc *sc, struct oce_pkt *pkt)
2645 {
2646 	if (pkt->map) {
2647 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
2648 		bus_dmamap_destroy(sc->sc_dmat, pkt->map);
2649 	}
2650 	pool_put(oce_pkt_pool, pkt);
2651 }
2652 
2653 static inline struct oce_pkt *
2654 oce_pkt_get(struct oce_pkt_list *lst)
2655 {
2656 	struct oce_pkt *pkt;
2657 
2658 	pkt = SIMPLEQ_FIRST(lst);
2659 	if (pkt == NULL)
2660 		return (NULL);
2661 
2662 	SIMPLEQ_REMOVE_HEAD(lst, entry);
2663 
2664 	return (pkt);
2665 }
2666 
2667 static inline void
2668 oce_pkt_put(struct oce_pkt_list *lst, struct oce_pkt *pkt)
2669 {
2670 	SIMPLEQ_INSERT_TAIL(lst, pkt, entry);
2671 }
2672 
2673 /**
2674  * @brief Wait for FW to become ready and reset it
2675  * @param sc		software handle to the device
2676  */
2677 int
2678 oce_init_fw(struct oce_softc *sc)
2679 {
2680 	struct ioctl_common_function_reset cmd;
2681 	uint32_t reg;
2682 	int err = 0, tmo = 60000;
2683 
2684 	/* read semaphore CSR */
2685 	reg = oce_read_csr(sc, MPU_EP_SEMAPHORE(sc));
2686 
2687 	/* if host is ready then wait for fw ready else send POST */
2688 	if ((reg & MPU_EP_SEM_STAGE_MASK) <= POST_STAGE_AWAITING_HOST_RDY) {
2689 		reg = (reg & ~MPU_EP_SEM_STAGE_MASK) | POST_STAGE_CHIP_RESET;
2690 		oce_write_csr(sc, MPU_EP_SEMAPHORE(sc), reg);
2691 	}
2692 
2693 	/* wait for FW to become ready */
2694 	for (;;) {
2695 		if (--tmo == 0)
2696 			break;
2697 
2698 		DELAY(1000);
2699 
2700 		reg = oce_read_csr(sc, MPU_EP_SEMAPHORE(sc));
2701 		if (reg & MPU_EP_SEM_ERROR) {
2702 			printf(": POST failed: %#x\n", reg);
2703 			return (ENXIO);
2704 		}
2705 		if ((reg & MPU_EP_SEM_STAGE_MASK) == POST_STAGE_ARMFW_READY) {
2706 			/* reset FW */
2707 			if (ISSET(sc->sc_flags, OCE_F_RESET_RQD)) {
2708 				memset(&cmd, 0, sizeof(cmd));
2709 				err = oce_cmd(sc, SUBSYS_COMMON,
2710 				    OPCODE_COMMON_FUNCTION_RESET,
2711 				    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2712 			}
2713 			return (err);
2714 		}
2715 	}
2716 
2717 	printf(": POST timed out: %#x\n", reg);
2718 
2719 	return (ENXIO);
2720 }
2721 
2722 static inline int
2723 oce_mbox_wait(struct oce_softc *sc)
2724 {
2725 	int i;
2726 
2727 	for (i = 0; i < 20000; i++) {
2728 		if (oce_read_db(sc, PD_MPU_MBOX_DB) & PD_MPU_MBOX_DB_READY)
2729 			return (0);
2730 		DELAY(100);
2731 	}
2732 	return (ETIMEDOUT);
2733 }
2734 
2735 /**
2736  * @brief Mailbox dispatch
2737  * @param sc		software handle to the device
2738  */
2739 int
2740 oce_mbox_dispatch(struct oce_softc *sc)
2741 {
2742 	uint32_t pa, reg;
2743 	int err;
2744 
2745 	pa = (uint32_t)((uint64_t)OCE_MEM_DVA(&sc->sc_mbx) >> 34);
2746 	reg = PD_MPU_MBOX_DB_HI | (pa << PD_MPU_MBOX_DB_ADDR_SHIFT);
2747 
2748 	if ((err = oce_mbox_wait(sc)) != 0)
2749 		goto out;
2750 
2751 	oce_write_db(sc, PD_MPU_MBOX_DB, reg);
2752 
2753 	pa = (uint32_t)((uint64_t)OCE_MEM_DVA(&sc->sc_mbx) >> 4) & 0x3fffffff;
2754 	reg = pa << PD_MPU_MBOX_DB_ADDR_SHIFT;
2755 
2756 	if ((err = oce_mbox_wait(sc)) != 0)
2757 		goto out;
2758 
2759 	oce_write_db(sc, PD_MPU_MBOX_DB, reg);
2760 
2761 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_POSTWRITE);
2762 
2763 	if ((err = oce_mbox_wait(sc)) != 0)
2764 		goto out;
2765 
2766 out:
2767 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_PREREAD);
2768 	return (err);
2769 }
2770 
2771 /**
2772  * @brief Function to initialize the hw with host endian information
2773  * @param sc		software handle to the device
2774  * @returns		0 on success, ETIMEDOUT on failure
2775  */
2776 int
2777 oce_mbox_init(struct oce_softc *sc)
2778 {
2779 	struct oce_bmbx *bmbx = OCE_MEM_KVA(&sc->sc_mbx);
2780 	uint8_t *ptr = (uint8_t *)&bmbx->mbx;
2781 
2782 	if (!ISSET(sc->sc_flags, OCE_F_MBOX_ENDIAN_RQD))
2783 		return (0);
2784 
2785 	/* Endian Signature */
2786 	*ptr++ = 0xff;
2787 	*ptr++ = 0x12;
2788 	*ptr++ = 0x34;
2789 	*ptr++ = 0xff;
2790 	*ptr++ = 0xff;
2791 	*ptr++ = 0x56;
2792 	*ptr++ = 0x78;
2793 	*ptr = 0xff;
2794 
2795 	return (oce_mbox_dispatch(sc));
2796 }
2797 
2798 int
2799 oce_cmd(struct oce_softc *sc, int subsys, int opcode, int version,
2800     void *payload, int length)
2801 {
2802 	struct oce_bmbx *bmbx = OCE_MEM_KVA(&sc->sc_mbx);
2803 	struct oce_mbx *mbx = &bmbx->mbx;
2804 	struct mbx_hdr *hdr;
2805 	caddr_t epayload = NULL;
2806 	int err;
2807 
2808 	if (length > OCE_MBX_PAYLOAD)
2809 		epayload = OCE_MEM_KVA(&sc->sc_pld);
2810 	if (length > OCE_MAX_PAYLOAD)
2811 		return (EINVAL);
2812 
2813 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2814 
2815 	memset(mbx, 0, sizeof(struct oce_mbx));
2816 
2817 	mbx->payload_length = length;
2818 
2819 	if (epayload) {
2820 		mbx->flags = OCE_MBX_F_SGE;
2821 		oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_PREREAD);
2822 		memcpy(epayload, payload, length);
2823 		mbx->pld.sgl[0].addr = OCE_MEM_DVA(&sc->sc_pld);
2824 		mbx->pld.sgl[0].length = length;
2825 		hdr = (struct mbx_hdr *)epayload;
2826 	} else {
2827 		mbx->flags = OCE_MBX_F_EMBED;
2828 		memcpy(mbx->pld.data, payload, length);
2829 		hdr = (struct mbx_hdr *)&mbx->pld.data;
2830 	}
2831 
2832 	hdr->subsys = subsys;
2833 	hdr->opcode = opcode;
2834 	hdr->version = version;
2835 	hdr->length = length - sizeof(*hdr);
2836 	if (opcode == OPCODE_COMMON_FUNCTION_RESET)
2837 		hdr->timeout = 2 * OCE_MBX_TIMEOUT;
2838 	else
2839 		hdr->timeout = OCE_MBX_TIMEOUT;
2840 
2841 	if (epayload)
2842 		oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_PREWRITE);
2843 
2844 	err = oce_mbox_dispatch(sc);
2845 	if (err == 0) {
2846 		if (epayload) {
2847 			oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_POSTWRITE);
2848 			memcpy(payload, epayload, length);
2849 		} else
2850 			memcpy(payload, &mbx->pld.data, length);
2851 	} else
2852 		printf("%s: mailbox timeout, subsys %d op %d ver %d "
2853 		    "%spayload lenght %d\n", sc->sc_dev.dv_xname, subsys,
2854 		    opcode, version, epayload ? "ext " : "",
2855 		    length);
2856 	return (err);
2857 }
2858 
2859 /**
2860  * @brief	Firmware will send gracious notifications during
2861  *		attach only after sending first mcc commnad. We
2862  *		use MCC queue only for getting async and mailbox
2863  *		for sending cmds. So to get gracious notifications
2864  *		atleast send one dummy command on mcc.
2865  */
2866 void
2867 oce_first_mcc(struct oce_softc *sc)
2868 {
2869 	struct oce_mbx *mbx;
2870 	struct oce_mq *mq = sc->sc_mq;
2871 	struct mbx_hdr *hdr;
2872 	struct mbx_get_common_fw_version *cmd;
2873 
2874 	mbx = oce_ring_get(mq->ring);
2875 	memset(mbx, 0, sizeof(struct oce_mbx));
2876 
2877 	cmd = (struct mbx_get_common_fw_version *)&mbx->pld.data;
2878 
2879 	hdr = &cmd->hdr;
2880 	hdr->subsys = SUBSYS_COMMON;
2881 	hdr->opcode = OPCODE_COMMON_GET_FW_VERSION;
2882 	hdr->version = OCE_MBX_VER_V0;
2883 	hdr->timeout = OCE_MBX_TIMEOUT;
2884 	hdr->length = sizeof(*cmd) - sizeof(*hdr);
2885 
2886 	mbx->flags = OCE_MBX_F_EMBED;
2887 	mbx->payload_length = sizeof(*cmd);
2888 	oce_dma_sync(&mq->ring->dma, BUS_DMASYNC_PREREAD |
2889 	    BUS_DMASYNC_PREWRITE);
2890 	oce_write_db(sc, PD_MQ_DB, mq->id | (1 << 16));
2891 }
2892 
2893 int
2894 oce_get_fw_config(struct oce_softc *sc)
2895 {
2896 	struct mbx_common_query_fw_config cmd;
2897 	int err;
2898 
2899 	memset(&cmd, 0, sizeof(cmd));
2900 
2901 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2902 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2903 	if (err)
2904 		return (err);
2905 
2906 	sc->sc_port = cmd.params.rsp.port_id;
2907 	sc->sc_fmode = cmd.params.rsp.function_mode;
2908 
2909 	return (0);
2910 }
2911 
2912 int
2913 oce_check_native_mode(struct oce_softc *sc)
2914 {
2915 	struct mbx_common_set_function_cap cmd;
2916 	int err;
2917 
2918 	memset(&cmd, 0, sizeof(cmd));
2919 
2920 	cmd.params.req.valid_capability_flags = CAP_SW_TIMESTAMPS |
2921 	    CAP_BE3_NATIVE_ERX_API;
2922 	cmd.params.req.capability_flags = CAP_BE3_NATIVE_ERX_API;
2923 
2924 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_FUNCTIONAL_CAPS,
2925 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2926 	if (err)
2927 		return (err);
2928 
2929 	if (cmd.params.rsp.capability_flags & CAP_BE3_NATIVE_ERX_API)
2930 		SET(sc->sc_flags, OCE_F_BE3_NATIVE);
2931 
2932 	return (0);
2933 }
2934 
2935 /**
2936  * @brief Function for creating a network interface.
2937  * @param sc		software handle to the device
2938  * @returns		0 on success, error otherwise
2939  */
2940 int
2941 oce_create_iface(struct oce_softc *sc, uint8_t *macaddr)
2942 {
2943 	struct mbx_create_common_iface cmd;
2944 	uint32_t caps, caps_en;
2945 	int err = 0;
2946 
2947 	/* interface capabilities to give device when creating interface */
2948 	caps = MBX_RX_IFACE_BROADCAST | MBX_RX_IFACE_UNTAGGED |
2949 	    MBX_RX_IFACE_PROMISC | MBX_RX_IFACE_MCAST_PROMISC |
2950 	    MBX_RX_IFACE_RSS;
2951 
2952 	/* capabilities to enable by default (others set dynamically) */
2953 	caps_en = MBX_RX_IFACE_BROADCAST | MBX_RX_IFACE_UNTAGGED;
2954 
2955 	if (!IS_XE201(sc)) {
2956 		/* LANCER A0 workaround */
2957 		caps |= MBX_RX_IFACE_PASS_L3L4_ERR;
2958 		caps_en |= MBX_RX_IFACE_PASS_L3L4_ERR;
2959 	}
2960 
2961 	/* enable capabilities controlled via driver startup parameters */
2962 	if (sc->sc_rss_enable)
2963 		caps_en |= MBX_RX_IFACE_RSS;
2964 
2965 	memset(&cmd, 0, sizeof(cmd));
2966 
2967 	cmd.params.req.version = 0;
2968 	cmd.params.req.cap_flags = htole32(caps);
2969 	cmd.params.req.enable_flags = htole32(caps_en);
2970 	if (macaddr != NULL) {
2971 		memcpy(&cmd.params.req.mac_addr[0], macaddr, ETHER_ADDR_LEN);
2972 		cmd.params.req.mac_invalid = 0;
2973 	} else
2974 		cmd.params.req.mac_invalid = 1;
2975 
2976 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_IFACE,
2977 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2978 	if (err)
2979 		return (err);
2980 
2981 	sc->sc_if_id = letoh32(cmd.params.rsp.if_id);
2982 
2983 	if (macaddr != NULL)
2984 		sc->sc_pmac_id = letoh32(cmd.params.rsp.pmac_id);
2985 
2986 	return (0);
2987 }
2988 
2989 /**
2990  * @brief Function to send the mbx command to configure vlan
2991  * @param sc 		software handle to the device
2992  * @param vtags		array of vlan tags
2993  * @param nvtags	number of elements in array
2994  * @param untagged	boolean TRUE/FLASE
2995  * @param promisc	flag to enable/disable VLAN promiscuous mode
2996  * @returns		0 on success, EIO on failure
2997  */
2998 int
2999 oce_config_vlan(struct oce_softc *sc, struct normal_vlan *vtags, int nvtags,
3000     int untagged, int promisc)
3001 {
3002 	struct mbx_common_config_vlan cmd;
3003 
3004 	memset(&cmd, 0, sizeof(cmd));
3005 
3006 	cmd.params.req.if_id = sc->sc_if_id;
3007 	cmd.params.req.promisc = promisc;
3008 	cmd.params.req.untagged = untagged;
3009 	cmd.params.req.num_vlans = nvtags;
3010 
3011 	if (!promisc)
3012 		memcpy(cmd.params.req.tags.normal_vlans, vtags,
3013 			nvtags * sizeof(struct normal_vlan));
3014 
3015 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CONFIG_IFACE_VLAN,
3016 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3017 }
3018 
3019 /**
3020  * @brief Function to set flow control capability in the hardware
3021  * @param sc 		software handle to the device
3022  * @param flags		flow control flags to set
3023  * @returns		0 on success, EIO on failure
3024  */
3025 int
3026 oce_set_flow_control(struct oce_softc *sc, uint flags)
3027 {
3028 	struct mbx_common_get_set_flow_control cmd;
3029 	int err;
3030 
3031 	memset(&cmd, 0, sizeof(cmd));
3032 
3033 	cmd.rx_flow_control = flags & IFM_ETH_RXPAUSE ? 1 : 0;
3034 	cmd.tx_flow_control = flags & IFM_ETH_TXPAUSE ? 1 : 0;
3035 
3036 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_FLOW_CONTROL,
3037 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3038 	if (err)
3039 		return (err);
3040 
3041 	memset(&cmd, 0, sizeof(cmd));
3042 
3043 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_GET_FLOW_CONTROL,
3044 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3045 	if (err)
3046 		return (err);
3047 
3048 	sc->sc_fc  = cmd.rx_flow_control ? IFM_ETH_RXPAUSE : 0;
3049 	sc->sc_fc |= cmd.tx_flow_control ? IFM_ETH_TXPAUSE : 0;
3050 
3051 	return (0);
3052 }
3053 
3054 #ifdef OCE_RSS
3055 /**
3056  * @brief Function to set flow control capability in the hardware
3057  * @param sc 		software handle to the device
3058  * @param enable	0=disable, OCE_RSS_xxx flags otherwise
3059  * @returns		0 on success, EIO on failure
3060  */
3061 int
3062 oce_config_rss(struct oce_softc *sc, int enable)
3063 {
3064 	struct mbx_config_nic_rss cmd;
3065 	uint8_t *tbl = &cmd.params.req.cputable;
3066 	int i, j;
3067 
3068 	memset(&cmd, 0, sizeof(cmd));
3069 
3070 	if (enable)
3071 		cmd.params.req.enable_rss = RSS_ENABLE_IPV4 | RSS_ENABLE_IPV6 |
3072 		    RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_TCP_IPV6);
3073 	cmd.params.req.flush = OCE_FLUSH;
3074 	cmd.params.req.if_id = htole32(sc->sc_if_id);
3075 
3076 	arc4random_buf(cmd.params.req.hash, sizeof(cmd.params.req.hash));
3077 
3078 	/*
3079 	 * Initialize the RSS CPU indirection table.
3080 	 *
3081 	 * The table is used to choose the queue to place incoming packets.
3082 	 * Incoming packets are hashed.  The lowest bits in the hash result
3083 	 * are used as the index into the CPU indirection table.
3084 	 * Each entry in the table contains the RSS CPU-ID returned by the NIC
3085 	 * create.  Based on the CPU ID, the receive completion is routed to
3086 	 * the corresponding RSS CQs.  (Non-RSS packets are always completed
3087 	 * on the default (0) CQ).
3088 	 */
3089 	for (i = 0, j = 0; j < sc->sc_nrq; j++) {
3090 		if (sc->sc_rq[j]->cfg.is_rss_queue)
3091 			tbl[i++] = sc->sc_rq[j]->rss_cpuid;
3092 	}
3093 	if (i > 0)
3094 		cmd->params.req.cpu_tbl_sz_log2 = htole16(ilog2(i));
3095 	else
3096 		return (ENXIO);
3097 
3098 	return (oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CONFIG_RSS, OCE_MBX_VER_V0,
3099 	    &cmd, sizeof(cmd)));
3100 }
3101 #endif	/* OCE_RSS */
3102 
3103 /**
3104  * @brief Function for hardware update multicast filter
3105  * @param sc		software handle to the device
3106  * @param multi		table of multicast addresses
3107  * @param naddr		number of multicast addresses in the table
3108  */
3109 int
3110 oce_update_mcast(struct oce_softc *sc,
3111     uint8_t multi[][ETHER_ADDR_LEN], int naddr)
3112 {
3113 	struct mbx_set_common_iface_multicast cmd;
3114 
3115 	memset(&cmd, 0, sizeof(cmd));
3116 
3117 	memcpy(&cmd.params.req.mac[0], &multi[0], naddr * ETHER_ADDR_LEN);
3118 	cmd.params.req.num_mac = htole16(naddr);
3119 	cmd.params.req.if_id = sc->sc_if_id;
3120 
3121 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_IFACE_MULTICAST,
3122 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3123 }
3124 
3125 /**
3126  * @brief RXF function to enable/disable device promiscuous mode
3127  * @param sc		software handle to the device
3128  * @param enable	enable/disable flag
3129  * @returns		0 on success, EIO on failure
3130  * @note
3131  *	The OPCODE_NIC_CONFIG_PROMISCUOUS command deprecated for Lancer.
3132  *	This function uses the COMMON_SET_IFACE_RX_FILTER command instead.
3133  */
3134 int
3135 oce_set_promisc(struct oce_softc *sc, int enable)
3136 {
3137 	struct mbx_set_common_iface_rx_filter cmd;
3138 	struct iface_rx_filter_ctx *req;
3139 
3140 	memset(&cmd, 0, sizeof(cmd));
3141 
3142 	req = &cmd.params.req;
3143 	req->if_id = sc->sc_if_id;
3144 
3145 	if (enable)
3146 		req->iface_flags = req->iface_flags_mask =
3147 		    MBX_RX_IFACE_PROMISC | MBX_RX_IFACE_VLAN_PROMISC;
3148 
3149 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_IFACE_RX_FILTER,
3150 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3151 }
3152 
3153 /**
3154  * @brief Function to query the link status from the hardware
3155  * @param sc 		software handle to the device
3156  * @param[out] link	pointer to the structure returning link attributes
3157  * @returns		0 on success, EIO on failure
3158  */
3159 int
3160 oce_get_link_status(struct oce_softc *sc)
3161 {
3162 	struct mbx_query_common_link_config cmd;
3163 	int err;
3164 
3165 	memset(&cmd, 0, sizeof(cmd));
3166 
3167 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_LINK_CONFIG,
3168 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3169 	if (err)
3170 		return (err);
3171 
3172 	sc->sc_link_up = (letoh32(cmd.params.rsp.logical_link_status) ==
3173 	    NTWK_LOGICAL_LINK_UP);
3174 
3175 	if (cmd.params.rsp.mac_speed < 5)
3176 		sc->sc_link_speed = cmd.params.rsp.mac_speed;
3177 	else
3178 		sc->sc_link_speed = 0;
3179 
3180 	return (0);
3181 }
3182 
3183 void
3184 oce_macaddr_set(struct oce_softc *sc)
3185 {
3186 	uint32_t old_pmac_id = sc->sc_pmac_id;
3187 	int status = 0;
3188 
3189 	if (!memcmp(sc->sc_macaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN))
3190 		return;
3191 
3192 	status = oce_macaddr_add(sc, sc->sc_ac.ac_enaddr, &sc->sc_pmac_id);
3193 	if (!status)
3194 		status = oce_macaddr_del(sc, old_pmac_id);
3195 	else
3196 		printf("%s: failed to set MAC address\n", sc->sc_dev.dv_xname);
3197 }
3198 
3199 int
3200 oce_macaddr_get(struct oce_softc *sc, uint8_t *macaddr)
3201 {
3202 	struct mbx_query_common_iface_mac cmd;
3203 	int err;
3204 
3205 	memset(&cmd, 0, sizeof(cmd));
3206 
3207 	cmd.params.req.type = MAC_ADDRESS_TYPE_NETWORK;
3208 	cmd.params.req.permanent = 1;
3209 
3210 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_IFACE_MAC,
3211 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3212 	if (err == 0)
3213 		memcpy(macaddr, &cmd.params.rsp.mac.mac_addr[0],
3214 		    ETHER_ADDR_LEN);
3215 	return (err);
3216 }
3217 
3218 int
3219 oce_macaddr_add(struct oce_softc *sc, uint8_t *enaddr, uint32_t *pmac)
3220 {
3221 	struct mbx_add_common_iface_mac cmd;
3222 	int err;
3223 
3224 	memset(&cmd, 0, sizeof(cmd));
3225 
3226 	cmd.params.req.if_id = htole16(sc->sc_if_id);
3227 	memcpy(cmd.params.req.mac_address, enaddr, ETHER_ADDR_LEN);
3228 
3229 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_ADD_IFACE_MAC,
3230 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3231 	if (err == 0)
3232 		*pmac = letoh32(cmd.params.rsp.pmac_id);
3233 	return (err);
3234 }
3235 
3236 int
3237 oce_macaddr_del(struct oce_softc *sc, uint32_t pmac)
3238 {
3239 	struct mbx_del_common_iface_mac cmd;
3240 
3241 	memset(&cmd, 0, sizeof(cmd));
3242 
3243 	cmd.params.req.if_id = htole16(sc->sc_if_id);
3244 	cmd.params.req.pmac_id = htole32(pmac);
3245 
3246 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DEL_IFACE_MAC,
3247 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3248 }
3249 
3250 int
3251 oce_new_rq(struct oce_softc *sc, struct oce_rq *rq)
3252 {
3253 	struct mbx_create_nic_rq cmd;
3254 	int err, npages;
3255 
3256 	memset(&cmd, 0, sizeof(cmd));
3257 
3258 	npages = oce_load_ring(sc, rq->ring, &cmd.params.req.pages[0],
3259 	    nitems(cmd.params.req.pages));
3260 	if (!npages) {
3261 		printf("%s: failed to load the rq ring\n", __func__);
3262 		return (1);
3263 	}
3264 
3265 	if (IS_XE201(sc)) {
3266 		cmd.params.req.frag_size = rq->fragsize / 2048;
3267 		cmd.params.req.page_size = 1;
3268 	} else
3269 		cmd.params.req.frag_size = ilog2(rq->fragsize);
3270 	cmd.params.req.num_pages = npages;
3271 	cmd.params.req.cq_id = rq->cq->id;
3272 	cmd.params.req.if_id = htole32(sc->sc_if_id);
3273 	cmd.params.req.max_frame_size = htole16(rq->mtu);
3274 	cmd.params.req.is_rss_queue = htole32(rq->rss);
3275 
3276 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CREATE_RQ,
3277 	    IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0, &cmd,
3278 	    sizeof(cmd));
3279 	if (err)
3280 		return (err);
3281 
3282 	rq->id = letoh16(cmd.params.rsp.rq_id);
3283 	rq->rss_cpuid = cmd.params.rsp.rss_cpuid;
3284 
3285 	return (0);
3286 }
3287 
3288 int
3289 oce_new_wq(struct oce_softc *sc, struct oce_wq *wq)
3290 {
3291 	struct mbx_create_nic_wq cmd;
3292 	int err, npages;
3293 
3294 	memset(&cmd, 0, sizeof(cmd));
3295 
3296 	npages = oce_load_ring(sc, wq->ring, &cmd.params.req.pages[0],
3297 	    nitems(cmd.params.req.pages));
3298 	if (!npages) {
3299 		printf("%s: failed to load the wq ring\n", __func__);
3300 		return (1);
3301 	}
3302 
3303 	if (IS_XE201(sc))
3304 		cmd.params.req.if_id = sc->sc_if_id;
3305 	cmd.params.req.nic_wq_type = NIC_WQ_TYPE_STANDARD;
3306 	cmd.params.req.num_pages = npages;
3307 	cmd.params.req.wq_size = ilog2(wq->nitems) + 1;
3308 	cmd.params.req.cq_id = htole16(wq->cq->id);
3309 	cmd.params.req.ulp_num = 1;
3310 
3311 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CREATE_WQ,
3312 	    IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0, &cmd,
3313 	    sizeof(cmd));
3314 	if (err)
3315 		return (err);
3316 
3317 	wq->id = letoh16(cmd.params.rsp.wq_id);
3318 
3319 	return (0);
3320 }
3321 
3322 int
3323 oce_new_mq(struct oce_softc *sc, struct oce_mq *mq)
3324 {
3325 	struct mbx_create_common_mq_ex cmd;
3326 	union oce_mq_ext_ctx *ctx;
3327 	int err, npages;
3328 
3329 	memset(&cmd, 0, sizeof(cmd));
3330 
3331 	npages = oce_load_ring(sc, mq->ring, &cmd.params.req.pages[0],
3332 	    nitems(cmd.params.req.pages));
3333 	if (!npages) {
3334 		printf("%s: failed to load the mq ring\n", __func__);
3335 		return (-1);
3336 	}
3337 
3338 	ctx = &cmd.params.req.context;
3339 	ctx->v0.num_pages = npages;
3340 	ctx->v0.cq_id = mq->cq->id;
3341 	ctx->v0.ring_size = ilog2(mq->nitems) + 1;
3342 	ctx->v0.valid = 1;
3343 	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
3344 	ctx->v0.async_evt_bitmap = 0xffffffff;
3345 
3346 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_MQ_EXT,
3347 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3348 	if (err)
3349 		return (err);
3350 
3351 	mq->id = letoh16(cmd.params.rsp.mq_id);
3352 
3353 	return (0);
3354 }
3355 
3356 int
3357 oce_new_eq(struct oce_softc *sc, struct oce_eq *eq)
3358 {
3359 	struct mbx_create_common_eq cmd;
3360 	int err, npages;
3361 
3362 	memset(&cmd, 0, sizeof(cmd));
3363 
3364 	npages = oce_load_ring(sc, eq->ring, &cmd.params.req.pages[0],
3365 	    nitems(cmd.params.req.pages));
3366 	if (!npages) {
3367 		printf("%s: failed to load the eq ring\n", __func__);
3368 		return (-1);
3369 	}
3370 
3371 	cmd.params.req.ctx.num_pages = htole16(npages);
3372 	cmd.params.req.ctx.valid = 1;
3373 	cmd.params.req.ctx.size = (eq->isize == 4) ? 0 : 1;
3374 	cmd.params.req.ctx.count = ilog2(eq->nitems / 256);
3375 	cmd.params.req.ctx.armed = 0;
3376 	cmd.params.req.ctx.delay_mult = htole32(eq->delay);
3377 
3378 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_EQ,
3379 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3380 	if (err)
3381 		return (err);
3382 
3383 	eq->id = letoh16(cmd.params.rsp.eq_id);
3384 
3385 	return (0);
3386 }
3387 
3388 int
3389 oce_new_cq(struct oce_softc *sc, struct oce_cq *cq)
3390 {
3391 	struct mbx_create_common_cq cmd;
3392 	union oce_cq_ctx *ctx;
3393 	int err, npages;
3394 
3395 	memset(&cmd, 0, sizeof(cmd));
3396 
3397 	npages = oce_load_ring(sc, cq->ring, &cmd.params.req.pages[0],
3398 	    nitems(cmd.params.req.pages));
3399 	if (!npages) {
3400 		printf("%s: failed to load the cq ring\n", __func__);
3401 		return (-1);
3402 	}
3403 
3404 	ctx = &cmd.params.req.cq_ctx;
3405 
3406 	if (IS_XE201(sc)) {
3407 		ctx->v2.num_pages = htole16(npages);
3408 		ctx->v2.page_size = 1; /* for 4K */
3409 		ctx->v2.eventable = cq->eventable;
3410 		ctx->v2.valid = 1;
3411 		ctx->v2.count = ilog2(cq->nitems / 256);
3412 		ctx->v2.nodelay = cq->nodelay;
3413 		ctx->v2.coalesce_wm = cq->ncoalesce;
3414 		ctx->v2.armed = 0;
3415 		ctx->v2.eq_id = cq->eq->id;
3416 		if (ctx->v2.count == 3) {
3417 			if (cq->nitems > (4*1024)-1)
3418 				ctx->v2.cqe_count = (4*1024)-1;
3419 			else
3420 				ctx->v2.cqe_count = cq->nitems;
3421 		}
3422 	} else {
3423 		ctx->v0.num_pages = htole16(npages);
3424 		ctx->v0.eventable = cq->eventable;
3425 		ctx->v0.valid = 1;
3426 		ctx->v0.count = ilog2(cq->nitems / 256);
3427 		ctx->v0.nodelay = cq->nodelay;
3428 		ctx->v0.coalesce_wm = cq->ncoalesce;
3429 		ctx->v0.armed = 0;
3430 		ctx->v0.eq_id = cq->eq->id;
3431 	}
3432 
3433 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_CQ,
3434 	    IS_XE201(sc) ? OCE_MBX_VER_V2 : OCE_MBX_VER_V0, &cmd,
3435 	    sizeof(cmd));
3436 	if (err)
3437 		return (err);
3438 
3439 	cq->id = letoh16(cmd.params.rsp.cq_id);
3440 
3441 	return (0);
3442 }
3443 
3444 static inline int
3445 oce_update_stats(struct oce_softc *sc)
3446 {
3447 	struct ifnet *ifp = &sc->sc_ac.ac_if;
3448 	uint64_t rxe, txe;
3449 	int err;
3450 
3451 	if (ISSET(sc->sc_flags, OCE_F_BE2))
3452 		err = oce_stats_be2(sc, &rxe, &txe);
3453 	else if (ISSET(sc->sc_flags, OCE_F_BE3))
3454 		err = oce_stats_be3(sc, &rxe, &txe);
3455 	else
3456 		err = oce_stats_xe(sc, &rxe, &txe);
3457 	if (err)
3458 		return (err);
3459 
3460 	ifp->if_ierrors += (rxe > sc->sc_rx_errors) ?
3461 	    rxe - sc->sc_rx_errors : sc->sc_rx_errors - rxe;
3462 	sc->sc_rx_errors = rxe;
3463 	ifp->if_oerrors += (txe > sc->sc_tx_errors) ?
3464 	    txe - sc->sc_tx_errors : sc->sc_tx_errors - txe;
3465 	sc->sc_tx_errors = txe;
3466 
3467 	return (0);
3468 }
3469 
3470 int
3471 oce_stats_be2(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3472 {
3473 	struct mbx_get_nic_stats_v0 cmd;
3474 	struct oce_pmem_stats *ms;
3475 	struct oce_rxf_stats_v0 *rs;
3476 	struct oce_port_rxf_stats_v0 *ps;
3477 	int err;
3478 
3479 	memset(&cmd, 0, sizeof(cmd));
3480 
3481 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_STATS, OCE_MBX_VER_V0,
3482 	    &cmd, sizeof(cmd));
3483 	if (err)
3484 		return (err);
3485 
3486 	ms = &cmd.params.rsp.stats.pmem;
3487 	rs = &cmd.params.rsp.stats.rxf;
3488 	ps = &rs->port[sc->sc_port];
3489 
3490 	*rxe = ps->rx_crc_errors + ps->rx_in_range_errors +
3491 	    ps->rx_frame_too_long + ps->rx_dropped_runt +
3492 	    ps->rx_ip_checksum_errs + ps->rx_tcp_checksum_errs +
3493 	    ps->rx_udp_checksum_errs + ps->rxpp_fifo_overflow_drop +
3494 	    ps->rx_dropped_tcp_length + ps->rx_dropped_too_small +
3495 	    ps->rx_dropped_too_short + ps->rx_out_range_errors +
3496 	    ps->rx_dropped_header_too_small + ps->rx_input_fifo_overflow_drop +
3497 	    ps->rx_alignment_symbol_errors;
3498 	if (sc->sc_if_id)
3499 		*rxe += rs->port1_jabber_events;
3500 	else
3501 		*rxe += rs->port0_jabber_events;
3502 	*rxe += ms->eth_red_drops;
3503 
3504 	*txe = 0; /* hardware doesn't provide any extra tx error statistics */
3505 
3506 	return (0);
3507 }
3508 
3509 int
3510 oce_stats_be3(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3511 {
3512 	struct mbx_get_nic_stats cmd;
3513 	struct oce_pmem_stats *ms;
3514 	struct oce_rxf_stats_v1 *rs;
3515 	struct oce_port_rxf_stats_v1 *ps;
3516 	int err;
3517 
3518 	memset(&cmd, 0, sizeof(cmd));
3519 
3520 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_STATS, OCE_MBX_VER_V1,
3521 	    &cmd, sizeof(cmd));
3522 	if (err)
3523 		return (err);
3524 
3525 	ms = &cmd.params.rsp.stats.pmem;
3526 	rs = &cmd.params.rsp.stats.rxf;
3527 	ps = &rs->port[sc->sc_port];
3528 
3529 	*rxe = ps->rx_crc_errors + ps->rx_in_range_errors +
3530 	    ps->rx_frame_too_long + ps->rx_dropped_runt +
3531 	    ps->rx_ip_checksum_errs + ps->rx_tcp_checksum_errs +
3532 	    ps->rx_udp_checksum_errs + ps->rxpp_fifo_overflow_drop +
3533 	    ps->rx_dropped_tcp_length + ps->rx_dropped_too_small +
3534 	    ps->rx_dropped_too_short + ps->rx_out_range_errors +
3535 	    ps->rx_dropped_header_too_small + ps->rx_input_fifo_overflow_drop +
3536 	    ps->rx_alignment_symbol_errors + ps->jabber_events;
3537 	*rxe += ms->eth_red_drops;
3538 
3539 	*txe = 0; /* hardware doesn't provide any extra tx error statistics */
3540 
3541 	return (0);
3542 }
3543 
3544 int
3545 oce_stats_xe(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3546 {
3547 	struct mbx_get_pport_stats cmd;
3548 	struct oce_pport_stats *pps;
3549 	int err;
3550 
3551 	memset(&cmd, 0, sizeof(cmd));
3552 
3553 	cmd.params.req.reset_stats = 0;
3554 	cmd.params.req.port_number = sc->sc_if_id;
3555 
3556 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_PPORT_STATS,
3557 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3558 	if (err)
3559 		return (err);
3560 
3561 	pps = &cmd.params.rsp.pps;
3562 
3563 	*rxe = pps->rx_discards + pps->rx_errors + pps->rx_crc_errors +
3564 	    pps->rx_alignment_errors + pps->rx_symbol_errors +
3565 	    pps->rx_frames_too_long + pps->rx_internal_mac_errors +
3566 	    pps->rx_undersize_pkts + pps->rx_oversize_pkts + pps->rx_jabbers +
3567 	    pps->rx_control_frames_unknown_opcode + pps->rx_in_range_errors +
3568 	    pps->rx_out_of_range_errors + pps->rx_ip_checksum_errors +
3569 	    pps->rx_tcp_checksum_errors + pps->rx_udp_checksum_errors +
3570 	    pps->rx_fifo_overflow + pps->rx_input_fifo_overflow +
3571 	    pps->rx_drops_too_many_frags + pps->rx_drops_mtu;
3572 
3573 	*txe = pps->tx_discards + pps->tx_errors + pps->tx_internal_mac_errors;
3574 
3575 	return (0);
3576 }
3577