/*	$OpenBSD: if_oce.c,v 1.100 2017/11/27 16:53:04 sthen Exp $	*/

/*
 * Copyright (c) 2012 Mike Belopuhov
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Copyright (C) 2012 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/timeout.h>
#include <sys/pool.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_ocereg.h>

#ifndef TRUE
#define TRUE			1
#endif
#ifndef FALSE
#define FALSE			0
#endif

#define OCE_MBX_TIMEOUT		5

#define OCE_MAX_PAYLOAD		65536

#define OCE_TX_RING_SIZE	512
#define OCE_RX_RING_SIZE	1024

/* This should be a power of 2, e.g. 2, 4, 8 or 16 */
#define OCE_MAX_RSS		4 /* TODO: 8 */
#define OCE_MAX_RQ		(OCE_MAX_RSS + 1) /* one default queue */
#define OCE_MAX_WQ		8

#define OCE_MAX_EQ		32
#define OCE_MAX_CQ		(OCE_MAX_RQ + OCE_MAX_WQ + 1) /* one MCC queue */
#define OCE_MAX_CQ_EQ		8 /* Max CQs that can be attached to an EQ */

#define OCE_DEFAULT_EQD		80

#define OCE_MIN_MTU		256
#define OCE_MAX_MTU		9000

#define OCE_MAX_RQ_COMPL	64
#define OCE_MAX_RQ_POSTS	255
#define OCE_RX_BUF_SIZE		2048

#define OCE_MAX_TX_ELEMENTS	29
#define OCE_MAX_TX_DESC		1024
#define OCE_MAX_TX_SIZE		65535

#define OCE_MEM_KVA(_m)		((void *)((_m)->vaddr))
#define OCE_MEM_DVA(_m)		((_m)->paddr)

#define OCE_WQ_FOREACH(sc, wq, i) 	\
	for (i = 0, wq = sc->sc_wq[0]; i < sc->sc_nwq; i++, wq = sc->sc_wq[i])
#define OCE_RQ_FOREACH(sc, rq, i) 	\
	for (i = 0, rq = sc->sc_rq[0]; i < sc->sc_nrq; i++, rq = sc->sc_rq[i])
#define OCE_EQ_FOREACH(sc, eq, i) 	\
	for (i = 0, eq = sc->sc_eq[0]; i < sc->sc_neq; i++, eq = sc->sc_eq[i])
#define OCE_CQ_FOREACH(sc, cq, i) 	\
	for (i = 0, cq = sc->sc_cq[0]; i < sc->sc_ncq; i++, cq = sc->sc_cq[i])
#define OCE_RING_FOREACH(_r, _v, _c)	\
	for ((_v) = oce_ring_first(_r); _c; (_v) = oce_ring_next(_r))
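/*
 * XXX: the queue FOREACH macros above reload the array slot after the
 * increment, so the last pass reads one entry past the final valid queue
 * pointer before the loop condition fails; this appears harmless as long
 * as the queue count never reaches the full array size.  In
 * OCE_RING_FOREACH the caller-supplied condition `_c' is re-evaluated for
 * every entry returned by oce_ring_next() and typically tests the valid
 * bit of the current descriptor (e.g. WQ_CQE_VALID(cqe)).
 */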
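/*
 * Integer log2: returns the position of the highest bit set, i.e. the
 * floor of log2(v), so ilog2(1024) == 10.  Used when encoding power-of-2
 * ring sizes for the firmware queue creation commands.
 */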
static inline int
ilog2(unsigned int v)
{
	int r = 0;

	while (v >>= 1)
		r++;
	return (r);
}

struct oce_pkt {
	struct mbuf *		mbuf;
	bus_dmamap_t		map;
	int			nsegs;
	SIMPLEQ_ENTRY(oce_pkt)	entry;
};
SIMPLEQ_HEAD(oce_pkt_list, oce_pkt);

struct oce_dma_mem {
	bus_dma_tag_t		tag;
	bus_dmamap_t		map;
	bus_dma_segment_t	segs;
	int			nsegs;
	bus_size_t		size;
	caddr_t			vaddr;
	bus_addr_t		paddr;
};

struct oce_ring {
	int			index;
	int			nitems;
	int			nused;
	int			isize;
	struct oce_dma_mem	dma;
};

struct oce_softc;

enum cq_len {
	CQ_LEN_256  = 256,
	CQ_LEN_512  = 512,
	CQ_LEN_1024 = 1024
};

enum eq_len {
	EQ_LEN_256  = 256,
	EQ_LEN_512  = 512,
	EQ_LEN_1024 = 1024,
	EQ_LEN_2048 = 2048,
	EQ_LEN_4096 = 4096
};

enum eqe_size {
	EQE_SIZE_4  = 4,
	EQE_SIZE_16 = 16
};

enum qtype {
	QTYPE_EQ,
	QTYPE_MQ,
	QTYPE_WQ,
	QTYPE_RQ,
	QTYPE_CQ,
	QTYPE_RSS
};

struct oce_eq {
	struct oce_softc *	sc;
	struct oce_ring *	ring;
	enum qtype		type;
	int			id;

	struct oce_cq *		cq[OCE_MAX_CQ_EQ];
	int			cq_valid;

	int			nitems;
	int			isize;
	int			delay;
};

struct oce_cq {
	struct oce_softc *	sc;
	struct oce_ring *	ring;
	enum qtype		type;
	int			id;

	struct oce_eq *		eq;

	void			(*cq_intr)(void *);
	void *			cb_arg;

	int			nitems;
	int			nodelay;
	int			eventable;
	int			ncoalesce;
};

struct oce_mq {
	struct oce_softc *	sc;
	struct oce_ring *	ring;
	enum qtype		type;
	int			id;

	struct oce_cq *		cq;

	int			nitems;
};

struct oce_wq {
	struct oce_softc *	sc;
	struct oce_ring *	ring;
	enum qtype		type;
	int			id;

	struct oce_cq *		cq;

	struct oce_pkt_list	pkt_list;
	struct oce_pkt_list	pkt_free;

	int			nitems;
};

struct oce_rq {
	struct oce_softc *	sc;
	struct oce_ring *	ring;
	enum qtype		type;
	int			id;

	struct oce_cq *		cq;

	struct if_rxring	rxring;
	struct oce_pkt_list	pkt_list;
	struct oce_pkt_list	pkt_free;

	uint32_t		rss_cpuid;

#ifdef OCE_LRO
	struct lro_ctrl		lro;
	int			lro_pkts_queued;
#endif

	int			nitems;
	int			fragsize;
	int			mtu;
	int			rss;
};

struct oce_softc {
	struct device		sc_dev;

	uint			sc_flags;
#define  OCE_F_BE2		 0x00000001
#define  OCE_F_BE3		 0x00000002
#define  OCE_F_XE201		 0x00000008
#define  OCE_F_BE3_NATIVE	 0x00000100
#define  OCE_F_RESET_RQD	 0x00001000
#define  OCE_F_MBOX_ENDIAN_RQD	 0x00002000

	bus_dma_tag_t		sc_dmat;

	bus_space_tag_t		sc_cfg_iot;
	bus_space_handle_t	sc_cfg_ioh;
	bus_size_t		sc_cfg_size;

	bus_space_tag_t		sc_csr_iot;
	bus_space_handle_t	sc_csr_ioh;
	bus_size_t		sc_csr_size;

	bus_space_tag_t		sc_db_iot;
	bus_space_handle_t	sc_db_ioh;
	bus_size_t		sc_db_size;

	void *			sc_ih;

	struct arpcom		sc_ac;
	struct ifmedia		sc_media;
	ushort			sc_link_up;
	ushort			sc_link_speed;
	uint64_t		sc_fc;

	struct oce_dma_mem	sc_mbx;
	struct oce_dma_mem	sc_pld;

	uint			sc_port;
	uint			sc_fmode;

	struct oce_wq *		sc_wq[OCE_MAX_WQ];	/* TX work queues */
	struct oce_rq *		sc_rq[OCE_MAX_RQ];	/* RX work queues */
	struct oce_cq *		sc_cq[OCE_MAX_CQ];	/* Completion queues */
	struct oce_eq *		sc_eq[OCE_MAX_EQ];	/* Event queues */
	struct oce_mq *		sc_mq;			/* Mailbox queue */

	ushort			sc_neq;
	ushort			sc_ncq;
	ushort			sc_nrq;
	ushort			sc_nwq;
	ushort			sc_nintr;

	ushort			sc_tx_ring_size;
	ushort			sc_rx_ring_size;
	ushort			sc_rss_enable;

	uint32_t		sc_if_id;	/* interface ID */
	uint32_t		sc_pmac_id;	/* PMAC id */
	char			sc_macaddr[ETHER_ADDR_LEN];

	uint32_t		sc_pvid;

	uint64_t		sc_rx_errors;
	uint64_t		sc_tx_errors;

	struct timeout		sc_tick;
	struct timeout		sc_rxrefill;

	void *			sc_statcmd;
};

#define IS_BE(sc)		ISSET((sc)->sc_flags, OCE_F_BE2 | OCE_F_BE3)
#define IS_XE201(sc)		ISSET((sc)->sc_flags, OCE_F_XE201)

#define ADDR_HI(x)		((uint32_t)((uint64_t)(x) >> 32))
#define ADDR_LO(x)		((uint32_t)((uint64_t)(x) & 0xffffffff))

#define IF_LRO_ENABLED(ifp)	ISSET((ifp)->if_capabilities, IFCAP_LRO)

int 	oce_match(struct device *, void *, void *);
void	oce_attach(struct device *, struct device *, void *);
int 	oce_pci_alloc(struct oce_softc *, struct pci_attach_args *);
void	oce_attachhook(struct device *);
void	oce_attach_ifp(struct oce_softc *);
int 	oce_ioctl(struct ifnet *, u_long, caddr_t);
int	oce_rxrinfo(struct oce_softc *, struct if_rxrinfo *);
void	oce_iff(struct oce_softc *);
void	oce_link_status(struct oce_softc *);
void	oce_media_status(struct ifnet *, struct ifmediareq *);
int 	oce_media_change(struct ifnet *);
void	oce_tick(void *);
void	oce_init(void *);
void	oce_stop(struct oce_softc *);
void	oce_watchdog(struct ifnet *);
void	oce_start(struct ifnet *);
int	oce_encap(struct oce_softc *, struct mbuf **, int wqidx);
#ifdef OCE_TSO
struct mbuf *
	oce_tso(struct oce_softc *, struct mbuf **);
#endif
int 	oce_intr(void *);
void	oce_intr_wq(void *);
void	oce_txeof(struct oce_wq *);
void	oce_intr_rq(void *);
void	oce_rxeof(struct oce_rq *, struct oce_nic_rx_cqe *);
void	oce_rxeoc(struct oce_rq *, struct oce_nic_rx_cqe *);
int 	oce_vtp_valid(struct oce_softc *, struct oce_nic_rx_cqe *);
int 	oce_port_valid(struct oce_softc *, struct oce_nic_rx_cqe *);
#ifdef OCE_LRO
void	oce_flush_lro(struct oce_rq *);
int 	oce_init_lro(struct oce_softc *);
void	oce_free_lro(struct oce_softc *);
#endif
int	oce_get_buf(struct oce_rq *);
int	oce_alloc_rx_bufs(struct oce_rq *);
void	oce_refill_rx(void *);
void	oce_free_posted_rxbuf(struct oce_rq *);
void	oce_intr_mq(void *);
void	oce_link_event(struct oce_softc *,
	    struct oce_async_cqe_link_state *);

int 	oce_init_queues(struct oce_softc *);
void	oce_release_queues(struct oce_softc *);
struct oce_wq *oce_create_wq(struct oce_softc *, struct oce_eq *);
void	oce_drain_wq(struct oce_wq *);
void	oce_destroy_wq(struct oce_wq *);
struct oce_rq *
	oce_create_rq(struct oce_softc *, struct oce_eq *, int rss);
void	oce_drain_rq(struct oce_rq *);
void	oce_destroy_rq(struct oce_rq *);
struct oce_eq *
	oce_create_eq(struct oce_softc *);
static inline void
	oce_arm_eq(struct oce_eq *, int neqe, int rearm, int clearint);
void	oce_drain_eq(struct oce_eq *);
void	oce_destroy_eq(struct oce_eq *);
struct oce_mq *
	oce_create_mq(struct oce_softc *, struct oce_eq *);
void	oce_drain_mq(struct oce_mq *);
void	oce_destroy_mq(struct oce_mq *);
struct oce_cq *
	oce_create_cq(struct oce_softc *, struct oce_eq *, int nitems,
	    int isize, int eventable, int nodelay, int ncoalesce);
static inline void
	oce_arm_cq(struct oce_cq *, int ncqe, int rearm);
void	oce_destroy_cq(struct oce_cq *);

int	oce_dma_alloc(struct oce_softc *, bus_size_t, struct oce_dma_mem *);
void	oce_dma_free(struct oce_softc *, struct oce_dma_mem *);
#define	oce_dma_sync(d, f) \
	    bus_dmamap_sync((d)->tag, (d)->map, 0, (d)->map->dm_mapsize, f)

struct oce_ring *
	oce_create_ring(struct oce_softc *, int nitems, int isize, int maxseg);
void	oce_destroy_ring(struct oce_softc *, struct oce_ring *);
int	oce_load_ring(struct oce_softc *, struct oce_ring *,
	    struct oce_pa *, int max_segs);
static inline void *
	oce_ring_get(struct oce_ring *);
static inline void *
	oce_ring_first(struct oce_ring *);
static inline void *
	oce_ring_next(struct oce_ring *);
struct oce_pkt *
	oce_pkt_alloc(struct oce_softc *, size_t size, int nsegs,
	    int maxsegsz);
void	oce_pkt_free(struct oce_softc *, struct oce_pkt *);
static inline struct oce_pkt *
	oce_pkt_get(struct oce_pkt_list *);
static inline void
	oce_pkt_put(struct oce_pkt_list *, struct oce_pkt *);

int	oce_init_fw(struct oce_softc *);
int	oce_mbox_init(struct oce_softc *);
int	oce_mbox_dispatch(struct oce_softc *);
int	oce_cmd(struct oce_softc *, int subsys, int opcode, int version,
	    void *payload, int length);
void	oce_first_mcc(struct oce_softc *);

int	oce_get_fw_config(struct oce_softc *);
int	oce_check_native_mode(struct oce_softc *);
int	oce_create_iface(struct oce_softc *, uint8_t *macaddr);
int	oce_config_vlan(struct oce_softc *, struct normal_vlan *vtags,
	    int nvtags, int untagged, int promisc);
int	oce_set_flow_control(struct oce_softc *, uint64_t);
int	oce_config_rss(struct oce_softc *, int enable);
int	oce_update_mcast(struct oce_softc *, uint8_t multi[][ETHER_ADDR_LEN],
	    int naddr);
int	oce_set_promisc(struct oce_softc *, int enable);
int	oce_get_link_status(struct oce_softc *);

void	oce_macaddr_set(struct oce_softc *);
int	oce_macaddr_get(struct oce_softc *, uint8_t *macaddr);
int	oce_macaddr_add(struct oce_softc *, uint8_t *macaddr, uint32_t *pmac);
int	oce_macaddr_del(struct oce_softc *, uint32_t pmac);

int	oce_new_rq(struct oce_softc *, struct oce_rq *);
int	oce_new_wq(struct oce_softc *, struct oce_wq *);
int	oce_new_mq(struct oce_softc *, struct oce_mq *);
int	oce_new_eq(struct oce_softc *, struct oce_eq *);
int	oce_new_cq(struct oce_softc *, struct oce_cq *);

int	oce_init_stats(struct oce_softc *);
int	oce_update_stats(struct oce_softc *);
int	oce_stats_be2(struct oce_softc *, uint64_t *, uint64_t *);
int	oce_stats_be3(struct oce_softc *, uint64_t *, uint64_t *);
int	oce_stats_xe(struct oce_softc *, uint64_t *, uint64_t *);

struct pool *oce_pkt_pool;

struct cfdriver oce_cd = {
	NULL, "oce", DV_IFNET
};

struct cfattach oce_ca = {
	sizeof(struct oce_softc), oce_match, oce_attach, NULL, NULL
};

const struct pci_matchid oce_devices[] = {
	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE2 },
	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE3 },
	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE2 },
	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE3 },
	{ PCI_VENDOR_EMULEX, PCI_PRODUCT_EMULEX_XE201 },
};

int
oce_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, oce_devices, nitems(oce_devices)));
}

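/*
 * Attach sequence: map the PCI BARs, allocate the bootstrap mailbox and
 * payload buffers, bring up the firmware and mailbox interface, fetch the
 * factory MAC address, establish the (single) interrupt and create the
 * event/completion/work queues before hooking into the network stack.
 * Link state retrieval and arming of the first EQ are deferred to
 * oce_attachhook() once the root filesystem is mounted.
 */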
void
oce_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct oce_softc *sc = (struct oce_softc *)self;
	const char *intrstr = NULL;
	pci_intr_handle_t ih;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_SERVERENGINES_BE2:
	case PCI_PRODUCT_SERVERENGINES_OCBE2:
		SET(sc->sc_flags, OCE_F_BE2);
		break;
	case PCI_PRODUCT_SERVERENGINES_BE3:
	case PCI_PRODUCT_SERVERENGINES_OCBE3:
		SET(sc->sc_flags, OCE_F_BE3);
		break;
	case PCI_PRODUCT_EMULEX_XE201:
		SET(sc->sc_flags, OCE_F_XE201);
		break;
	}

	sc->sc_dmat = pa->pa_dmat;
	if (oce_pci_alloc(sc, pa))
		return;

	sc->sc_tx_ring_size = OCE_TX_RING_SIZE;
	sc->sc_rx_ring_size = OCE_RX_RING_SIZE;

	/* create the bootstrap mailbox */
	if (oce_dma_alloc(sc, sizeof(struct oce_bmbx), &sc->sc_mbx)) {
		printf(": failed to allocate mailbox memory\n");
		return;
	}
	if (oce_dma_alloc(sc, OCE_MAX_PAYLOAD, &sc->sc_pld)) {
		printf(": failed to allocate payload memory\n");
		goto fail_1;
	}

	if (oce_init_fw(sc))
		goto fail_2;

	if (oce_mbox_init(sc)) {
		printf(": failed to initialize mailbox\n");
		goto fail_2;
	}

	if (oce_get_fw_config(sc)) {
		printf(": failed to get firmware configuration\n");
		goto fail_2;
	}

	if (ISSET(sc->sc_flags, OCE_F_BE3)) {
		if (oce_check_native_mode(sc))
			goto fail_2;
	}

	if (oce_macaddr_get(sc, sc->sc_macaddr)) {
		printf(": failed to fetch MAC address\n");
		goto fail_2;
	}
	memcpy(sc->sc_ac.ac_enaddr, sc->sc_macaddr, ETHER_ADDR_LEN);

	if (oce_pkt_pool == NULL) {
		oce_pkt_pool = malloc(sizeof(struct pool), M_DEVBUF, M_NOWAIT);
		if (oce_pkt_pool == NULL) {
			printf(": unable to allocate descriptor pool\n");
			goto fail_2;
		}
		pool_init(oce_pkt_pool, sizeof(struct oce_pkt), 0, IPL_NET,
		    0, "ocepkts", NULL);
	}

	/* We allocate a single interrupt resource */
	sc->sc_nintr = 1;
	if (pci_intr_map_msi(pa, &ih) != 0 &&
	    pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		goto fail_2;
	}

	intrstr = pci_intr_string(pa->pa_pc, ih);
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, oce_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt\n");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_2;
	}
	printf(": %s", intrstr);

	if (oce_init_stats(sc))
		goto fail_3;

	if (oce_init_queues(sc))
		goto fail_3;

	oce_attach_ifp(sc);

#ifdef OCE_LRO
	if (oce_init_lro(sc))
		goto fail_4;
#endif

	timeout_set(&sc->sc_tick, oce_tick, sc);
	timeout_set(&sc->sc_rxrefill, oce_refill_rx, sc);

	config_mountroot(self, oce_attachhook);

	printf(", address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));

	return;

#ifdef OCE_LRO
fail_4:
	oce_free_lro(sc);
	ether_ifdetach(&sc->sc_ac.ac_if);
	if_detach(&sc->sc_ac.ac_if);
	oce_release_queues(sc);
#endif
fail_3:
	pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
fail_2:
	oce_dma_free(sc, &sc->sc_pld);
fail_1:
	oce_dma_free(sc, &sc->sc_mbx);
}

int
oce_pci_alloc(struct oce_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t memtype, reg;

	/* setup the device config region */
	if (ISSET(sc->sc_flags, OCE_F_BE2))
		reg = OCE_BAR_CFG_BE2;
	else
		reg = OCE_BAR_CFG;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
	if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_cfg_iot,
	    &sc->sc_cfg_ioh, NULL, &sc->sc_cfg_size,
	    IS_BE(sc) ? 0 : 32768)) {
		printf(": can't find cfg mem space\n");
		return (ENXIO);
	}

	/*
	 * Read the SLI_INTF register and determine whether we
	 * can use this port and its features
	 */
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, OCE_INTF_REG_OFFSET);
	if (OCE_SLI_SIGNATURE(reg) != OCE_INTF_VALID_SIG) {
		printf(": invalid signature\n");
		goto fail_1;
	}
	if (OCE_SLI_REVISION(reg) != OCE_INTF_SLI_REV4) {
		printf(": unsupported SLI revision\n");
		goto fail_1;
	}
	if (OCE_SLI_IFTYPE(reg) == OCE_INTF_IF_TYPE_1)
		SET(sc->sc_flags, OCE_F_MBOX_ENDIAN_RQD);
	if (OCE_SLI_HINT1(reg) == OCE_INTF_FUNC_RESET_REQD)
		SET(sc->sc_flags, OCE_F_RESET_RQD);

	/* Lancer has one BAR (CFG) but BE3 has three (CFG, CSR, DB) */
	if (IS_BE(sc)) {
		/* set up CSR region */
		reg = OCE_BAR_CSR;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
		if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_csr_iot,
		    &sc->sc_csr_ioh, NULL, &sc->sc_csr_size, 0)) {
			printf(": can't find csr mem space\n");
			goto fail_1;
		}

		/* set up DB doorbell region */
		reg = OCE_BAR_DB;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
		if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_db_iot,
		    &sc->sc_db_ioh, NULL, &sc->sc_db_size, 0)) {
			printf(": can't find db mem space\n");
			goto fail_2;
		}
	} else {
		sc->sc_csr_iot = sc->sc_db_iot = sc->sc_cfg_iot;
		sc->sc_csr_ioh = sc->sc_db_ioh = sc->sc_cfg_ioh;
	}

	return (0);

fail_2:
	bus_space_unmap(sc->sc_csr_iot, sc->sc_csr_ioh, sc->sc_csr_size);
fail_1:
	bus_space_unmap(sc->sc_cfg_iot, sc->sc_cfg_ioh, sc->sc_cfg_size);
	return (ENXIO);
}

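/*
 * Register access helpers.  Each read is preceded and each write is
 * followed by a bus space barrier so register accesses are not reordered
 * around the data transfer; the cfg/csr/db variants differ only in which
 * BAR mapping they use.
 */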
static inline uint32_t
oce_read_cfg(struct oce_softc *sc, bus_size_t off)
{
	bus_space_barrier(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_cfg_iot, sc->sc_cfg_ioh, off));
}

static inline uint32_t
oce_read_csr(struct oce_softc *sc, bus_size_t off)
{
	bus_space_barrier(sc->sc_csr_iot, sc->sc_csr_ioh, off, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_csr_iot, sc->sc_csr_ioh, off));
}

static inline uint32_t
oce_read_db(struct oce_softc *sc, bus_size_t off)
{
	bus_space_barrier(sc->sc_db_iot, sc->sc_db_ioh, off, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_db_iot, sc->sc_db_ioh, off));
}

static inline void
oce_write_cfg(struct oce_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, val);
	bus_space_barrier(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline void
oce_write_csr(struct oce_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_csr_iot, sc->sc_csr_ioh, off, val);
	bus_space_barrier(sc->sc_csr_iot, sc->sc_csr_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline void
oce_write_db(struct oce_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_db_iot, sc->sc_db_ioh, off, val);
	bus_space_barrier(sc->sc_db_iot, sc->sc_db_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline void
oce_intr_enable(struct oce_softc *sc)
{
	uint32_t reg;

	reg = oce_read_cfg(sc, PCI_INTR_CTRL);
	oce_write_cfg(sc, PCI_INTR_CTRL, reg | HOSTINTR_MASK);
}

static inline void
oce_intr_disable(struct oce_softc *sc)
{
	uint32_t reg;

	reg = oce_read_cfg(sc, PCI_INTR_CTRL);
	oce_write_cfg(sc, PCI_INTR_CTRL, reg & ~HOSTINTR_MASK);
}

void
oce_attachhook(struct device *self)
{
	struct oce_softc *sc = (struct oce_softc *)self;

	oce_get_link_status(sc);

	oce_arm_cq(sc->sc_mq->cq, 0, TRUE);

	/*
	 * We need to receive MCC async events, so enable interrupts and
	 * arm the first EQ; the other EQs are armed once the interface
	 * is brought up.
	 */
	oce_intr_enable(sc);
	oce_arm_eq(sc->sc_eq[0], 0, TRUE, FALSE);

	/*
	 * Send the first MCC command; after that the firmware delivers
	 * unsolicited MCC notifications on its own.
	 */
	oce_first_mcc(sc);
}

void
oce_attach_ifp(struct oce_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	ifmedia_init(&sc->sc_media, IFM_IMASK, oce_media_change,
	    oce_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = oce_ioctl;
	ifp->if_start = oce_start;
	ifp->if_watchdog = oce_watchdog;
	ifp->if_hardmtu = OCE_MAX_MTU;
	ifp->if_softc = sc;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_size - 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

#ifdef OCE_TSO
	ifp->if_capabilities |= IFCAP_TSO;
	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
#endif
#ifdef OCE_LRO
	ifp->if_capabilities |= IFCAP_LRO;
#endif

	if_attach(ifp);
	ether_ifattach(ifp);
}

int
oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct oce_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			oce_init(sc);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				oce_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				oce_stop(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
		break;
	case SIOCGIFRXR:
		error = oce_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_ac, command, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			oce_iff(sc);
		error = 0;
	}

	splx(s);

	return (error);
}

int
oce_rxrinfo(struct oce_softc *sc, struct if_rxrinfo *ifri)
{
	struct if_rxring_info *ifr, ifr1;
	struct oce_rq *rq;
	int error, i;
	u_int n = 0;

	if (sc->sc_nrq > 1) {
		if ((ifr = mallocarray(sc->sc_nrq, sizeof(*ifr), M_DEVBUF,
		    M_WAITOK | M_ZERO)) == NULL)
			return (ENOMEM);
	} else
		ifr = &ifr1;

	OCE_RQ_FOREACH(sc, rq, i) {
		ifr[n].ifr_size = MCLBYTES;
		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "/%d", i);
		ifr[n].ifr_info = rq->rxring;
		n++;
	}

	error = if_rxr_info_ioctl(ifri, sc->sc_nrq, ifr);

	if (sc->sc_nrq > 1)
		free(ifr, M_DEVBUF, sc->sc_nrq * sizeof(*ifr));
	return (error);
}

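/*
 * Program the RX filter: fall back to promiscuous/ALLMULTI when the
 * interface is promiscuous, a multicast range is configured, or more
 * groups are joined than the hardware multicast filter can hold;
 * otherwise download the exact multicast list.
 */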
void
oce_iff(struct oce_softc *sc)
{
	uint8_t multi[OCE_MAX_MC_FILTER_SIZE][ETHER_ADDR_LEN];
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int naddr = 0, promisc = 0;

	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt >= OCE_MAX_MC_FILTER_SIZE) {
		ifp->if_flags |= IFF_ALLMULTI;
		promisc = 1;
	} else {
		ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
		while (enm != NULL) {
			memcpy(multi[naddr++], enm->enm_addrlo, ETHER_ADDR_LEN);
			ETHER_NEXT_MULTI(step, enm);
		}
		oce_update_mcast(sc, multi, naddr);
	}

	oce_set_promisc(sc, promisc);
}

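/*
 * The firmware reports the link speed as a small enum: 1 = 10Mb/s,
 * 2 = 100Mb/s, 3 = 1Gb/s, 4 = 10Gb/s.  Map it onto if_baudrate; a link
 * that is up is treated as full duplex.
 */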
void
oce_link_status(struct oce_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int link_state = LINK_STATE_DOWN;

	ifp->if_baudrate = 0;
	if (sc->sc_link_up) {
		link_state = LINK_STATE_FULL_DUPLEX;

		switch (sc->sc_link_speed) {
		case 1:
			ifp->if_baudrate = IF_Mbps(10);
			break;
		case 2:
			ifp->if_baudrate = IF_Mbps(100);
			break;
		case 3:
			ifp->if_baudrate = IF_Gbps(1);
			break;
		case 4:
			ifp->if_baudrate = IF_Gbps(10);
			break;
		}
	}
	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}
}

void
oce_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct oce_softc *sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (oce_get_link_status(sc) == 0)
		oce_link_status(sc);

	if (!sc->sc_link_up) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (sc->sc_link_speed) {
	case 1: /* 10 Mbps */
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		break;
	case 2: /* 100 Mbps */
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case 3: /* 1 Gbps */
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case 4: /* 10 Gbps */
		ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
		break;
	}

	if (sc->sc_fc & IFM_ETH_RXPAUSE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
	if (sc->sc_fc & IFM_ETH_TXPAUSE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
}

int
oce_media_change(struct ifnet *ifp)
{
	return (0);
}

void
oce_tick(void *arg)
{
	struct oce_softc *sc = arg;
	int s;

	s = splnet();

	if (oce_update_stats(sc) == 0)
		timeout_add_sec(&sc->sc_tick, 1);

	splx(s);
}

void
oce_init(void *arg)
{
	struct oce_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct oce_eq *eq;
	struct oce_rq *rq;
	struct oce_wq *wq;
	int i;

	oce_stop(sc);

	DELAY(10);

	oce_macaddr_set(sc);

	oce_iff(sc);

	/* Enable VLAN promiscuous mode */
	if (oce_config_vlan(sc, NULL, 0, 1, 1))
		goto error;

	if (oce_set_flow_control(sc, IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE))
		goto error;

	OCE_RQ_FOREACH(sc, rq, i) {
		rq->mtu = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
		    ETHER_VLAN_ENCAP_LEN;
		if (oce_new_rq(sc, rq)) {
			printf("%s: failed to create rq\n",
			    sc->sc_dev.dv_xname);
			goto error;
		}
		rq->ring->index = 0;

		/* oce splits jumbos into 2k chunks... */
		if_rxr_init(&rq->rxring, 8, rq->nitems);

		if (!oce_alloc_rx_bufs(rq)) {
			printf("%s: failed to allocate rx buffers\n",
			    sc->sc_dev.dv_xname);
			goto error;
		}
	}

#ifdef OCE_RSS
	/* RSS config */
	if (sc->sc_rss_enable) {
		if (oce_config_rss(sc, 1)) {
			printf("%s: failed to configure RSS\n",
			    sc->sc_dev.dv_xname);
			goto error;
		}
	}
#endif

	OCE_RQ_FOREACH(sc, rq, i)
		oce_arm_cq(rq->cq, 0, TRUE);

	OCE_WQ_FOREACH(sc, wq, i)
		oce_arm_cq(wq->cq, 0, TRUE);

	oce_arm_cq(sc->sc_mq->cq, 0, TRUE);

	OCE_EQ_FOREACH(sc, eq, i)
		oce_arm_eq(eq, 0, TRUE, FALSE);

	if (oce_get_link_status(sc) == 0)
		oce_link_status(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_add_sec(&sc->sc_tick, 1);

	oce_intr_enable(sc);

	return;
error:
	oce_stop(sc);
}

void
oce_stop(struct oce_softc *sc)
{
	struct mbx_delete_nic_rq cmd;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct oce_rq *rq;
	struct oce_wq *wq;
	struct oce_eq *eq;
	int i;

	timeout_del(&sc->sc_tick);
	timeout_del(&sc->sc_rxrefill);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Stop interrupts and finish any pending bottom halves */
	oce_intr_disable(sc);

	/* Invalidate any pending cq and eq entries */
	OCE_EQ_FOREACH(sc, eq, i)
		oce_drain_eq(eq);
	OCE_RQ_FOREACH(sc, rq, i) {
		/* destroy the receive queue in the firmware */
		memset(&cmd, 0, sizeof(cmd));
		cmd.params.req.rq_id = htole16(rq->id);
		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_RQ,
		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
		DELAY(1000);
		oce_drain_rq(rq);
		oce_free_posted_rxbuf(rq);
	}
	OCE_WQ_FOREACH(sc, wq, i)
		oce_drain_wq(wq);
}

void
oce_watchdog(struct ifnet *ifp)
{
	printf("%s: watchdog timeout -- resetting\n", ifp->if_xname);

	oce_init(ifp->if_softc);

	ifp->if_oerrors++;
}

void
oce_start(struct ifnet *ifp)
{
	struct oce_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int pkts = 0;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (oce_encap(sc, &m, 0)) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		pkts++;
	}

	/* Set a timeout in case the chip goes out to lunch */
	if (pkts)
		ifp->if_timer = 5;
}

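/*
 * Transmit encapsulation: each packet consumes one NIC header WQE
 * followed by one fragment WQE per DMA segment.  BE2/BE3 require an even
 * number of WQEs per packet, so a zeroed pad fragment is appended when
 * needed.  The doorbell write at the end (PD_TXULP_DB) carries the WQ id
 * in the low bits and the number of posted WQEs in bits 16 and up.
 */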
int
oce_encap(struct oce_softc *sc, struct mbuf **mpp, int wqidx)
{
	struct mbuf *m = *mpp;
	struct oce_wq *wq = sc->sc_wq[wqidx];
	struct oce_pkt *pkt = NULL;
	struct oce_nic_hdr_wqe *nhe;
	struct oce_nic_frag_wqe *nfe;
	int i, nwqe, err;

#ifdef OCE_TSO
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		/* consolidate packet buffers for TSO/LSO segment offload */
		m = oce_tso(sc, mpp);
		if (m == NULL)
			goto error;
	}
#endif

	if ((pkt = oce_pkt_get(&wq->pkt_free)) == NULL)
		goto error;

	err = bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, m, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		if (m_defrag(m, M_DONTWAIT) ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, m,
			BUS_DMA_NOWAIT))
			goto error;
		*mpp = m;
	} else if (err != 0)
		goto error;

	pkt->nsegs = pkt->map->dm_nsegs;

	nwqe = pkt->nsegs + 1;
	if (IS_BE(sc)) {
		/* BE2 and BE3 require an even number of WQEs */
		if (nwqe & 1)
			nwqe++;
	}

	/* Fail if there aren't enough free WQEs */
	if (nwqe >= wq->ring->nitems - wq->ring->nused) {
		bus_dmamap_unload(sc->sc_dmat, pkt->map);
		goto error;
	}

	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	pkt->mbuf = m;

	/* TX work queue entry for the header */
	nhe = oce_ring_get(wq->ring);
	memset(nhe, 0, sizeof(*nhe));

	nhe->u0.s.complete = 1;
	nhe->u0.s.event = 1;
	nhe->u0.s.crc = 1;
	nhe->u0.s.forward = 0;
	nhe->u0.s.ipcs = (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) ? 1 : 0;
	nhe->u0.s.udpcs = (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) ? 1 : 0;
	nhe->u0.s.tcpcs = (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) ? 1 : 0;
	nhe->u0.s.num_wqe = nwqe;
	nhe->u0.s.total_length = m->m_pkthdr.len;

#if NVLAN > 0
	if (m->m_flags & M_VLANTAG) {
		nhe->u0.s.vlan = 1; /* VLAN present */
		nhe->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
	}
#endif

#ifdef OCE_TSO
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		if (m->m_pkthdr.tso_segsz) {
			nhe->u0.s.lso = 1;
			nhe->u0.s.lso_mss = m->m_pkthdr.tso_segsz;
		}
		if (!IS_BE(sc))
			nhe->u0.s.ipcs = 1;
	}
#endif

	oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	wq->ring->nused++;

	/* TX work queue entries for data chunks */
	for (i = 0; i < pkt->nsegs; i++) {
		nfe = oce_ring_get(wq->ring);
		memset(nfe, 0, sizeof(*nfe));
		nfe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[i].ds_addr);
		nfe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[i].ds_addr);
		nfe->u0.s.frag_len = pkt->map->dm_segs[i].ds_len;
		wq->ring->nused++;
	}
	if (nwqe > (pkt->nsegs + 1)) {
		nfe = oce_ring_get(wq->ring);
		memset(nfe, 0, sizeof(*nfe));
		wq->ring->nused++;
		pkt->nsegs++;
	}

	oce_pkt_put(&wq->pkt_list, pkt);

	oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	oce_write_db(sc, PD_TXULP_DB, wq->id | (nwqe << 16));

	return (0);

error:
	if (pkt)
		oce_pkt_put(&wq->pkt_free, pkt);
	m_freem(*mpp);
	*mpp = NULL;
	return (1);
}

#ifdef OCE_TSO
struct mbuf *
oce_tso(struct oce_softc *sc, struct mbuf **mpp)
{
	struct mbuf *m;
	struct ip *ip;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	uint16_t etype;
	int total_len = 0, ehdrlen = 0;

	m = *mpp;

	if (M_WRITABLE(m) == 0) {
		m = m_dup(*mpp, M_DONTWAIT);
		if (!m)
			return (NULL);
		m_freem(*mpp);
		*mpp = m;
	}

	eh = mtod(m, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(m->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return (NULL);
		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
		break;
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (NULL);
		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));

		total_len = ehdrlen + sizeof(struct ip6_hdr) +
		    (th->th_off << 2);
		break;
#endif
	default:
		return (NULL);
	}

	m = m_pullup(m, total_len);
	if (!m)
		return (NULL);
	*mpp = m;
	return (m);
}
#endif /* OCE_TSO */

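/*
 * Interrupt handler: drain the event queue, acknowledge the consumed
 * entries without re-arming, run the completion handler of every CQ
 * attached to this EQ, then re-arm the EQ.  With a single interrupt
 * resource only sc_eq[0] is serviced here.
 */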
int
oce_intr(void *arg)
{
	struct oce_softc *sc = arg;
	struct oce_eq *eq = sc->sc_eq[0];
	struct oce_eqe *eqe;
	struct oce_cq *cq = NULL;
	int i, neqe = 0;

	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTREAD);

	OCE_RING_FOREACH(eq->ring, eqe, eqe->evnt != 0) {
		eqe->evnt = 0;
		neqe++;
	}

	/* Spurious? */
	if (!neqe) {
		oce_arm_eq(eq, 0, TRUE, FALSE);
		return (0);
	}

	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_PREWRITE);

	/* Clear EQ entries, but don't arm */
	oce_arm_eq(eq, neqe, FALSE, TRUE);

	/* Process TX, RX and MCC completion queues */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		(*cq->cq_intr)(cq->cb_arg);
		oce_arm_cq(cq, 0, TRUE);
	}

	oce_arm_eq(eq, 0, TRUE, FALSE);
	return (1);
}

/* Handle the Completion Queue for transmit */
void
oce_intr_wq(void *arg)
{
	struct oce_wq *wq = (struct oce_wq *)arg;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	struct oce_softc *sc = wq->sc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int ncqe = 0;

	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
	OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
		oce_txeof(wq);
		WQ_CQE_INVALIDATE(cqe);
		ncqe++;
	}
	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);

	if (ifq_is_oactive(&ifp->if_snd)) {
		if (wq->ring->nused < (wq->ring->nitems / 2)) {
			ifq_clr_oactive(&ifp->if_snd);
			oce_start(ifp);
		}
	}
	if (wq->ring->nused == 0)
		ifp->if_timer = 0;

	if (ncqe)
		oce_arm_cq(cq, ncqe, FALSE);
}

void
oce_txeof(struct oce_wq *wq)
{
	struct oce_softc *sc = wq->sc;
	struct oce_pkt *pkt;
	struct mbuf *m;

	if ((pkt = oce_pkt_get(&wq->pkt_list)) == NULL) {
		printf("%s: missing descriptor in txeof\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	wq->ring->nused -= pkt->nsegs + 1;
	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, pkt->map);

	m = pkt->mbuf;
	m_freem(m);
	pkt->mbuf = NULL;
	oce_pkt_put(&wq->pkt_free, pkt);
}

/* Handle the Completion Queue for receive */
void
oce_intr_rq(void *arg)
{
	struct oce_rq *rq = (struct oce_rq *)arg;
	struct oce_cq *cq = rq->cq;
	struct oce_softc *sc = rq->sc;
	struct oce_nic_rx_cqe *cqe;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int maxrx, ncqe = 0;

	maxrx = IS_XE201(sc) ? 8 : OCE_MAX_RQ_COMPL;

	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);

	OCE_RING_FOREACH(cq->ring, cqe, RQ_CQE_VALID(cqe) && ncqe <= maxrx) {
		if (cqe->u0.s.error == 0) {
			if (cqe->u0.s.pkt_size == 0)
				/* partial DMA workaround for Lancer */
				oce_rxeoc(rq, cqe);
			else
				oce_rxeof(rq, cqe);
		} else {
			ifp->if_ierrors++;
			if (IS_XE201(sc))
				/* Lancer A0 no buffer workaround */
				oce_rxeoc(rq, cqe);
			else
				/* Post L3/L4 errors to stack. */
				oce_rxeof(rq, cqe);
		}
#ifdef OCE_LRO
		if (IF_LRO_ENABLED(ifp) && rq->lro_pkts_queued >= 16)
			oce_flush_lro(rq);
#endif
		RQ_CQE_INVALIDATE(cqe);
		ncqe++;
	}

	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);

#ifdef OCE_LRO
	if (IF_LRO_ENABLED(ifp))
		oce_flush_lro(rq);
#endif

	if (ncqe) {
		oce_arm_cq(cq, ncqe, FALSE);
		if (!oce_alloc_rx_bufs(rq))
			timeout_add(&sc->sc_rxrefill, 1);
	}
}

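/*
 * Receive completion: a frame larger than the 2k fragment size arrives
 * as a chain of rq->fragsize fragments described by a single CQE; the
 * loop below relinks the per-fragment mbufs into one packet, fills in
 * the header of the first mbuf (length, checksum flags, VLAN tag) and
 * hands the result to if_input().
 */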
void
oce_rxeof(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	struct oce_softc *sc = rq->sc;
	struct oce_pkt *pkt = NULL;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m = NULL, *tail = NULL;
	int i, len, frag_len;
	uint16_t vtag;

	len = cqe->u0.s.pkt_size;

	/* Get the vlan_tag value */
	if (IS_BE(sc))
		vtag = ntohs(cqe->u0.s.vlan_tag);
	else
		vtag = cqe->u0.s.vlan_tag;

	for (i = 0; i < cqe->u0.s.num_fragments; i++) {
		if ((pkt = oce_pkt_get(&rq->pkt_list)) == NULL) {
			printf("%s: missing descriptor in rxeof\n",
			    sc->sc_dev.dv_xname);
			goto exit;
		}

		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, pkt->map);
		if_rxr_put(&rq->rxring, 1);

		frag_len = (len > rq->fragsize) ? rq->fragsize : len;
		pkt->mbuf->m_len = frag_len;

		if (tail != NULL) {
			/* additional fragments */
			pkt->mbuf->m_flags &= ~M_PKTHDR;
			tail->m_next = pkt->mbuf;
			tail = pkt->mbuf;
		} else {
			/* first fragment, fill out most of the header */
			pkt->mbuf->m_pkthdr.len = len;
			pkt->mbuf->m_pkthdr.csum_flags = 0;
			if (cqe->u0.s.ip_cksum_pass) {
				if (!cqe->u0.s.ip_ver) { /* IPV4 */
					pkt->mbuf->m_pkthdr.csum_flags =
					    M_IPV4_CSUM_IN_OK;
				}
			}
			if (cqe->u0.s.l4_cksum_pass) {
				pkt->mbuf->m_pkthdr.csum_flags |=
				    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
			}
			m = tail = pkt->mbuf;
		}
		pkt->mbuf = NULL;
		oce_pkt_put(&rq->pkt_free, pkt);
		len -= frag_len;
	}

	if (m) {
		if (!oce_port_valid(sc, cqe)) {
			m_freem(m);
			goto exit;
		}

#if NVLAN > 0
		/* This determines if the vlan tag is valid */
		if (oce_vtp_valid(sc, cqe)) {
			if (sc->sc_fmode & FNM_FLEX10_MODE) {
				/* FLEX10: if QnQ is not set, ignore the tag */
				if (cqe->u0.s.qnq) {
					m->m_pkthdr.ether_vtag = vtag;
					m->m_flags |= M_VLANTAG;
				}
			} else if (sc->sc_pvid != (vtag & VLAN_VID_MASK)) {
				/*
				 * In UMC mode the PVID is generally stripped,
				 * but some frames have been seen to arrive
				 * with it intact, so if pvid == vlan, ignore
				 * the tag.
				 */
				m->m_pkthdr.ether_vtag = vtag;
				m->m_flags |= M_VLANTAG;
			}
		}
#endif

#ifdef OCE_LRO
		/* Try to queue to LRO */
		if (IF_LRO_ENABLED(ifp) && !(m->m_flags & M_VLANTAG) &&
		    cqe->u0.s.ip_cksum_pass && cqe->u0.s.l4_cksum_pass &&
		    !cqe->u0.s.ip_ver && rq->lro.lro_cnt != 0) {
			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
				rq->lro_pkts_queued++;
				goto exit;
			}
			/* If LRO posting fails then try to post to STACK */
		}
#endif

		ml_enqueue(&ml, m);
	}
exit:
	if_input(ifp, &ml);
}

void
oce_rxeoc(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	struct oce_softc *sc = rq->sc;
	struct oce_pkt *pkt;
	int i, num_frags = cqe->u0.s.num_fragments;

	if (IS_XE201(sc) && cqe->u0.s.error) {
		/*
		 * Lancer A0 workaround:
		 * num_frags will be 1 more than actual in case of error
		 */
		if (num_frags)
			num_frags--;
	}
	for (i = 0; i < num_frags; i++) {
		if ((pkt = oce_pkt_get(&rq->pkt_list)) == NULL) {
			printf("%s: missing descriptor in rxeoc\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, pkt->map);
		if_rxr_put(&rq->rxring, 1);
		m_freem(pkt->mbuf);
		oce_pkt_put(&rq->pkt_free, pkt);
	}
}

int
oce_vtp_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;

	if (IS_BE(sc) && ISSET(sc->sc_flags, OCE_F_BE3_NATIVE)) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		return (cqe_v1->u0.s.vlan_tag_present);
	}
	return (cqe->u0.s.vlan_tag_present);
}

int
oce_port_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;

	if (IS_BE(sc) && ISSET(sc->sc_flags, OCE_F_BE3_NATIVE)) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		if (sc->sc_port != cqe_v1->u0.s.port)
			return (0);
	}
	return (1);
}

#ifdef OCE_LRO
void
oce_flush_lro(struct oce_rq *rq)
{
	struct oce_softc *sc = rq->sc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct lro_ctrl	*lro = &rq->lro;
	struct lro_entry *queued;

	if (!IF_LRO_ENABLED(ifp))
		return;

	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}
	rq->lro_pkts_queued = 0;
}

int
oce_init_lro(struct oce_softc *sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0, rc = 0;

	for (i = 0; i < sc->sc_nrq; i++) {
		lro = &sc->sc_rq[i]->lro;
		rc = tcp_lro_init(lro);
		if (rc != 0) {
			printf("%s: LRO init failed\n",
			    sc->sc_dev.dv_xname);
			return (rc);
		}
		lro->ifp = &sc->sc_ac.ac_if;
	}

	return (rc);
}

void
oce_free_lro(struct oce_softc *sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0;

	for (i = 0; i < sc->sc_nrq; i++) {
		lro = &sc->sc_rq[i]->lro;
		if (lro)
			tcp_lro_free(lro);
	}
}
#endif /* OCE_LRO */

int
oce_get_buf(struct oce_rq *rq)
{
	struct oce_softc *sc = rq->sc;
	struct oce_pkt *pkt;
	struct oce_nic_rqe *rqe;

	if ((pkt = oce_pkt_get(&rq->pkt_free)) == NULL)
		return (0);

	pkt->mbuf = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (pkt->mbuf == NULL) {
		oce_pkt_put(&rq->pkt_free, pkt);
		return (0);
	}

	pkt->mbuf->m_len = pkt->mbuf->m_pkthdr.len = MCLBYTES;
#ifdef __STRICT_ALIGNMENT
	m_adj(pkt->mbuf, ETHER_ALIGN);
#endif

	if (bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, pkt->mbuf,
	    BUS_DMA_NOWAIT)) {
		m_freem(pkt->mbuf);
		pkt->mbuf = NULL;
		oce_pkt_put(&rq->pkt_free, pkt);
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	oce_dma_sync(&rq->ring->dma, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	rqe = oce_ring_get(rq->ring);
	rqe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[0].ds_addr);
	rqe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[0].ds_addr);

	oce_dma_sync(&rq->ring->dma, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	oce_pkt_put(&rq->pkt_list, pkt);

	return (1);
}

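/*
 * Replenish the RX ring.  Buffers are handed to the chip by writing the
 * number of newly posted fragments into the PD_RXULP_DB doorbell (count
 * in bits 24 and up, RQ id in the low bits); since the count field only
 * holds OCE_MAX_RQ_POSTS (255) entries, larger batches take several
 * doorbell writes.
 */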
int
oce_alloc_rx_bufs(struct oce_rq *rq)
{
	struct oce_softc *sc = rq->sc;
	int i, nbufs = 0;
	u_int slots;

	for (slots = if_rxr_get(&rq->rxring, rq->nitems); slots > 0; slots--) {
		if (oce_get_buf(rq) == 0)
			break;

		nbufs++;
	}
	if_rxr_put(&rq->rxring, slots);

	if (!nbufs)
		return (0);
	for (i = nbufs / OCE_MAX_RQ_POSTS; i > 0; i--) {
		oce_write_db(sc, PD_RXULP_DB, rq->id |
		    (OCE_MAX_RQ_POSTS << 24));
		nbufs -= OCE_MAX_RQ_POSTS;
	}
	if (nbufs > 0)
		oce_write_db(sc, PD_RXULP_DB, rq->id | (nbufs << 24));
	return (1);
}

void
oce_refill_rx(void *arg)
{
	struct oce_softc *sc = arg;
	struct oce_rq *rq;
	int i, s;

	s = splnet();
	OCE_RQ_FOREACH(sc, rq, i) {
		if (!oce_alloc_rx_bufs(rq))
			timeout_add(&sc->sc_rxrefill, 5);
	}
	splx(s);
}

/* Handle the Completion Queue for the Mailbox/Async notifications */
void
oce_intr_mq(void *arg)
{
	struct oce_mq *mq = (struct oce_mq *)arg;
	struct oce_softc *sc = mq->sc;
	struct oce_cq *cq = mq->cq;
	struct oce_mq_cqe *cqe;
	struct oce_async_cqe_link_state *acqe;
	struct oce_async_event_grp5_pvid_state *gcqe;
	int evtype, optype, ncqe = 0;

	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);

	OCE_RING_FOREACH(cq->ring, cqe, MQ_CQE_VALID(cqe)) {
		if (cqe->u0.s.async_event) {
			evtype = cqe->u0.s.event_type;
			optype = cqe->u0.s.async_type;
			if (evtype == ASYNC_EVENT_CODE_LINK_STATE) {
				/* Link status evt */
				acqe = (struct oce_async_cqe_link_state *)cqe;
				oce_link_event(sc, acqe);
			} else if (evtype == ASYNC_EVENT_GRP5 &&
			    optype == ASYNC_EVENT_PVID_STATE) {
				/* GRP5 PVID */
				gcqe = (struct
				    oce_async_event_grp5_pvid_state *)cqe;
				if (gcqe->enabled)
					sc->sc_pvid =
					    gcqe->tag & VLAN_VID_MASK;
				else
					sc->sc_pvid = 0;
			}
		}
		MQ_CQE_INVALIDATE(cqe);
		ncqe++;
	}

	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);

	if (ncqe)
		oce_arm_cq(cq, ncqe, FALSE);
}

void
oce_link_event(struct oce_softc *sc, struct oce_async_cqe_link_state *acqe)
{
	/* Update Link status */
	sc->sc_link_up = ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
	    ASYNC_EVENT_LINK_UP);
	/* Update speed */
	sc->sc_link_speed = acqe->u0.s.speed;
	oce_link_status(sc);
}

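/*
 * Queue topology: one EQ per interrupt vector; each WQ and RQ gets its
 * own CQ attached to an EQ, and a single MQ (mailbox queue) hangs off
 * sc_eq[0] for firmware events.  With sc_nintr == 1 everything ends up
 * sharing the first EQ.
 */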
int
oce_init_queues(struct oce_softc *sc)
{
	struct oce_wq *wq;
	struct oce_rq *rq;
	int i;

	sc->sc_nrq = 1;
	sc->sc_nwq = 1;

	/* Create network interface on card */
	if (oce_create_iface(sc, sc->sc_macaddr))
		goto error;

	/* create all of the event queues */
	for (i = 0; i < sc->sc_nintr; i++) {
		sc->sc_eq[i] = oce_create_eq(sc);
		if (!sc->sc_eq[i])
			goto error;
	}

	/* alloc tx queues */
	OCE_WQ_FOREACH(sc, wq, i) {
		sc->sc_wq[i] = oce_create_wq(sc, sc->sc_eq[i]);
		if (!sc->sc_wq[i])
			goto error;
	}

	/* alloc rx queues */
	OCE_RQ_FOREACH(sc, rq, i) {
		sc->sc_rq[i] = oce_create_rq(sc, sc->sc_eq[i > 0 ? i - 1 : 0],
		    i > 0 ? sc->sc_rss_enable : 0);
		if (!sc->sc_rq[i])
			goto error;
	}

	/* alloc mailbox queue */
	sc->sc_mq = oce_create_mq(sc, sc->sc_eq[0]);
	if (!sc->sc_mq)
		goto error;

	return (0);
error:
	oce_release_queues(sc);
	return (1);
}

void
oce_release_queues(struct oce_softc *sc)
{
	struct oce_wq *wq;
	struct oce_rq *rq;
	struct oce_eq *eq;
	int i;

	OCE_RQ_FOREACH(sc, rq, i) {
		if (rq)
			oce_destroy_rq(sc->sc_rq[i]);
	}

	OCE_WQ_FOREACH(sc, wq, i) {
		if (wq)
			oce_destroy_wq(sc->sc_wq[i]);
	}

	if (sc->sc_mq)
		oce_destroy_mq(sc->sc_mq);

	OCE_EQ_FOREACH(sc, eq, i) {
		if (eq)
			oce_destroy_eq(sc->sc_eq[i]);
	}
}

/**
 * @brief 		Function to create a WQ for NIC Tx
 * @param sc 		software handle to the device
 * @returns		the pointer to the WQ created or NULL on failure
 */
struct oce_wq *
oce_create_wq(struct oce_softc *sc, struct oce_eq *eq)
{
	struct oce_wq *wq;
	struct oce_cq *cq;
	struct oce_pkt *pkt;
	int i;

	if (sc->sc_tx_ring_size < 256 || sc->sc_tx_ring_size > 2048)
		return (NULL);

	wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!wq)
		return (NULL);

	wq->ring = oce_create_ring(sc, sc->sc_tx_ring_size, NIC_WQE_SIZE, 8);
	if (!wq->ring) {
		free(wq, M_DEVBUF, 0);
		return (NULL);
	}

	cq = oce_create_cq(sc, eq, CQ_LEN_512, sizeof(struct oce_nic_tx_cqe),
	    1, 0, 3);
	if (!cq) {
		oce_destroy_ring(sc, wq->ring);
		free(wq, M_DEVBUF, 0);
		return (NULL);
	}

	wq->id = -1;
	wq->sc = sc;

	wq->cq = cq;
	wq->nitems = sc->sc_tx_ring_size;

	SIMPLEQ_INIT(&wq->pkt_free);
	SIMPLEQ_INIT(&wq->pkt_list);

	for (i = 0; i < sc->sc_tx_ring_size / 2; i++) {
		pkt = oce_pkt_alloc(sc, OCE_MAX_TX_SIZE, OCE_MAX_TX_ELEMENTS,
		    PAGE_SIZE);
		if (pkt == NULL) {
			oce_destroy_wq(wq);
			return (NULL);
		}
		oce_pkt_put(&wq->pkt_free, pkt);
	}

	if (oce_new_wq(sc, wq)) {
		oce_destroy_wq(wq);
		return (NULL);
	}

	eq->cq[eq->cq_valid] = cq;
	eq->cq_valid++;
	cq->cb_arg = wq;
	cq->cq_intr = oce_intr_wq;

	return (wq);
}

void
oce_drain_wq(struct oce_wq *wq)
{
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	int ncqe = 0;

	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
	OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
		WQ_CQE_INVALIDATE(cqe);
		ncqe++;
	}
	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
	oce_arm_cq(cq, ncqe, FALSE);
}

void
oce_destroy_wq(struct oce_wq *wq)
{
	struct mbx_delete_nic_wq cmd;
	struct oce_softc *sc = wq->sc;
	struct oce_pkt *pkt;

	if (wq->id >= 0) {
		memset(&cmd, 0, sizeof(cmd));
		cmd.params.req.wq_id = htole16(wq->id);
		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_WQ, OCE_MBX_VER_V0,
		    &cmd, sizeof(cmd));
	}
	if (wq->cq != NULL)
		oce_destroy_cq(wq->cq);
	if (wq->ring != NULL)
		oce_destroy_ring(sc, wq->ring);
	while ((pkt = oce_pkt_get(&wq->pkt_free)) != NULL)
		oce_pkt_free(sc, pkt);
	free(wq, M_DEVBUF, 0);
}

2075 /**
2076  * @brief 		function to allocate receive queue resources
2077  * @param sc		software handle to the device
2078  * @param eq		pointer to associated event queue
2079  * @param rss		is-rss-queue flag
2080  * @returns		the pointer to the RQ created or NULL on failure
2081  */
2082 struct oce_rq *
2083 oce_create_rq(struct oce_softc *sc, struct oce_eq *eq, int rss)
2084 {
2085 	struct oce_rq *rq;
2086 	struct oce_cq *cq;
2087 	struct oce_pkt *pkt;
2088 	int i;
2089 
2090 	/* Hardware doesn't support any other value */
2091 	if (sc->sc_rx_ring_size != 1024)
2092 		return (NULL);
2093 
2094 	rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
2095 	if (!rq)
2096 		return (NULL);
2097 
2098 	rq->ring = oce_create_ring(sc, sc->sc_rx_ring_size,
2099 	    sizeof(struct oce_nic_rqe), 2);
2100 	if (!rq->ring) {
2101 		free(rq, M_DEVBUF, 0);
2102 		return (NULL);
2103 	}
2104 
2105 	cq = oce_create_cq(sc, eq, CQ_LEN_1024, sizeof(struct oce_nic_rx_cqe),
2106 	    1, 0, 3);
2107 	if (!cq) {
2108 		oce_destroy_ring(sc, rq->ring);
2109 		free(rq, M_DEVBUF, 0);
2110 		return (NULL);
2111 	}
2112 
2113 	rq->id = -1;
2114 	rq->sc = sc;
2115 
2116 	rq->nitems = sc->sc_rx_ring_size;
2117 	rq->fragsize = OCE_RX_BUF_SIZE;
2118 	rq->rss = rss;
2119 
2120 	SIMPLEQ_INIT(&rq->pkt_free);
2121 	SIMPLEQ_INIT(&rq->pkt_list);
2122 
2123 	for (i = 0; i < sc->sc_rx_ring_size; i++) {
2124 		pkt = oce_pkt_alloc(sc, OCE_RX_BUF_SIZE, 1, OCE_RX_BUF_SIZE);
2125 		if (pkt == NULL) {
2126 			oce_destroy_rq(rq);
2127 			return (NULL);
2128 		}
2129 		oce_pkt_put(&rq->pkt_free, pkt);
2130 	}
2131 
2132 	rq->cq = cq;
2133 	eq->cq[eq->cq_valid] = cq;
2134 	eq->cq_valid++;
2135 	cq->cb_arg = rq;
2136 	cq->cq_intr = oce_intr_rq;
2137 
2138 	/* RX queue is created in oce_init */
2139 
2140 	return (rq);
2141 }
2142 
2143 void
2144 oce_drain_rq(struct oce_rq *rq)
2145 {
2146 	struct oce_nic_rx_cqe *cqe;
2147 	struct oce_cq *cq = rq->cq;
2148 	int ncqe = 0;
2149 
2150 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2151 	OCE_RING_FOREACH(cq->ring, cqe, RQ_CQE_VALID(cqe)) {
2152 		RQ_CQE_INVALIDATE(cqe);
2153 		ncqe++;
2154 	}
2155 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2156 	oce_arm_cq(cq, ncqe, FALSE);
2157 }
2158 
2159 void
2160 oce_destroy_rq(struct oce_rq *rq)
2161 {
2162 	struct mbx_delete_nic_rq cmd;
2163 	struct oce_softc *sc = rq->sc;
2164 	struct oce_pkt *pkt;
2165 
2166 	if (rq->id >= 0) {
2167 		memset(&cmd, 0, sizeof(cmd));
2168 		cmd.params.req.rq_id = htole16(rq->id);
2169 		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_RQ, OCE_MBX_VER_V0,
2170 		    &cmd, sizeof(cmd));
2171 	}
2172 	if (rq->cq != NULL)
2173 		oce_destroy_cq(rq->cq);
2174 	if (rq->ring != NULL)
2175 		oce_destroy_ring(sc, rq->ring);
2176 	while ((pkt = oce_pkt_get(&rq->pkt_free)) != NULL)
2177 		oce_pkt_free(sc, pkt);
2178 	free(rq, M_DEVBUF, 0);
2179 }
2180 
2181 struct oce_eq *
2182 oce_create_eq(struct oce_softc *sc)
2183 {
2184 	struct oce_eq *eq;
2185 
2186 	/* allocate an eq */
2187 	eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
2188 	if (eq == NULL)
2189 		return (NULL);
2190 
2191 	eq->ring = oce_create_ring(sc, EQ_LEN_1024, EQE_SIZE_4, 8);
2192 	if (!eq->ring) {
2193 		free(eq, M_DEVBUF, 0);
2194 		return (NULL);
2195 	}
2196 
2197 	eq->id = -1;
2198 	eq->sc = sc;
2199 	eq->nitems = EQ_LEN_1024;	/* length of event queue */
2200 	eq->isize = EQE_SIZE_4; 	/* size of a queue item */
2201 	eq->delay = OCE_DEFAULT_EQD;	/* event queue delay */
2202 
2203 	if (oce_new_eq(sc, eq)) {
2204 		oce_destroy_ring(sc, eq->ring);
2205 		free(eq, M_DEVBUF, 0);
2206 		return (NULL);
2207 	}
2208 
2209 	return (eq);
2210 }
2211 
2212 /**
2213  * @brief		Function to arm an EQ so that it can generate events
2214  * @param eq		pointer to event queue structure
2215  * @param neqe		number of EQEs to arm
2216  * @param rearm		rearm bit enable/disable
2217  * @param clearint	bit to clear the interrupt condition because of which
2218  *			EQEs are generated
2219  */
2220 static inline void
2221 oce_arm_eq(struct oce_eq *eq, int neqe, int rearm, int clearint)
2222 {
2223 	oce_write_db(eq->sc, PD_EQ_DB, eq->id | PD_EQ_DB_EVENT |
2224 	    (clearint << 9) | (neqe << 16) | (rearm << 29));
2225 }
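
/*
 * Illustrative note (inferred from the write above, not from a
 * datasheet): the EQ doorbell word packs the queue id in the low bits,
 * PD_EQ_DB_EVENT to select the event queue doorbell, the "clear
 * interrupt" flag at bit 9, the number of processed EQEs at bits 16+
 * and the re-arm flag at bit 29.
 */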
2226 
2227 void
2228 oce_drain_eq(struct oce_eq *eq)
2229 {
2230 	struct oce_eqe *eqe;
2231 	int neqe = 0;
2232 
2233 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTREAD);
2234 	OCE_RING_FOREACH(eq->ring, eqe, eqe->evnt != 0) {
2235 		eqe->evnt = 0;
2236 		neqe++;
2237 	}
2238 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_PREWRITE);
2239 	oce_arm_eq(eq, neqe, FALSE, TRUE);
2240 }
2241 
2242 void
2243 oce_destroy_eq(struct oce_eq *eq)
2244 {
2245 	struct mbx_destroy_common_eq cmd;
2246 	struct oce_softc *sc = eq->sc;
2247 
2248 	if (eq->id >= 0) {
2249 		memset(&cmd, 0, sizeof(cmd));
2250 		cmd.params.req.id = htole16(eq->id);
2251 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_EQ,
2252 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2253 	}
2254 	if (eq->ring != NULL)
2255 		oce_destroy_ring(sc, eq->ring);
2256 	free(eq, M_DEVBUF, 0);
2257 }
2258 
2259 struct oce_mq *
2260 oce_create_mq(struct oce_softc *sc, struct oce_eq *eq)
2261 {
2262 	struct oce_mq *mq = NULL;
2263 	struct oce_cq *cq;
2264 
2265 	/* allocate the mq */
2266 	mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
2267 	if (!mq)
2268 		return (NULL);
2269 
2270 	mq->ring = oce_create_ring(sc, 128, sizeof(struct oce_mbx), 8);
2271 	if (!mq->ring) {
2272 		free(mq, M_DEVBUF, 0);
2273 		return (NULL);
2274 	}
2275 
2276 	cq = oce_create_cq(sc, eq, CQ_LEN_256, sizeof(struct oce_mq_cqe),
2277 	    1, 0, 0);
2278 	if (!cq) {
2279 		oce_destroy_ring(sc, mq->ring);
2280 		free(mq, M_DEVBUF, 0);
2281 		return (NULL);
2282 	}
2283 
2284 	mq->id = -1;
2285 	mq->sc = sc;
2286 	mq->cq = cq;
2287 
2288 	mq->nitems = 128;
2289 
2290 	if (oce_new_mq(sc, mq)) {
2291 		oce_destroy_cq(mq->cq);
2292 		oce_destroy_ring(sc, mq->ring);
2293 		free(mq, M_DEVBUF, 0);
2294 		return (NULL);
2295 	}
2296 
2297 	eq->cq[eq->cq_valid] = cq;
2298 	eq->cq_valid++;
2299 	mq->cq->eq = eq;
2300 	mq->cq->cb_arg = mq;
2301 	mq->cq->cq_intr = oce_intr_mq;
2302 
2303 	return (mq);
2304 }
2305 
2306 void
2307 oce_drain_mq(struct oce_mq *mq)
2308 {
2309 	struct oce_cq *cq = mq->cq;
2310 	struct oce_mq_cqe *cqe;
2311 	int ncqe = 0;
2312 
2313 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2314 	OCE_RING_FOREACH(cq->ring, cqe, MQ_CQE_VALID(cqe)) {
2315 		MQ_CQE_INVALIDATE(cqe);
2316 		ncqe++;
2317 	}
2318 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2319 	oce_arm_cq(cq, ncqe, FALSE);
2320 }
2321 
2322 void
2323 oce_destroy_mq(struct oce_mq *mq)
2324 {
2325 	struct mbx_destroy_common_mq cmd;
2326 	struct oce_softc *sc = mq->sc;
2327 
2328 	if (mq->id >= 0) {
2329 		memset(&cmd, 0, sizeof(cmd));
2330 		cmd.params.req.id = htole16(mq->id);
2331 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_MQ,
2332 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2333 	}
2334 	if (mq->ring != NULL)
2335 		oce_destroy_ring(sc, mq->ring);
2336 	if (mq->cq != NULL)
2337 		oce_destroy_cq(mq->cq);
2338 	free(mq, M_DEVBUF, 0);
2339 }
2340 
2341 /**
2342  * @brief		Function to create a completion queue
2343  * @param sc		software handle to the device
2344  * @param eq		optional eq to associate the cq with
2345  * @param nitems	length of completion queue
2346  * @param isize		size of completion queue items
2347  * @param eventable	whether the cq should post events to its eq
2348  * @param nodelay	no delay flag
2349  * @param ncoalesce	coalescing watermark
2350  * @returns 		pointer to the cq created, NULL on failure
2351  */
2352 struct oce_cq *
2353 oce_create_cq(struct oce_softc *sc, struct oce_eq *eq, int nitems, int isize,
2354     int eventable, int nodelay, int ncoalesce)
2355 {
2356 	struct oce_cq *cq = NULL;
2357 
2358 	cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
2359 	if (!cq)
2360 		return (NULL);
2361 
2362 	cq->ring = oce_create_ring(sc, nitems, isize, 4);
2363 	if (!cq->ring) {
2364 		free(cq, M_DEVBUF, 0);
2365 		return (NULL);
2366 	}
2367 
2368 	cq->sc = sc;
2369 	cq->eq = eq;
2370 	cq->nitems = nitems;
2371 	cq->nodelay = nodelay;
2372 	cq->ncoalesce = ncoalesce;
2373 	cq->eventable = eventable;
2374 
2375 	if (oce_new_cq(sc, cq)) {
2376 		oce_destroy_ring(sc, cq->ring);
2377 		free(cq, M_DEVBUF, 0);
2378 		return (NULL);
2379 	}
2380 
2381 	sc->sc_cq[sc->sc_ncq++] = cq;
2382 
2383 	return (cq);
2384 }
2385 
2386 void
2387 oce_destroy_cq(struct oce_cq *cq)
2388 {
2389 	struct mbx_destroy_common_cq cmd;
2390 	struct oce_softc *sc = cq->sc;
2391 
2392 	if (cq->id >= 0) {
2393 		memset(&cmd, 0, sizeof(cmd));
2394 		cmd.params.req.id = htole16(cq->id);
2395 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_CQ,
2396 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2397 	}
2398 	if (cq->ring != NULL)
2399 		oce_destroy_ring(sc, cq->ring);
2400 	free(cq, M_DEVBUF, 0);
2401 }
2402 
2403 /**
2404  * @brief		Function to arm a CQ with CQEs
2405  * @param cq		pointer to the completion queue structure
2406  * @param ncqe		number of CQEs to arm
2407  * @param rearm		rearm bit enable/disable
2408  */
2409 static inline void
2410 oce_arm_cq(struct oce_cq *cq, int ncqe, int rearm)
2411 {
2412 	oce_write_db(cq->sc, PD_CQ_DB, cq->id | (ncqe << 16) | (rearm << 29));
2413 }
2414 
2415 void
2416 oce_free_posted_rxbuf(struct oce_rq *rq)
2417 {
2418 	struct oce_softc *sc = rq->sc;
2419 	struct oce_pkt *pkt;
2420 
2421 	while ((pkt = oce_pkt_get(&rq->pkt_list)) != NULL) {
2422 		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
2423 		    BUS_DMASYNC_POSTREAD);
2424 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
2425 		if (pkt->mbuf != NULL) {
2426 			m_freem(pkt->mbuf);
2427 			pkt->mbuf = NULL;
2428 		}
2429 		oce_pkt_put(&rq->pkt_free, pkt);
2430 		if_rxr_put(&rq->rxring, 1);
2431 	}
2432 }
2433 
2434 int
2435 oce_dma_alloc(struct oce_softc *sc, bus_size_t size, struct oce_dma_mem *dma)
2436 {
2437 	int rc;
2438 
2439 	memset(dma, 0, sizeof(struct oce_dma_mem));
2440 
2441 	dma->tag = sc->sc_dmat;
2442 	rc = bus_dmamap_create(dma->tag, size, 1, size, 0, BUS_DMA_NOWAIT,
2443 	    &dma->map);
2444 	if (rc != 0) {
2445 		printf("%s: failed to allocate DMA handle\n",
2446 		    sc->sc_dev.dv_xname);
2447 		goto fail_0;
2448 	}
2449 
2450 	rc = bus_dmamem_alloc(dma->tag, size, PAGE_SIZE, 0, &dma->segs, 1,
2451 	    &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
2452 	if (rc != 0) {
2453 		printf("%s: failed to allocate DMA memory\n",
2454 		    sc->sc_dev.dv_xname);
2455 		goto fail_1;
2456 	}
2457 
2458 	rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
2459 	    &dma->vaddr, BUS_DMA_NOWAIT);
2460 	if (rc != 0) {
2461 		printf("%s: failed to map DMA memory\n", sc->sc_dev.dv_xname);
2462 		goto fail_2;
2463 	}
2464 
2465 	rc = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, NULL,
2466 	    BUS_DMA_NOWAIT);
2467 	if (rc != 0) {
2468 		printf("%s: failed to load DMA memory\n", sc->sc_dev.dv_xname);
2469 		goto fail_3;
2470 	}
2471 
2472 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2473 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2474 
2475 	dma->paddr = dma->map->dm_segs[0].ds_addr;
2476 	dma->size = size;
2477 
2478 	return (0);
2479 
2480 fail_3:
2481 	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
2482 fail_2:
2483 	bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2484 fail_1:
2485 	bus_dmamap_destroy(dma->tag, dma->map);
2486 fail_0:
2487 	return (rc);
2488 }
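
/*
 * The fail_* labels above unwind in strict reverse order of setup
 * (load, map, alloc, dmamap create), the usual bus_dma(9) error
 * handling ladder.
 */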
2489 
2490 void
2491 oce_dma_free(struct oce_softc *sc, struct oce_dma_mem *dma)
2492 {
2493 	if (dma->tag == NULL)
2494 		return;
2495 
2496 	if (dma->map != NULL) {
2497 		oce_dma_sync(dma, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2498 		bus_dmamap_unload(dma->tag, dma->map);
2499 
2500 		if (dma->vaddr != 0) {
2501 			bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2502 			dma->vaddr = 0;
2503 		}
2504 
2505 		bus_dmamap_destroy(dma->tag, dma->map);
2506 		dma->map = NULL;
2507 		dma->tag = NULL;
2508 	}
2509 }
2510 
2511 struct oce_ring *
2512 oce_create_ring(struct oce_softc *sc, int nitems, int isize, int maxsegs)
2513 {
2514 	struct oce_dma_mem *dma;
2515 	struct oce_ring *ring;
2516 	bus_size_t size = nitems * isize;
2517 	int rc;
2518 
2519 	if (size > maxsegs * PAGE_SIZE)
2520 		return (NULL);
2521 
2522 	ring = malloc(sizeof(struct oce_ring), M_DEVBUF, M_NOWAIT | M_ZERO);
2523 	if (ring == NULL)
2524 		return (NULL);
2525 
2526 	ring->isize = isize;
2527 	ring->nitems = nitems;
2528 
2529 	dma = &ring->dma;
2530 	dma->tag = sc->sc_dmat;
2531 	rc = bus_dmamap_create(dma->tag, size, maxsegs, PAGE_SIZE, 0,
2532 	    BUS_DMA_NOWAIT, &dma->map);
2533 	if (rc != 0) {
2534 		printf("%s: failed to allocate DMA handle\n",
2535 		    sc->sc_dev.dv_xname);
2536 		goto fail_0;
2537 	}
2538 
2539 	rc = bus_dmamem_alloc(dma->tag, size, 0, 0, &dma->segs, maxsegs,
2540 	    &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
2541 	if (rc != 0) {
2542 		printf("%s: failed to allocate DMA memory\n",
2543 		    sc->sc_dev.dv_xname);
2544 		goto fail_1;
2545 	}
2546 
2547 	rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
2548 	    &dma->vaddr, BUS_DMA_NOWAIT);
2549 	if (rc != 0) {
2550 		printf("%s: failed to map DMA memory\n", sc->sc_dev.dv_xname);
2551 		goto fail_2;
2552 	}
2553 
2554 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2555 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2556 
2557 	dma->paddr = 0;
2558 	dma->size = size;
2559 
2560 	return (ring);
2561 
2562 fail_2:
2563 	bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2564 fail_1:
2565 	bus_dmamap_destroy(dma->tag, dma->map);
2566 fail_0:
2567 	free(ring, M_DEVBUF, 0);
2568 	return (NULL);
2569 }
2570 
2571 void
2572 oce_destroy_ring(struct oce_softc *sc, struct oce_ring *ring)
2573 {
2574 	oce_dma_free(sc, &ring->dma);
2575 	free(ring, M_DEVBUF, 0);
2576 }
2577 
2578 int
2579 oce_load_ring(struct oce_softc *sc, struct oce_ring *ring,
2580     struct oce_pa *pa, int maxsegs)
2581 {
2582 	struct oce_dma_mem *dma = &ring->dma;
2583 	int i;
2584 
2585 	if (bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
2586 	    ring->isize * ring->nitems, NULL, BUS_DMA_NOWAIT)) {
2587 		printf("%s: failed to load a ring map\n", sc->sc_dev.dv_xname);
2588 		return (0);
2589 	}
2590 
2591 	if (dma->map->dm_nsegs > maxsegs) {
2592 		printf("%s: too many segments\n", sc->sc_dev.dv_xname);
2593 		return (0);
2594 	}
2595 
2596 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2597 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2598 
2599 	for (i = 0; i < dma->map->dm_nsegs; i++)
2600 		pa[i].addr = dma->map->dm_segs[i].ds_addr;
2601 
2602 	return (dma->map->dm_nsegs);
2603 }
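
/*
 * Note: oce_load_ring() hands the ring's page addresses back through
 * the caller's scatter-gather array and returns the number of DMA
 * segments used, or 0 on failure; the oce_new_* functions below treat
 * a zero return as an error.
 */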
2604 
2605 static inline void *
2606 oce_ring_get(struct oce_ring *ring)
2607 {
2608 	int index = ring->index;
2609 
2610 	if (++ring->index == ring->nitems)
2611 		ring->index = 0;
2612 	return ((void *)(ring->dma.vaddr + index * ring->isize));
2613 }
2614 
2615 static inline void *
2616 oce_ring_first(struct oce_ring *ring)
2617 {
2618 	return ((void *)(ring->dma.vaddr + ring->index * ring->isize));
2619 }
2620 
2621 static inline void *
2622 oce_ring_next(struct oce_ring *ring)
2623 {
2624 	if (++ring->index == ring->nitems)
2625 		ring->index = 0;
2626 	return ((void *)(ring->dma.vaddr + ring->index * ring->isize));
2627 }
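
/*
 * A minimal usage sketch for the ring accessors above (illustrative
 * only): oce_ring_get() returns the current slot and advances the
 * producer index, while oce_ring_first()/oce_ring_next() walk entries
 * without claiming them, e.g.:
 *
 *	struct oce_mbx *mbx = oce_ring_get(mq->ring);
 *	memset(mbx, 0, sizeof(*mbx));
 *	... fill in the MQE, sync the ring, ring PD_MQ_DB ...
 */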
2628 
2629 struct oce_pkt *
2630 oce_pkt_alloc(struct oce_softc *sc, size_t size, int nsegs, int maxsegsz)
2631 {
2632 	struct oce_pkt *pkt;
2633 
2634 	if ((pkt = pool_get(oce_pkt_pool, PR_NOWAIT | PR_ZERO)) == NULL)
2635 		return (NULL);
2636 
2637 	if (bus_dmamap_create(sc->sc_dmat, size, nsegs, maxsegsz, 0,
2638 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &pkt->map)) {
2639 		pool_put(oce_pkt_pool, pkt);
2640 		return (NULL);
2641 	}
2642 
2643 	return (pkt);
2644 }
2645 
2646 void
2647 oce_pkt_free(struct oce_softc *sc, struct oce_pkt *pkt)
2648 {
2649 	if (pkt->map) {
2650 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
2651 		bus_dmamap_destroy(sc->sc_dmat, pkt->map);
2652 	}
2653 	pool_put(oce_pkt_pool, pkt);
2654 }
2655 
2656 static inline struct oce_pkt *
2657 oce_pkt_get(struct oce_pkt_list *lst)
2658 {
2659 	struct oce_pkt *pkt;
2660 
2661 	pkt = SIMPLEQ_FIRST(lst);
2662 	if (pkt == NULL)
2663 		return (NULL);
2664 
2665 	SIMPLEQ_REMOVE_HEAD(lst, entry);
2666 
2667 	return (pkt);
2668 }
2669 
2670 static inline void
2671 oce_pkt_put(struct oce_pkt_list *lst, struct oce_pkt *pkt)
2672 {
2673 	SIMPLEQ_INSERT_TAIL(lst, pkt, entry);
2674 }
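
/*
 * pkt_free/pkt_list implement a simple two-list descriptor scheme:
 * oce_pkt_alloc() stocks pkt_free at queue creation time; a sketch of
 * the round trip (hypothetical caller) looks like:
 *
 *	pkt = oce_pkt_get(&rq->pkt_free);	/- claim a free slot
 *	... attach an mbuf and post it to the hardware ...
 *	oce_pkt_put(&rq->pkt_list, pkt);	/- now owned by the chip
 *
 * with completions moving the pkt back onto pkt_free, as
 * oce_free_posted_rxbuf() above does in bulk.
 */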
2675 
2676 /**
2677  * @brief Wait for FW to become ready and reset it
2678  * @param sc		software handle to the device
2679  */
2680 int
2681 oce_init_fw(struct oce_softc *sc)
2682 {
2683 	struct ioctl_common_function_reset cmd;
2684 	uint32_t reg;
2685 	int err = 0, tmo = 60000;
2686 
2687 	/* read semaphore CSR */
2688 	reg = oce_read_csr(sc, MPU_EP_SEMAPHORE(sc));
2689 
2690 	/* if FW still awaits the host, request POST (chip reset), then wait for FW ready */
2691 	if ((reg & MPU_EP_SEM_STAGE_MASK) <= POST_STAGE_AWAITING_HOST_RDY) {
2692 		reg = (reg & ~MPU_EP_SEM_STAGE_MASK) | POST_STAGE_CHIP_RESET;
2693 		oce_write_csr(sc, MPU_EP_SEMAPHORE(sc), reg);
2694 	}
2695 
2696 	/* wait for FW to become ready */
2697 	for (;;) {
2698 		if (--tmo == 0)
2699 			break;
2700 
2701 		DELAY(1000);
2702 
2703 		reg = oce_read_csr(sc, MPU_EP_SEMAPHORE(sc));
2704 		if (reg & MPU_EP_SEM_ERROR) {
2705 			printf(": POST failed: %#x\n", reg);
2706 			return (ENXIO);
2707 		}
2708 		if ((reg & MPU_EP_SEM_STAGE_MASK) == POST_STAGE_ARMFW_READY) {
2709 			/* reset FW */
2710 			if (ISSET(sc->sc_flags, OCE_F_RESET_RQD)) {
2711 				memset(&cmd, 0, sizeof(cmd));
2712 				err = oce_cmd(sc, SUBSYS_COMMON,
2713 				    OPCODE_COMMON_FUNCTION_RESET,
2714 				    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2715 			}
2716 			return (err);
2717 		}
2718 	}
2719 
2720 	printf(": POST timed out: %#x\n", reg);
2721 
2722 	return (ENXIO);
2723 }
2724 
2725 static inline int
2726 oce_mbox_wait(struct oce_softc *sc)
2727 {
2728 	int i;
2729 
2730 	for (i = 0; i < 20000; i++) {
2731 		if (oce_read_db(sc, PD_MPU_MBOX_DB) & PD_MPU_MBOX_DB_READY)
2732 			return (0);
2733 		DELAY(100);
2734 	}
2735 	return (ETIMEDOUT);
2736 }
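
/*
 * Note: the loop above polls the ready bit every 100us for up to
 * 20000 iterations, i.e. roughly a two second ceiling per doorbell
 * write.
 */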
2737 
2738 /**
2739  * @brief Mailbox dispatch
2740  * @param sc		software handle to the device
2741  */
2742 int
2743 oce_mbox_dispatch(struct oce_softc *sc)
2744 {
2745 	uint32_t pa, reg;
2746 	int err;
2747 
2748 	pa = (uint32_t)((uint64_t)OCE_MEM_DVA(&sc->sc_mbx) >> 34);
2749 	reg = PD_MPU_MBOX_DB_HI | (pa << PD_MPU_MBOX_DB_ADDR_SHIFT);
2750 
2751 	if ((err = oce_mbox_wait(sc)) != 0)
2752 		goto out;
2753 
2754 	oce_write_db(sc, PD_MPU_MBOX_DB, reg);
2755 
2756 	pa = (uint32_t)((uint64_t)OCE_MEM_DVA(&sc->sc_mbx) >> 4) & 0x3fffffff;
2757 	reg = pa << PD_MPU_MBOX_DB_ADDR_SHIFT;
2758 
2759 	if ((err = oce_mbox_wait(sc)) != 0)
2760 		goto out;
2761 
2762 	oce_write_db(sc, PD_MPU_MBOX_DB, reg);
2763 
2764 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_POSTWRITE);
2765 
2766 	if ((err = oce_mbox_wait(sc)) != 0)
2767 		goto out;
2768 
2769 out:
2770 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_PREREAD);
2771 	return (err);
2772 }
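
/*
 * Note on the dispatch protocol above: the physical address of the
 * bootstrap mailbox is delivered in two doorbell writes, first the
 * high part (bits 34 and up, tagged PD_MPU_MBOX_DB_HI), then the low
 * part (bits 4-33), with the ready bit polled before each write and
 * once more for the completion.
 */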
2773 
2774 /**
2775  * @brief Function to initialize the hw with host endian information
2776  * @param sc		software handle to the device
2777  * @returns		0 on success, ETIMEDOUT on failure
2778  */
2779 int
2780 oce_mbox_init(struct oce_softc *sc)
2781 {
2782 	struct oce_bmbx *bmbx = OCE_MEM_KVA(&sc->sc_mbx);
2783 	uint8_t *ptr = (uint8_t *)&bmbx->mbx;
2784 
2785 	if (!ISSET(sc->sc_flags, OCE_F_MBOX_ENDIAN_RQD))
2786 		return (0);
2787 
2788 	/* Endian Signature */
2789 	*ptr++ = 0xff;
2790 	*ptr++ = 0x12;
2791 	*ptr++ = 0x34;
2792 	*ptr++ = 0xff;
2793 	*ptr++ = 0xff;
2794 	*ptr++ = 0x56;
2795 	*ptr++ = 0x78;
2796 	*ptr = 0xff;
2797 
2798 	return (oce_mbox_dispatch(sc));
2799 }
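
/*
 * The asymmetric byte pattern written above (ff 12 34 ff ff 56 78 ff)
 * lets the firmware infer the host byte order from the way the words
 * land in mailbox memory; only controllers flagged with
 * OCE_F_MBOX_ENDIAN_RQD need this handshake.
 */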
2800 
2801 int
2802 oce_cmd(struct oce_softc *sc, int subsys, int opcode, int version,
2803     void *payload, int length)
2804 {
2805 	struct oce_bmbx *bmbx = OCE_MEM_KVA(&sc->sc_mbx);
2806 	struct oce_mbx *mbx = &bmbx->mbx;
2807 	struct mbx_hdr *hdr;
2808 	caddr_t epayload = NULL;
2809 	int err;
2810 
2811 	if (length > OCE_MBX_PAYLOAD)
2812 		epayload = OCE_MEM_KVA(&sc->sc_pld);
2813 	if (length > OCE_MAX_PAYLOAD)
2814 		return (EINVAL);
2815 
2816 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2817 
2818 	memset(mbx, 0, sizeof(struct oce_mbx));
2819 
2820 	mbx->payload_length = length;
2821 
2822 	if (epayload) {
2823 		mbx->flags = OCE_MBX_F_SGE;
2824 		oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_PREREAD);
2825 		memcpy(epayload, payload, length);
2826 		mbx->pld.sgl[0].addr = OCE_MEM_DVA(&sc->sc_pld);
2827 		mbx->pld.sgl[0].length = length;
2828 		hdr = (struct mbx_hdr *)epayload;
2829 	} else {
2830 		mbx->flags = OCE_MBX_F_EMBED;
2831 		memcpy(mbx->pld.data, payload, length);
2832 		hdr = (struct mbx_hdr *)&mbx->pld.data;
2833 	}
2834 
2835 	hdr->subsys = subsys;
2836 	hdr->opcode = opcode;
2837 	hdr->version = version;
2838 	hdr->length = length - sizeof(*hdr);
2839 	if (opcode == OPCODE_COMMON_FUNCTION_RESET)
2840 		hdr->timeout = 2 * OCE_MBX_TIMEOUT;
2841 	else
2842 		hdr->timeout = OCE_MBX_TIMEOUT;
2843 
2844 	if (epayload)
2845 		oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_PREWRITE);
2846 
2847 	err = oce_mbox_dispatch(sc);
2848 	if (err == 0) {
2849 		if (epayload) {
2850 			oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_POSTWRITE);
2851 			memcpy(payload, epayload, length);
2852 		} else
2853 			memcpy(payload, &mbx->pld.data, length);
2854 	} else
2855 		printf("%s: mailbox timeout, subsys %d op %d ver %d "
2856 		    "%spayload length %d\n", sc->sc_dev.dv_xname, subsys,
2857 		    opcode, version, epayload ? "ext " : "",
2858 		    length);
2859 	return (err);
2860 }
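
/*
 * Typical oce_cmd() usage, as in the helpers below: zero a request
 * structure, fill in the request half, issue the command and read the
 * response back out of the same buffer, e.g.:
 *
 *	struct mbx_common_query_fw_config cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	if (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
 *	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)) == 0)
 *		... consume cmd.params.rsp ...
 */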
2861 
2862 /**
2863  * @brief	Firmware sends asynchronous notifications during
2864  *		attach only after the first MCC command has been issued.
2865  *		We use the MCC queue only for receiving async
2866  *		notifications and the mailbox for sending commands, so
2867  *		send at least one dummy command on the MCC to enable them.
2868  */
2869 void
2870 oce_first_mcc(struct oce_softc *sc)
2871 {
2872 	struct oce_mbx *mbx;
2873 	struct oce_mq *mq = sc->sc_mq;
2874 	struct mbx_hdr *hdr;
2875 	struct mbx_get_common_fw_version *cmd;
2876 
2877 	mbx = oce_ring_get(mq->ring);
2878 	memset(mbx, 0, sizeof(struct oce_mbx));
2879 
2880 	cmd = (struct mbx_get_common_fw_version *)&mbx->pld.data;
2881 
2882 	hdr = &cmd->hdr;
2883 	hdr->subsys = SUBSYS_COMMON;
2884 	hdr->opcode = OPCODE_COMMON_GET_FW_VERSION;
2885 	hdr->version = OCE_MBX_VER_V0;
2886 	hdr->timeout = OCE_MBX_TIMEOUT;
2887 	hdr->length = sizeof(*cmd) - sizeof(*hdr);
2888 
2889 	mbx->flags = OCE_MBX_F_EMBED;
2890 	mbx->payload_length = sizeof(*cmd);
2891 	oce_dma_sync(&mq->ring->dma, BUS_DMASYNC_PREREAD |
2892 	    BUS_DMASYNC_PREWRITE);
2893 	oce_write_db(sc, PD_MQ_DB, mq->id | (1 << 16));
2894 }
2895 
2896 int
2897 oce_get_fw_config(struct oce_softc *sc)
2898 {
2899 	struct mbx_common_query_fw_config cmd;
2900 	int err;
2901 
2902 	memset(&cmd, 0, sizeof(cmd));
2903 
2904 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2905 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2906 	if (err)
2907 		return (err);
2908 
2909 	sc->sc_port = cmd.params.rsp.port_id;
2910 	sc->sc_fmode = cmd.params.rsp.function_mode;
2911 
2912 	return (0);
2913 }
2914 
2915 int
2916 oce_check_native_mode(struct oce_softc *sc)
2917 {
2918 	struct mbx_common_set_function_cap cmd;
2919 	int err;
2920 
2921 	memset(&cmd, 0, sizeof(cmd));
2922 
2923 	cmd.params.req.valid_capability_flags = CAP_SW_TIMESTAMPS |
2924 	    CAP_BE3_NATIVE_ERX_API;
2925 	cmd.params.req.capability_flags = CAP_BE3_NATIVE_ERX_API;
2926 
2927 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_FUNCTIONAL_CAPS,
2928 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2929 	if (err)
2930 		return (err);
2931 
2932 	if (cmd.params.rsp.capability_flags & CAP_BE3_NATIVE_ERX_API)
2933 		SET(sc->sc_flags, OCE_F_BE3_NATIVE);
2934 
2935 	return (0);
2936 }
2937 
2938 /**
2939  * @brief Function for creating a network interface.
2940  * @param sc		software handle to the device
2941  * @returns		0 on success, error otherwise
2942  */
2943 int
2944 oce_create_iface(struct oce_softc *sc, uint8_t *macaddr)
2945 {
2946 	struct mbx_create_common_iface cmd;
2947 	uint32_t caps, caps_en;
2948 	int err = 0;
2949 
2950 	/* interface capabilities to give device when creating interface */
2951 	caps = MBX_RX_IFACE_BROADCAST | MBX_RX_IFACE_UNTAGGED |
2952 	    MBX_RX_IFACE_PROMISC | MBX_RX_IFACE_MCAST_PROMISC |
2953 	    MBX_RX_IFACE_RSS;
2954 
2955 	/* capabilities to enable by default (others set dynamically) */
2956 	caps_en = MBX_RX_IFACE_BROADCAST | MBX_RX_IFACE_UNTAGGED;
2957 
2958 	if (!IS_XE201(sc)) {
2959 		/* LANCER A0 workaround */
2960 		caps |= MBX_RX_IFACE_PASS_L3L4_ERR;
2961 		caps_en |= MBX_RX_IFACE_PASS_L3L4_ERR;
2962 	}
2963 
2964 	/* enable capabilities controlled via driver startup parameters */
2965 	if (sc->sc_rss_enable)
2966 		caps_en |= MBX_RX_IFACE_RSS;
2967 
2968 	memset(&cmd, 0, sizeof(cmd));
2969 
2970 	cmd.params.req.version = 0;
2971 	cmd.params.req.cap_flags = htole32(caps);
2972 	cmd.params.req.enable_flags = htole32(caps_en);
2973 	if (macaddr != NULL) {
2974 		memcpy(&cmd.params.req.mac_addr[0], macaddr, ETHER_ADDR_LEN);
2975 		cmd.params.req.mac_invalid = 0;
2976 	} else
2977 		cmd.params.req.mac_invalid = 1;
2978 
2979 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_IFACE,
2980 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2981 	if (err)
2982 		return (err);
2983 
2984 	sc->sc_if_id = letoh32(cmd.params.rsp.if_id);
2985 
2986 	if (macaddr != NULL)
2987 		sc->sc_pmac_id = letoh32(cmd.params.rsp.pmac_id);
2988 
2989 	return (0);
2990 }
2991 
2992 /**
2993  * @brief Function to send the mbx command to configure vlan
2994  * @param sc 		software handle to the device
2995  * @param vtags		array of vlan tags
2996  * @param nvtags	number of elements in array
2997  * @param untagged	boolean TRUE/FALSE
2998  * @param promisc	flag to enable/disable VLAN promiscuous mode
2999  * @returns		0 on success, EIO on failure
3000  */
3001 int
3002 oce_config_vlan(struct oce_softc *sc, struct normal_vlan *vtags, int nvtags,
3003     int untagged, int promisc)
3004 {
3005 	struct mbx_common_config_vlan cmd;
3006 
3007 	memset(&cmd, 0, sizeof(cmd));
3008 
3009 	cmd.params.req.if_id = sc->sc_if_id;
3010 	cmd.params.req.promisc = promisc;
3011 	cmd.params.req.untagged = untagged;
3012 	cmd.params.req.num_vlans = nvtags;
3013 
3014 	if (!promisc)
3015 		memcpy(cmd.params.req.tags.normal_vlans, vtags,
3016 			nvtags * sizeof(struct normal_vlan));
3017 
3018 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CONFIG_IFACE_VLAN,
3019 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3020 }
3021 
3022 /**
3023  * @brief Function to set flow control capability in the hardware
3024  * @param sc 		software handle to the device
3025  * @param flags		flow control flags to set
3026  * @returns		0 on success, EIO on failure
3027  */
3028 int
3029 oce_set_flow_control(struct oce_softc *sc, uint64_t flags)
3030 {
3031 	struct mbx_common_get_set_flow_control cmd;
3032 	int err;
3033 
3034 	memset(&cmd, 0, sizeof(cmd));
3035 
3036 	cmd.rx_flow_control = flags & IFM_ETH_RXPAUSE ? 1 : 0;
3037 	cmd.tx_flow_control = flags & IFM_ETH_TXPAUSE ? 1 : 0;
3038 
3039 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_FLOW_CONTROL,
3040 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3041 	if (err)
3042 		return (err);
3043 
3044 	memset(&cmd, 0, sizeof(cmd));
3045 
3046 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_GET_FLOW_CONTROL,
3047 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3048 	if (err)
3049 		return (err);
3050 
3051 	sc->sc_fc  = cmd.rx_flow_control ? IFM_ETH_RXPAUSE : 0;
3052 	sc->sc_fc |= cmd.tx_flow_control ? IFM_ETH_TXPAUSE : 0;
3053 
3054 	return (0);
3055 }
3056 
3057 #ifdef OCE_RSS
3058 /**
3059  * @brief Function to configure RSS for the interface
3060  * @param sc 		software handle to the device
3061  * @param enable	0=disable, OCE_RSS_xxx flags otherwise
3062  * @returns		0 on success, EIO on failure
3063  */
3064 int
3065 oce_config_rss(struct oce_softc *sc, int enable)
3066 {
3067 	struct mbx_config_nic_rss cmd;
3068 	uint8_t *tbl = &cmd.params.req.cputable[0];
3069 	int i, j;
3070 
3071 	memset(&cmd, 0, sizeof(cmd));
3072 
3073 	if (enable)
3074 		cmd.params.req.enable_rss = RSS_ENABLE_IPV4 | RSS_ENABLE_IPV6 |
3075 		    RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_TCP_IPV6;
3076 	cmd.params.req.flush = OCE_FLUSH;
3077 	cmd.params.req.if_id = htole32(sc->sc_if_id);
3078 
3079 	arc4random_buf(cmd.params.req.hash, sizeof(cmd.params.req.hash));
3080 
3081 	/*
3082 	 * Initialize the RSS CPU indirection table.
3083 	 *
3084 	 * The table is used to choose the queue to place incoming packets.
3085 	 * Incoming packets are hashed.  The lowest bits in the hash result
3086 	 * are used as the index into the CPU indirection table.
3087 	 * Each entry in the table contains the RSS CPU-ID returned by the NIC
3088 	 * create.  Based on the CPU ID, the receive completion is routed to
3089 	 * the corresponding RSS CQs.  (Non-RSS packets are always completed
3090 	 * on the default (0) CQ).
3091 	 */
3092 	for (i = 0, j = 0; j < sc->sc_nrq; j++) {
3093 		if (sc->sc_rq[j]->cfg.is_rss_queue)
3094 			tbl[i++] = sc->sc_rq[j]->rss_cpuid;
3095 	}
3096 	if (i > 0)
3097 		cmd.params.req.cpu_tbl_sz_log2 = htole16(ilog2(i));
3098 	else
3099 		return (ENXIO);
3100 
3101 	return (oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CONFIG_RSS, OCE_MBX_VER_V0,
3102 	    &cmd, sizeof(cmd)));
3103 }
3104 #endif	/* OCE_RSS */
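
/*
 * Worked example for the indirection table above (illustrative): with
 * four RSS queues the loop fills four slots, cpu_tbl_sz_log2 becomes
 * ilog2(4) = 2, and the chip uses the low two bits of a packet's hash
 * to select one of the four RSS completion queues.
 */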
3105 
3106 /**
3107  * @brief Function to update the hardware multicast filter
3108  * @param sc		software handle to the device
3109  * @param multi		table of multicast addresses
3110  * @param naddr		number of multicast addresses in the table
3111  */
3112 int
3113 oce_update_mcast(struct oce_softc *sc,
3114     uint8_t multi[][ETHER_ADDR_LEN], int naddr)
3115 {
3116 	struct mbx_set_common_iface_multicast cmd;
3117 
3118 	memset(&cmd, 0, sizeof(cmd));
3119 
3120 	memcpy(&cmd.params.req.mac[0], &multi[0], naddr * ETHER_ADDR_LEN);
3121 	cmd.params.req.num_mac = htole16(naddr);
3122 	cmd.params.req.if_id = sc->sc_if_id;
3123 
3124 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_IFACE_MULTICAST,
3125 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3126 }
3127 
3128 /**
3129  * @brief RXF function to enable/disable device promiscuous mode
3130  * @param sc		software handle to the device
3131  * @param enable	enable/disable flag
3132  * @returns		0 on success, EIO on failure
3133  * @note
3134  *	The OPCODE_NIC_CONFIG_PROMISCUOUS command is deprecated on Lancer.
3135  *	This function uses the COMMON_SET_IFACE_RX_FILTER command instead.
3136  */
3137 int
3138 oce_set_promisc(struct oce_softc *sc, int enable)
3139 {
3140 	struct mbx_set_common_iface_rx_filter cmd;
3141 	struct iface_rx_filter_ctx *req;
3142 
3143 	memset(&cmd, 0, sizeof(cmd));
3144 
3145 	req = &cmd.params.req;
3146 	req->if_id = sc->sc_if_id;
3147 
3148 	if (enable)
3149 		req->iface_flags = req->iface_flags_mask =
3150 		    MBX_RX_IFACE_PROMISC | MBX_RX_IFACE_VLAN_PROMISC;
3151 
3152 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_IFACE_RX_FILTER,
3153 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3154 }
3155 
3156 /**
3157  * @brief Function to query the link status from the hardware
3158  * @param sc 		software handle to the device
3159  * @note		link state and speed are stored in the softc
3160  * @returns		0 on success, EIO on failure
3161  */
3162 int
3163 oce_get_link_status(struct oce_softc *sc)
3164 {
3165 	struct mbx_query_common_link_config cmd;
3166 	int err;
3167 
3168 	memset(&cmd, 0, sizeof(cmd));
3169 
3170 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_LINK_CONFIG,
3171 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3172 	if (err)
3173 		return (err);
3174 
3175 	sc->sc_link_up = (letoh32(cmd.params.rsp.logical_link_status) ==
3176 	    NTWK_LOGICAL_LINK_UP);
3177 
3178 	if (cmd.params.rsp.mac_speed < 5)
3179 		sc->sc_link_speed = cmd.params.rsp.mac_speed;
3180 	else
3181 		sc->sc_link_speed = 0;
3182 
3183 	return (0);
3184 }
3185 
3186 void
3187 oce_macaddr_set(struct oce_softc *sc)
3188 {
3189 	uint32_t old_pmac_id = sc->sc_pmac_id;
3190 	int status = 0;
3191 
3192 	if (!memcmp(sc->sc_macaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN))
3193 		return;
3194 
3195 	status = oce_macaddr_add(sc, sc->sc_ac.ac_enaddr, &sc->sc_pmac_id);
3196 	if (!status)
3197 		status = oce_macaddr_del(sc, old_pmac_id);
3198 	else
3199 		printf("%s: failed to set MAC address\n", sc->sc_dev.dv_xname);
3200 }
3201 
3202 int
3203 oce_macaddr_get(struct oce_softc *sc, uint8_t *macaddr)
3204 {
3205 	struct mbx_query_common_iface_mac cmd;
3206 	int err;
3207 
3208 	memset(&cmd, 0, sizeof(cmd));
3209 
3210 	cmd.params.req.type = MAC_ADDRESS_TYPE_NETWORK;
3211 	cmd.params.req.permanent = 1;
3212 
3213 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_IFACE_MAC,
3214 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3215 	if (err == 0)
3216 		memcpy(macaddr, &cmd.params.rsp.mac.mac_addr[0],
3217 		    ETHER_ADDR_LEN);
3218 	return (err);
3219 }
3220 
3221 int
3222 oce_macaddr_add(struct oce_softc *sc, uint8_t *enaddr, uint32_t *pmac)
3223 {
3224 	struct mbx_add_common_iface_mac cmd;
3225 	int err;
3226 
3227 	memset(&cmd, 0, sizeof(cmd));
3228 
3229 	cmd.params.req.if_id = htole16(sc->sc_if_id);
3230 	memcpy(cmd.params.req.mac_address, enaddr, ETHER_ADDR_LEN);
3231 
3232 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_ADD_IFACE_MAC,
3233 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3234 	if (err == 0)
3235 		*pmac = letoh32(cmd.params.rsp.pmac_id);
3236 	return (err);
3237 }
3238 
3239 int
3240 oce_macaddr_del(struct oce_softc *sc, uint32_t pmac)
3241 {
3242 	struct mbx_del_common_iface_mac cmd;
3243 
3244 	memset(&cmd, 0, sizeof(cmd));
3245 
3246 	cmd.params.req.if_id = htole16(sc->sc_if_id);
3247 	cmd.params.req.pmac_id = htole32(pmac);
3248 
3249 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DEL_IFACE_MAC,
3250 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3251 }
3252 
3253 int
3254 oce_new_rq(struct oce_softc *sc, struct oce_rq *rq)
3255 {
3256 	struct mbx_create_nic_rq cmd;
3257 	int err, npages;
3258 
3259 	memset(&cmd, 0, sizeof(cmd));
3260 
3261 	npages = oce_load_ring(sc, rq->ring, &cmd.params.req.pages[0],
3262 	    nitems(cmd.params.req.pages));
3263 	if (!npages) {
3264 		printf("%s: failed to load the rq ring\n", __func__);
3265 		return (1);
3266 	}
3267 
3268 	if (IS_XE201(sc)) {
3269 		cmd.params.req.frag_size = rq->fragsize / 2048;
3270 		cmd.params.req.page_size = 1;
3271 	} else
3272 		cmd.params.req.frag_size = ilog2(rq->fragsize);
3273 	cmd.params.req.num_pages = npages;
3274 	cmd.params.req.cq_id = rq->cq->id;
3275 	cmd.params.req.if_id = htole32(sc->sc_if_id);
3276 	cmd.params.req.max_frame_size = htole16(rq->mtu);
3277 	cmd.params.req.is_rss_queue = htole32(rq->rss);
3278 
3279 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CREATE_RQ,
3280 	    IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0, &cmd,
3281 	    sizeof(cmd));
3282 	if (err)
3283 		return (err);
3284 
3285 	rq->id = letoh16(cmd.params.rsp.rq_id);
3286 	rq->rss_cpuid = cmd.params.rsp.rss_cpuid;
3287 
3288 	return (0);
3289 }
3290 
3291 int
3292 oce_new_wq(struct oce_softc *sc, struct oce_wq *wq)
3293 {
3294 	struct mbx_create_nic_wq cmd;
3295 	int err, npages;
3296 
3297 	memset(&cmd, 0, sizeof(cmd));
3298 
3299 	npages = oce_load_ring(sc, wq->ring, &cmd.params.req.pages[0],
3300 	    nitems(cmd.params.req.pages));
3301 	if (!npages) {
3302 		printf("%s: failed to load the wq ring\n", __func__);
3303 		return (1);
3304 	}
3305 
3306 	if (IS_XE201(sc))
3307 		cmd.params.req.if_id = sc->sc_if_id;
3308 	cmd.params.req.nic_wq_type = NIC_WQ_TYPE_STANDARD;
3309 	cmd.params.req.num_pages = npages;
3310 	cmd.params.req.wq_size = ilog2(wq->nitems) + 1;
3311 	cmd.params.req.cq_id = htole16(wq->cq->id);
3312 	cmd.params.req.ulp_num = 1;
3313 
3314 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CREATE_WQ,
3315 	    IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0, &cmd,
3316 	    sizeof(cmd));
3317 	if (err)
3318 		return (err);
3319 
3320 	wq->id = letoh16(cmd.params.rsp.wq_id);
3321 
3322 	return (0);
3323 }
3324 
3325 int
3326 oce_new_mq(struct oce_softc *sc, struct oce_mq *mq)
3327 {
3328 	struct mbx_create_common_mq_ex cmd;
3329 	union oce_mq_ext_ctx *ctx;
3330 	int err, npages;
3331 
3332 	memset(&cmd, 0, sizeof(cmd));
3333 
3334 	npages = oce_load_ring(sc, mq->ring, &cmd.params.req.pages[0],
3335 	    nitems(cmd.params.req.pages));
3336 	if (!npages) {
3337 		printf("%s: failed to load the mq ring\n", __func__);
3338 		return (-1);
3339 	}
3340 
3341 	ctx = &cmd.params.req.context;
3342 	ctx->v0.num_pages = npages;
3343 	ctx->v0.cq_id = mq->cq->id;
3344 	ctx->v0.ring_size = ilog2(mq->nitems) + 1;
3345 	ctx->v0.valid = 1;
3346 	/* subscribe to all async events (link state, Group 5, etc.) */
3347 	ctx->v0.async_evt_bitmap = 0xffffffff;
3348 
3349 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_MQ_EXT,
3350 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3351 	if (err)
3352 		return (err);
3353 
3354 	mq->id = letoh16(cmd.params.rsp.mq_id);
3355 
3356 	return (0);
3357 }
3358 
3359 int
3360 oce_new_eq(struct oce_softc *sc, struct oce_eq *eq)
3361 {
3362 	struct mbx_create_common_eq cmd;
3363 	int err, npages;
3364 
3365 	memset(&cmd, 0, sizeof(cmd));
3366 
3367 	npages = oce_load_ring(sc, eq->ring, &cmd.params.req.pages[0],
3368 	    nitems(cmd.params.req.pages));
3369 	if (!npages) {
3370 		printf("%s: failed to load the eq ring\n", __func__);
3371 		return (-1);
3372 	}
3373 
3374 	cmd.params.req.ctx.num_pages = htole16(npages);
3375 	cmd.params.req.ctx.valid = 1;
3376 	cmd.params.req.ctx.size = (eq->isize == 4) ? 0 : 1;
3377 	cmd.params.req.ctx.count = ilog2(eq->nitems / 256);
3378 	cmd.params.req.ctx.armed = 0;
3379 	cmd.params.req.ctx.delay_mult = htole32(eq->delay);
3380 
3381 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_EQ,
3382 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3383 	if (err)
3384 		return (err);
3385 
3386 	eq->id = letoh16(cmd.params.rsp.eq_id);
3387 
3388 	return (0);
3389 }
3390 
3391 int
3392 oce_new_cq(struct oce_softc *sc, struct oce_cq *cq)
3393 {
3394 	struct mbx_create_common_cq cmd;
3395 	union oce_cq_ctx *ctx;
3396 	int err, npages;
3397 
3398 	memset(&cmd, 0, sizeof(cmd));
3399 
3400 	npages = oce_load_ring(sc, cq->ring, &cmd.params.req.pages[0],
3401 	    nitems(cmd.params.req.pages));
3402 	if (!npages) {
3403 		printf("%s: failed to load the cq ring\n", __func__);
3404 		return (-1);
3405 	}
3406 
3407 	ctx = &cmd.params.req.cq_ctx;
3408 
3409 	if (IS_XE201(sc)) {
3410 		ctx->v2.num_pages = htole16(npages);
3411 		ctx->v2.page_size = 1; /* for 4K */
3412 		ctx->v2.eventable = cq->eventable;
3413 		ctx->v2.valid = 1;
3414 		ctx->v2.count = ilog2(cq->nitems / 256);
3415 		ctx->v2.nodelay = cq->nodelay;
3416 		ctx->v2.coalesce_wm = cq->ncoalesce;
3417 		ctx->v2.armed = 0;
3418 		ctx->v2.eq_id = cq->eq->id;
3419 		if (ctx->v2.count == 3) {
3420 			if (cq->nitems > (4*1024)-1)
3421 				ctx->v2.cqe_count = (4*1024)-1;
3422 			else
3423 				ctx->v2.cqe_count = cq->nitems;
3424 		}
3425 	} else {
3426 		ctx->v0.num_pages = htole16(npages);
3427 		ctx->v0.eventable = cq->eventable;
3428 		ctx->v0.valid = 1;
3429 		ctx->v0.count = ilog2(cq->nitems / 256);
3430 		ctx->v0.nodelay = cq->nodelay;
3431 		ctx->v0.coalesce_wm = cq->ncoalesce;
3432 		ctx->v0.armed = 0;
3433 		ctx->v0.eq_id = cq->eq->id;
3434 	}
3435 
3436 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_CQ,
3437 	    IS_XE201(sc) ? OCE_MBX_VER_V2 : OCE_MBX_VER_V0, &cmd,
3438 	    sizeof(cmd));
3439 	if (err)
3440 		return (err);
3441 
3442 	cq->id = letoh16(cmd.params.rsp.cq_id);
3443 
3444 	return (0);
3445 }
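
/*
 * Worked example: the CQ_LEN_1024 completion queues created earlier
 * yield count = ilog2(1024 / 256) = 2 here; the "count == 3" special
 * case in the XE201 path corresponds to 2048-entry queues.
 */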
3446 
3447 int
3448 oce_init_stats(struct oce_softc *sc)
3449 {
3450 	union cmd {
3451 		struct mbx_get_nic_stats_v0	_be2;
3452 		struct mbx_get_nic_stats	_be3;
3453 		struct mbx_get_pport_stats	_xe201;
3454 	};
3455 
3456 	sc->sc_statcmd = malloc(sizeof(union cmd), M_DEVBUF, M_ZERO | M_NOWAIT);
3457 	if (sc->sc_statcmd == NULL) {
3458 		printf("%s: failed to allocate statistics command block\n",
3459 		    sc->sc_dev.dv_xname);
3460 		return (-1);
3461 	}
3462 	return (0);
3463 }
3464 
3465 int
3466 oce_update_stats(struct oce_softc *sc)
3467 {
3468 	struct ifnet *ifp = &sc->sc_ac.ac_if;
3469 	uint64_t rxe, txe;
3470 	int err;
3471 
3472 	if (ISSET(sc->sc_flags, OCE_F_BE2))
3473 		err = oce_stats_be2(sc, &rxe, &txe);
3474 	else if (ISSET(sc->sc_flags, OCE_F_BE3))
3475 		err = oce_stats_be3(sc, &rxe, &txe);
3476 	else
3477 		err = oce_stats_xe(sc, &rxe, &txe);
3478 	if (err)
3479 		return (err);
3480 
3481 	ifp->if_ierrors += (rxe > sc->sc_rx_errors) ?
3482 	    rxe - sc->sc_rx_errors : sc->sc_rx_errors - rxe;
3483 	sc->sc_rx_errors = rxe;
3484 	ifp->if_oerrors += (txe > sc->sc_tx_errors) ?
3485 	    txe - sc->sc_tx_errors : sc->sc_tx_errors - txe;
3486 	sc->sc_tx_errors = txe;
3487 
3488 	return (0);
3489 }
3490 
3491 int
3492 oce_stats_be2(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3493 {
3494 	struct mbx_get_nic_stats_v0 *cmd = sc->sc_statcmd;
3495 	struct oce_pmem_stats *ms;
3496 	struct oce_rxf_stats_v0 *rs;
3497 	struct oce_port_rxf_stats_v0 *ps;
3498 	int err;
3499 
3500 	memset(cmd, 0, sizeof(*cmd));
3501 
3502 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_STATS, OCE_MBX_VER_V0,
3503 	    cmd, sizeof(*cmd));
3504 	if (err)
3505 		return (err);
3506 
3507 	ms = &cmd->params.rsp.stats.pmem;
3508 	rs = &cmd->params.rsp.stats.rxf;
3509 	ps = &rs->port[sc->sc_port];
3510 
3511 	*rxe = ps->rx_crc_errors + ps->rx_in_range_errors +
3512 	    ps->rx_frame_too_long + ps->rx_dropped_runt +
3513 	    ps->rx_ip_checksum_errs + ps->rx_tcp_checksum_errs +
3514 	    ps->rx_udp_checksum_errs + ps->rxpp_fifo_overflow_drop +
3515 	    ps->rx_dropped_tcp_length + ps->rx_dropped_too_small +
3516 	    ps->rx_dropped_too_short + ps->rx_out_range_errors +
3517 	    ps->rx_dropped_header_too_small + ps->rx_input_fifo_overflow_drop +
3518 	    ps->rx_alignment_symbol_errors;
3519 	if (sc->sc_if_id)
3520 		*rxe += rs->port1_jabber_events;
3521 	else
3522 		*rxe += rs->port0_jabber_events;
3523 	*rxe += ms->eth_red_drops;
3524 
3525 	*txe = 0; /* hardware doesn't provide any extra tx error statistics */
3526 
3527 	return (0);
3528 }
3529 
3530 int
3531 oce_stats_be3(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3532 {
3533 	struct mbx_get_nic_stats *cmd = sc->sc_statcmd;
3534 	struct oce_pmem_stats *ms;
3535 	struct oce_rxf_stats_v1 *rs;
3536 	struct oce_port_rxf_stats_v1 *ps;
3537 	int err;
3538 
3539 	memset(cmd, 0, sizeof(*cmd));
3540 
3541 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_STATS, OCE_MBX_VER_V1,
3542 	    cmd, sizeof(*cmd));
3543 	if (err)
3544 		return (err);
3545 
3546 	ms = &cmd->params.rsp.stats.pmem;
3547 	rs = &cmd->params.rsp.stats.rxf;
3548 	ps = &rs->port[sc->sc_port];
3549 
3550 	*rxe = ps->rx_crc_errors + ps->rx_in_range_errors +
3551 	    ps->rx_frame_too_long + ps->rx_dropped_runt +
3552 	    ps->rx_ip_checksum_errs + ps->rx_tcp_checksum_errs +
3553 	    ps->rx_udp_checksum_errs + ps->rxpp_fifo_overflow_drop +
3554 	    ps->rx_dropped_tcp_length + ps->rx_dropped_too_small +
3555 	    ps->rx_dropped_too_short + ps->rx_out_range_errors +
3556 	    ps->rx_dropped_header_too_small + ps->rx_input_fifo_overflow_drop +
3557 	    ps->rx_alignment_symbol_errors + ps->jabber_events;
3558 	*rxe += ms->eth_red_drops;
3559 
3560 	*txe = 0; /* hardware doesn't provide any extra tx error statistics */
3561 
3562 	return (0);
3563 }
3564 
3565 int
3566 oce_stats_xe(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3567 {
3568 	struct mbx_get_pport_stats *cmd = sc->sc_statcmd;
3569 	struct oce_pport_stats *pps;
3570 	int err;
3571 
3572 	memset(cmd, 0, sizeof(*cmd));
3573 
3574 	cmd->params.req.reset_stats = 0;
3575 	cmd->params.req.port_number = sc->sc_if_id;
3576 
3577 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_PPORT_STATS,
3578 	    OCE_MBX_VER_V0, cmd, sizeof(*cmd));
3579 	if (err)
3580 		return (err);
3581 
3582 	pps = &cmd->params.rsp.pps;
3583 
3584 	*rxe = pps->rx_discards + pps->rx_errors + pps->rx_crc_errors +
3585 	    pps->rx_alignment_errors + pps->rx_symbol_errors +
3586 	    pps->rx_frames_too_long + pps->rx_internal_mac_errors +
3587 	    pps->rx_undersize_pkts + pps->rx_oversize_pkts + pps->rx_jabbers +
3588 	    pps->rx_control_frames_unknown_opcode + pps->rx_in_range_errors +
3589 	    pps->rx_out_of_range_errors + pps->rx_ip_checksum_errors +
3590 	    pps->rx_tcp_checksum_errors + pps->rx_udp_checksum_errors +
3591 	    pps->rx_fifo_overflow + pps->rx_input_fifo_overflow +
3592 	    pps->rx_drops_too_many_frags + pps->rx_drops_mtu;
3593 
3594 	*txe = pps->tx_discards + pps->tx_errors + pps->tx_internal_mac_errors;
3595 
3596 	return (0);
3597 }
3598