xref: /openbsd-src/sys/dev/pci/if_oce.c (revision 6f05df2d9be0954bec42d51d943d77bd250fb664)
1 /*	$OpenBSD: if_oce.c,v 1.79 2014/08/30 09:48:23 dlg Exp $	*/
2 
3 /*
4  * Copyright (c) 2012 Mike Belopuhov
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*-
20  * Copyright (C) 2012 Emulex
21  * All rights reserved.
22  *
23  * Redistribution and use in source and binary forms, with or without
24  * modification, are permitted provided that the following conditions are met:
25  *
26  * 1. Redistributions of source code must retain the above copyright notice,
27  *    this list of conditions and the following disclaimer.
28  *
29  * 2. Redistributions in binary form must reproduce the above copyright
30  *    notice, this list of conditions and the following disclaimer in the
31  *    documentation and/or other materials provided with the distribution.
32  *
33  * 3. Neither the name of the Emulex Corporation nor the names of its
34  *    contributors may be used to endorse or promote products derived from
35  *    this software without specific prior written permission.
36  *
37  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
38  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
39  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
40  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
41  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
42  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
43  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
44  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
45  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
46  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
47  * POSSIBILITY OF SUCH DAMAGE.
48  *
49  * Contact Information:
50  * freebsd-drivers@emulex.com
51  *
52  * Emulex
53  * 3333 Susan Street
54  * Costa Mesa, CA 92626
55  */
56 
57 #include "bpfilter.h"
58 #include "vlan.h"
59 
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/sockio.h>
63 #include <sys/mbuf.h>
64 #include <sys/malloc.h>
65 #include <sys/kernel.h>
66 #include <sys/device.h>
67 #include <sys/socket.h>
68 #include <sys/queue.h>
69 #include <sys/timeout.h>
70 #include <sys/pool.h>
71 
72 #include <net/if.h>
73 #include <net/if_dl.h>
74 #include <net/if_media.h>
75 
76 #ifdef INET
77 #include <netinet/in.h>
78 #include <netinet/if_ether.h>
79 #endif
80 
81 #ifdef INET6
82 #include <netinet/ip6.h>
83 #endif
84 
85 #if NBPFILTER > 0
86 #include <net/bpf.h>
87 #endif
88 
89 #if NVLAN > 0
90 #include <net/if_types.h>
91 #include <net/if_vlan_var.h>
92 #endif
93 
94 #include <dev/pci/pcireg.h>
95 #include <dev/pci/pcivar.h>
96 #include <dev/pci/pcidevs.h>
97 
98 #include <dev/pci/if_ocereg.h>
99 
100 #ifndef TRUE
101 #define TRUE			1
102 #endif
103 #ifndef FALSE
104 #define FALSE			0
105 #endif
106 
107 #define OCE_MBX_TIMEOUT		5
108 
109 #define OCE_MAX_PAYLOAD		65536
110 
111 #define OCE_TX_RING_SIZE	512
112 #define OCE_RX_RING_SIZE	1024
113 
114 /* This should be powers of 2. Like 2,4,8 & 16 */
115 #define OCE_MAX_RSS		4 /* TODO: 8 */
116 #define OCE_MAX_RQ		OCE_MAX_RSS + 1 /* one default queue */
117 #define OCE_MAX_WQ		8
118 
119 #define OCE_MAX_EQ		32
120 #define OCE_MAX_CQ		OCE_MAX_RQ + OCE_MAX_WQ + 1 /* one MCC queue */
121 #define OCE_MAX_CQ_EQ		8 /* Max CQ that can attached to an EQ */
122 
123 #define OCE_DEFAULT_EQD		80
124 
125 #define OCE_MIN_MTU		256
126 #define OCE_MAX_MTU		9000
127 
128 #define OCE_MAX_RQ_COMPL	64
129 #define OCE_MAX_RQ_POSTS	255
130 #define OCE_RX_BUF_SIZE		2048
131 
132 #define OCE_MAX_TX_ELEMENTS	29
133 #define OCE_MAX_TX_DESC		1024
134 #define OCE_MAX_TX_SIZE		65535
135 
136 #define OCE_MEM_KVA(_m)		((void *)((_m)->vaddr))
137 #define OCE_MEM_DVA(_m)		((_m)->paddr)
138 
139 #define OCE_WQ_FOREACH(sc, wq, i) 	\
140 	for (i = 0, wq = sc->sc_wq[0]; i < sc->sc_nwq; i++, wq = sc->sc_wq[i])
141 #define OCE_RQ_FOREACH(sc, rq, i) 	\
142 	for (i = 0, rq = sc->sc_rq[0]; i < sc->sc_nrq; i++, rq = sc->sc_rq[i])
143 #define OCE_EQ_FOREACH(sc, eq, i) 	\
144 	for (i = 0, eq = sc->sc_eq[0]; i < sc->sc_neq; i++, eq = sc->sc_eq[i])
145 #define OCE_CQ_FOREACH(sc, cq, i) 	\
146 	for (i = 0, cq = sc->sc_cq[0]; i < sc->sc_ncq; i++, cq = sc->sc_cq[i])
147 #define OCE_RING_FOREACH(_r, _v, _c)	\
148 	for ((_v) = oce_ring_first(_r); _c; (_v) = oce_ring_next(_r))
149 
150 static inline int
151 ilog2(unsigned int v)
152 {
153 	int r = 0;
154 
155 	while (v >>= 1)
156 		r++;
157 	return (r);
158 }
159 
160 struct oce_pkt {
161 	struct mbuf *		mbuf;
162 	bus_dmamap_t		map;
163 	int			nsegs;
164 	SIMPLEQ_ENTRY(oce_pkt)	entry;
165 };
166 SIMPLEQ_HEAD(oce_pkt_list, oce_pkt);
167 
168 struct oce_dma_mem {
169 	bus_dma_tag_t		tag;
170 	bus_dmamap_t		map;
171 	bus_dma_segment_t	segs;
172 	int			nsegs;
173 	bus_size_t		size;
174 	caddr_t			vaddr;
175 	bus_addr_t		paddr;
176 };
177 
178 struct oce_ring {
179 	int			index;
180 	int			nitems;
181 	int			nused;
182 	int			isize;
183 	struct oce_dma_mem	dma;
184 };
185 
186 struct oce_softc;
187 
188 enum cq_len {
189 	CQ_LEN_256  = 256,
190 	CQ_LEN_512  = 512,
191 	CQ_LEN_1024 = 1024
192 };
193 
194 enum eq_len {
195 	EQ_LEN_256  = 256,
196 	EQ_LEN_512  = 512,
197 	EQ_LEN_1024 = 1024,
198 	EQ_LEN_2048 = 2048,
199 	EQ_LEN_4096 = 4096
200 };
201 
202 enum eqe_size {
203 	EQE_SIZE_4  = 4,
204 	EQE_SIZE_16 = 16
205 };
206 
207 enum qtype {
208 	QTYPE_EQ,
209 	QTYPE_MQ,
210 	QTYPE_WQ,
211 	QTYPE_RQ,
212 	QTYPE_CQ,
213 	QTYPE_RSS
214 };
215 
216 struct oce_eq {
217 	struct oce_softc *	sc;
218 	struct oce_ring *	ring;
219 	enum qtype		type;
220 	int			id;
221 
222 	struct oce_cq *		cq[OCE_MAX_CQ_EQ];
223 	int			cq_valid;
224 
225 	int			nitems;
226 	int			isize;
227 	int			delay;
228 };
229 
230 struct oce_cq {
231 	struct oce_softc *	sc;
232 	struct oce_ring *	ring;
233 	enum qtype		type;
234 	int			id;
235 
236 	struct oce_eq *		eq;
237 
238 	void			(*cq_intr)(void *);
239 	void *			cb_arg;
240 
241 	int			nitems;
242 	int			nodelay;
243 	int			eventable;
244 	int			ncoalesce;
245 };
246 
247 struct oce_mq {
248 	struct oce_softc *	sc;
249 	struct oce_ring *	ring;
250 	enum qtype		type;
251 	int			id;
252 
253 	struct oce_cq *		cq;
254 
255 	int			nitems;
256 };
257 
258 struct oce_wq {
259 	struct oce_softc *	sc;
260 	struct oce_ring *	ring;
261 	enum qtype		type;
262 	int			id;
263 
264 	struct oce_cq *		cq;
265 
266 	struct oce_pkt_list	pkt_list;
267 	struct oce_pkt_list	pkt_free;
268 
269 	int			nitems;
270 };
271 
272 struct oce_rq {
273 	struct oce_softc *	sc;
274 	struct oce_ring *	ring;
275 	enum qtype		type;
276 	int			id;
277 
278 	struct oce_cq *		cq;
279 
280 	struct if_rxring	rxring;
281 	struct oce_pkt_list	pkt_list;
282 	struct oce_pkt_list	pkt_free;
283 
284 	uint32_t		rss_cpuid;
285 
286 #ifdef OCE_LRO
287 	struct lro_ctrl		lro;
288 	int			lro_pkts_queued;
289 #endif
290 
291 	int			nitems;
292 	int			fragsize;
293 	int			mtu;
294 	int			rss;
295 };
296 
297 struct oce_softc {
298 	struct device		sc_dev;
299 
300 	uint			sc_flags;
301 #define  OCE_F_BE2		 0x00000001
302 #define  OCE_F_BE3		 0x00000002
303 #define  OCE_F_XE201		 0x00000008
304 #define  OCE_F_BE3_NATIVE	 0x00000100
305 #define  OCE_F_RESET_RQD	 0x00001000
306 #define  OCE_F_MBOX_ENDIAN_RQD	 0x00002000
307 
308 	bus_dma_tag_t		sc_dmat;
309 
310 	bus_space_tag_t		sc_cfg_iot;
311 	bus_space_handle_t	sc_cfg_ioh;
312 	bus_size_t		sc_cfg_size;
313 
314 	bus_space_tag_t		sc_csr_iot;
315 	bus_space_handle_t	sc_csr_ioh;
316 	bus_size_t		sc_csr_size;
317 
318 	bus_space_tag_t		sc_db_iot;
319 	bus_space_handle_t	sc_db_ioh;
320 	bus_size_t		sc_db_size;
321 
322 	void *			sc_ih;
323 
324 	struct arpcom		sc_ac;
325 	struct ifmedia		sc_media;
326 	ushort			sc_link_up;
327 	ushort			sc_link_speed;
328 	uint			sc_fc;
329 
330 	struct oce_dma_mem	sc_mbx;
331 	struct oce_dma_mem	sc_pld;
332 
333 	uint			sc_port;
334 	uint			sc_fmode;
335 
336 	struct oce_wq *		sc_wq[OCE_MAX_WQ];	/* TX work queues */
337 	struct oce_rq *		sc_rq[OCE_MAX_RQ];	/* RX work queues */
338 	struct oce_cq *		sc_cq[OCE_MAX_CQ];	/* Completion queues */
339 	struct oce_eq *		sc_eq[OCE_MAX_EQ];	/* Event queues */
340 	struct oce_mq *		sc_mq;			/* Mailbox queue */
341 
342 	ushort			sc_neq;
343 	ushort			sc_ncq;
344 	ushort			sc_nrq;
345 	ushort			sc_nwq;
346 	ushort			sc_nintr;
347 
348 	ushort			sc_tx_ring_size;
349 	ushort			sc_rx_ring_size;
350 	ushort			sc_rss_enable;
351 
352 	uint32_t		sc_if_id;	/* interface ID */
353 	uint32_t		sc_pmac_id;	/* PMAC id */
354 	char			sc_macaddr[ETHER_ADDR_LEN];
355 
356 	uint32_t		sc_pvid;
357 
358 	uint64_t		sc_rx_errors;
359 	uint64_t		sc_tx_errors;
360 
361 	struct timeout		sc_tick;
362 	struct timeout		sc_rxrefill;
363 };
364 
365 #define IS_BE(sc)		ISSET((sc)->sc_flags, OCE_F_BE2 | OCE_F_BE3)
366 #define IS_XE201(sc)		ISSET((sc)->sc_flags, OCE_F_XE201)
367 
368 #define ADDR_HI(x)		((uint32_t)((uint64_t)(x) >> 32))
369 #define ADDR_LO(x)		((uint32_t)((uint64_t)(x) & 0xffffffff))
370 
371 #define IF_LRO_ENABLED(ifp)	ISSET((ifp)->if_capabilities, IFCAP_LRO)
372 
373 int 	oce_match(struct device *, void *, void *);
374 void	oce_attach(struct device *, struct device *, void *);
375 int 	oce_pci_alloc(struct oce_softc *, struct pci_attach_args *);
376 void	oce_attachhook(void *);
377 void	oce_attach_ifp(struct oce_softc *);
378 int 	oce_ioctl(struct ifnet *, u_long, caddr_t);
379 int	oce_rxrinfo(struct oce_softc *, struct if_rxrinfo *);
380 void	oce_iff(struct oce_softc *);
381 void	oce_link_status(struct oce_softc *);
382 void	oce_media_status(struct ifnet *, struct ifmediareq *);
383 int 	oce_media_change(struct ifnet *);
384 void	oce_tick(void *);
385 void	oce_init(void *);
386 void	oce_stop(struct oce_softc *);
387 void	oce_watchdog(struct ifnet *);
388 void	oce_start(struct ifnet *);
389 int	oce_encap(struct oce_softc *, struct mbuf **, int wqidx);
390 #ifdef OCE_TSO
391 struct mbuf *
392 	oce_tso(struct oce_softc *, struct mbuf **);
393 #endif
394 int 	oce_intr(void *);
395 void	oce_intr_wq(void *);
396 void	oce_txeof(struct oce_wq *);
397 void	oce_intr_rq(void *);
398 void	oce_rxeof(struct oce_rq *, struct oce_nic_rx_cqe *);
399 void	oce_rxeoc(struct oce_rq *, struct oce_nic_rx_cqe *);
400 int 	oce_vtp_valid(struct oce_softc *, struct oce_nic_rx_cqe *);
401 int 	oce_port_valid(struct oce_softc *, struct oce_nic_rx_cqe *);
402 #ifdef OCE_LRO
403 void	oce_flush_lro(struct oce_rq *);
404 int 	oce_init_lro(struct oce_softc *);
405 void	oce_free_lro(struct oce_softc *);
406 #endif
407 int	oce_get_buf(struct oce_rq *);
408 int	oce_alloc_rx_bufs(struct oce_rq *);
409 void	oce_refill_rx(void *);
410 void	oce_free_posted_rxbuf(struct oce_rq *);
411 void	oce_intr_mq(void *);
412 void	oce_link_event(struct oce_softc *,
413 	    struct oce_async_cqe_link_state *);
414 
415 int 	oce_init_queues(struct oce_softc *);
416 void	oce_release_queues(struct oce_softc *);
417 struct oce_wq *oce_create_wq(struct oce_softc *, struct oce_eq *);
418 void	oce_drain_wq(struct oce_wq *);
419 void	oce_destroy_wq(struct oce_wq *);
420 struct oce_rq *
421 	oce_create_rq(struct oce_softc *, struct oce_eq *, int rss);
422 void	oce_drain_rq(struct oce_rq *);
423 void	oce_destroy_rq(struct oce_rq *);
424 struct oce_eq *
425 	oce_create_eq(struct oce_softc *);
426 static inline void
427 	oce_arm_eq(struct oce_eq *, int neqe, int rearm, int clearint);
428 void	oce_drain_eq(struct oce_eq *);
429 void	oce_destroy_eq(struct oce_eq *);
430 struct oce_mq *
431 	oce_create_mq(struct oce_softc *, struct oce_eq *);
432 void	oce_drain_mq(struct oce_mq *);
433 void	oce_destroy_mq(struct oce_mq *);
434 struct oce_cq *
435 	oce_create_cq(struct oce_softc *, struct oce_eq *, int nitems,
436 	    int isize, int eventable, int nodelay, int ncoalesce);
437 static inline void
438 	oce_arm_cq(struct oce_cq *, int ncqe, int rearm);
439 void	oce_destroy_cq(struct oce_cq *);
440 
441 int	oce_dma_alloc(struct oce_softc *, bus_size_t, struct oce_dma_mem *);
442 void	oce_dma_free(struct oce_softc *, struct oce_dma_mem *);
443 #define	oce_dma_sync(d, f) \
444 	    bus_dmamap_sync((d)->tag, (d)->map, 0, (d)->map->dm_mapsize, f)
445 
446 struct oce_ring *
447 	oce_create_ring(struct oce_softc *, int nitems, int isize, int maxseg);
448 void	oce_destroy_ring(struct oce_softc *, struct oce_ring *);
449 int	oce_load_ring(struct oce_softc *, struct oce_ring *,
450 	    struct oce_pa *, int max_segs);
451 static inline void *
452 	oce_ring_get(struct oce_ring *);
453 static inline void *
454 	oce_ring_first(struct oce_ring *);
455 static inline void *
456 	oce_ring_next(struct oce_ring *);
457 struct oce_pkt *
458 	oce_pkt_alloc(struct oce_softc *, size_t size, int nsegs,
459 	    int maxsegsz);
460 void	oce_pkt_free(struct oce_softc *, struct oce_pkt *);
461 static inline struct oce_pkt *
462 	oce_pkt_get(struct oce_pkt_list *);
463 static inline void
464 	oce_pkt_put(struct oce_pkt_list *, struct oce_pkt *);
465 
466 int	oce_init_fw(struct oce_softc *);
467 int	oce_mbox_init(struct oce_softc *);
468 int	oce_mbox_dispatch(struct oce_softc *);
469 int	oce_cmd(struct oce_softc *, int subsys, int opcode, int version,
470 	    void *payload, int length);
471 void	oce_first_mcc(struct oce_softc *);
472 
473 int	oce_get_fw_config(struct oce_softc *);
474 int	oce_check_native_mode(struct oce_softc *);
475 int	oce_create_iface(struct oce_softc *, uint8_t *macaddr);
476 int	oce_config_vlan(struct oce_softc *, struct normal_vlan *vtags,
477 	    int nvtags, int untagged, int promisc);
478 int	oce_set_flow_control(struct oce_softc *, uint flags);
479 int	oce_config_rss(struct oce_softc *, int enable);
480 int	oce_update_mcast(struct oce_softc *, uint8_t multi[][ETHER_ADDR_LEN],
481 	    int naddr);
482 int	oce_set_promisc(struct oce_softc *, int enable);
483 int	oce_get_link_status(struct oce_softc *);
484 
485 void	oce_macaddr_set(struct oce_softc *);
486 int	oce_macaddr_get(struct oce_softc *, uint8_t *macaddr);
487 int	oce_macaddr_add(struct oce_softc *, uint8_t *macaddr, uint32_t *pmac);
488 int	oce_macaddr_del(struct oce_softc *, uint32_t pmac);
489 
490 int	oce_new_rq(struct oce_softc *, struct oce_rq *);
491 int	oce_new_wq(struct oce_softc *, struct oce_wq *);
492 int	oce_new_mq(struct oce_softc *, struct oce_mq *);
493 int	oce_new_eq(struct oce_softc *, struct oce_eq *);
494 int	oce_new_cq(struct oce_softc *, struct oce_cq *);
495 
496 static inline int
497 	oce_update_stats(struct oce_softc *);
498 int	oce_stats_be2(struct oce_softc *, uint64_t *, uint64_t *);
499 int	oce_stats_be3(struct oce_softc *, uint64_t *, uint64_t *);
500 int	oce_stats_xe(struct oce_softc *, uint64_t *, uint64_t *);
501 
502 struct pool *oce_pkt_pool;
503 
504 struct cfdriver oce_cd = {
505 	NULL, "oce", DV_IFNET
506 };
507 
508 struct cfattach oce_ca = {
509 	sizeof(struct oce_softc), oce_match, oce_attach, NULL, NULL
510 };
511 
512 const struct pci_matchid oce_devices[] = {
513 	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE2 },
514 	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE3 },
515 	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE2 },
516 	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE3 },
517 	{ PCI_VENDOR_EMULEX, PCI_PRODUCT_EMULEX_XE201 },
518 };
519 
520 int
521 oce_match(struct device *parent, void *match, void *aux)
522 {
523 	return (pci_matchbyid(aux, oce_devices, nitems(oce_devices)));
524 }
525 
526 void
527 oce_attach(struct device *parent, struct device *self, void *aux)
528 {
529 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
530 	struct oce_softc *sc = (struct oce_softc *)self;
531 	const char *intrstr = NULL;
532 	pci_intr_handle_t ih;
533 
534 	switch (PCI_PRODUCT(pa->pa_id)) {
535 	case PCI_PRODUCT_SERVERENGINES_BE2:
536 	case PCI_PRODUCT_SERVERENGINES_OCBE2:
537 		SET(sc->sc_flags, OCE_F_BE2);
538 		break;
539 	case PCI_PRODUCT_SERVERENGINES_BE3:
540 	case PCI_PRODUCT_SERVERENGINES_OCBE3:
541 		SET(sc->sc_flags, OCE_F_BE3);
542 		break;
543 	case PCI_PRODUCT_EMULEX_XE201:
544 		SET(sc->sc_flags, OCE_F_XE201);
545 		break;
546 	}
547 
548 	sc->sc_dmat = pa->pa_dmat;
549 	if (oce_pci_alloc(sc, pa))
550 		return;
551 
552 	sc->sc_tx_ring_size = OCE_TX_RING_SIZE;
553 	sc->sc_rx_ring_size = OCE_RX_RING_SIZE;
554 
555 	/* create the bootstrap mailbox */
556 	if (oce_dma_alloc(sc, sizeof(struct oce_bmbx), &sc->sc_mbx)) {
557 		printf(": failed to allocate mailbox memory\n");
558 		return;
559 	}
560 	if (oce_dma_alloc(sc, OCE_MAX_PAYLOAD, &sc->sc_pld)) {
561 		printf(": failed to allocate payload memory\n");
562 		goto fail_1;
563 	}
564 
565 	if (oce_init_fw(sc))
566 		goto fail_2;
567 
568 	if (oce_mbox_init(sc)) {
569 		printf(": failed to initialize mailbox\n");
570 		goto fail_2;
571 	}
572 
573 	if (oce_get_fw_config(sc)) {
574 		printf(": failed to get firmware configuration\n");
575 		goto fail_2;
576 	}
577 
578 	if (ISSET(sc->sc_flags, OCE_F_BE3)) {
579 		if (oce_check_native_mode(sc))
580 			goto fail_2;
581 	}
582 
583 	if (oce_macaddr_get(sc, sc->sc_macaddr)) {
584 		printf(": failed to fetch MAC address\n");
585 		goto fail_2;
586 	}
587 	memcpy(sc->sc_ac.ac_enaddr, sc->sc_macaddr, ETHER_ADDR_LEN);
588 
589 	if (oce_pkt_pool == NULL) {
590 		oce_pkt_pool = malloc(sizeof(struct pool), M_DEVBUF, M_NOWAIT);
591 		if (oce_pkt_pool == NULL) {
592 			printf(": unable to allocate descriptor pool\n");
593 			goto fail_2;
594 		}
595 		pool_init(oce_pkt_pool, sizeof(struct oce_pkt), 0, 0, 0,
596 		    "ocepkts", NULL);
597 	}
598 
599 	/* We allocate a single interrupt resource */
600 	sc->sc_nintr = 1;
601 	if (pci_intr_map_msi(pa, &ih) != 0 &&
602 	    pci_intr_map(pa, &ih) != 0) {
603 		printf(": couldn't map interrupt\n");
604 		goto fail_2;
605 	}
606 
607 	intrstr = pci_intr_string(pa->pa_pc, ih);
608 	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, oce_intr, sc,
609 	    sc->sc_dev.dv_xname);
610 	if (sc->sc_ih == NULL) {
611 		printf(": couldn't establish interrupt\n");
612 		if (intrstr != NULL)
613 			printf(" at %s", intrstr);
614 		printf("\n");
615 		goto fail_2;
616 	}
617 	printf(": %s", intrstr);
618 
619 	if (oce_init_queues(sc))
620 		goto fail_3;
621 
622 	oce_attach_ifp(sc);
623 
624 #ifdef OCE_LRO
625 	if (oce_init_lro(sc))
626 		goto fail_4;
627 #endif
628 
629 	timeout_set(&sc->sc_tick, oce_tick, sc);
630 	timeout_set(&sc->sc_rxrefill, oce_refill_rx, sc);
631 
632 	mountroothook_establish(oce_attachhook, sc);
633 
634 	printf(", address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));
635 
636 	return;
637 
638 #ifdef OCE_LRO
639 fail_4:
640 	oce_free_lro(sc);
641 	ether_ifdetach(&sc->sc_ac.ac_if);
642 	if_detach(&sc->sc_ac.ac_if);
643 	oce_release_queues(sc);
644 #endif
645 fail_3:
646 	pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
647 fail_2:
648 	oce_dma_free(sc, &sc->sc_pld);
649 fail_1:
650 	oce_dma_free(sc, &sc->sc_mbx);
651 }
652 
653 int
654 oce_pci_alloc(struct oce_softc *sc, struct pci_attach_args *pa)
655 {
656 	pcireg_t memtype, reg;
657 
658 	/* setup the device config region */
659 	if (ISSET(sc->sc_flags, OCE_F_BE2))
660 		reg = OCE_BAR_CFG_BE2;
661 	else
662 		reg = OCE_BAR_CFG;
663 
664 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
665 	if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_cfg_iot,
666 	    &sc->sc_cfg_ioh, NULL, &sc->sc_cfg_size,
667 	    IS_BE(sc) ? 0 : 32768)) {
668 		printf(": can't find cfg mem space\n");
669 		return (ENXIO);
670 	}
671 
672 	/*
673 	 * Read the SLI_INTF register and determine whether we
674 	 * can use this port and its features
675 	 */
676 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, OCE_INTF_REG_OFFSET);
677 	if (OCE_SLI_SIGNATURE(reg) != OCE_INTF_VALID_SIG) {
678 		printf(": invalid signature\n");
679 		goto fail_1;
680 	}
681 	if (OCE_SLI_REVISION(reg) != OCE_INTF_SLI_REV4) {
682 		printf(": unsupported SLI revision\n");
683 		goto fail_1;
684 	}
685 	if (OCE_SLI_IFTYPE(reg) == OCE_INTF_IF_TYPE_1)
686 		SET(sc->sc_flags, OCE_F_MBOX_ENDIAN_RQD);
687 	if (OCE_SLI_HINT1(reg) == OCE_INTF_FUNC_RESET_REQD)
688 		SET(sc->sc_flags, OCE_F_RESET_RQD);
689 
690 	/* Lancer has one BAR (CFG) but BE3 has three (CFG, CSR, DB) */
691 	if (IS_BE(sc)) {
692 		/* set up CSR region */
693 		reg = OCE_BAR_CSR;
694 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
695 		if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_csr_iot,
696 		    &sc->sc_csr_ioh, NULL, &sc->sc_csr_size, 0)) {
697 			printf(": can't find csr mem space\n");
698 			goto fail_1;
699 		}
700 
701 		/* set up DB doorbell region */
702 		reg = OCE_BAR_DB;
703 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
704 		if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_db_iot,
705 		    &sc->sc_db_ioh, NULL, &sc->sc_db_size, 0)) {
706 			printf(": can't find csr mem space\n");
707 			goto fail_2;
708 		}
709 	} else {
710 		sc->sc_csr_iot = sc->sc_db_iot = sc->sc_cfg_iot;
711 		sc->sc_csr_ioh = sc->sc_db_ioh = sc->sc_cfg_ioh;
712 	}
713 
714 	return (0);
715 
716 fail_2:
717 	bus_space_unmap(sc->sc_csr_iot, sc->sc_csr_ioh, sc->sc_csr_size);
718 fail_1:
719 	bus_space_unmap(sc->sc_cfg_iot, sc->sc_cfg_ioh, sc->sc_cfg_size);
720 	return (ENXIO);
721 }
722 
723 static inline uint32_t
724 oce_read_cfg(struct oce_softc *sc, bus_size_t off)
725 {
726 	bus_space_barrier(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, 4,
727 	    BUS_SPACE_BARRIER_READ);
728 	return (bus_space_read_4(sc->sc_cfg_iot, sc->sc_cfg_ioh, off));
729 }
730 
731 static inline uint32_t
732 oce_read_csr(struct oce_softc *sc, bus_size_t off)
733 {
734 	bus_space_barrier(sc->sc_csr_iot, sc->sc_csr_ioh, off, 4,
735 	    BUS_SPACE_BARRIER_READ);
736 	return (bus_space_read_4(sc->sc_csr_iot, sc->sc_csr_ioh, off));
737 }
738 
739 static inline uint32_t
740 oce_read_db(struct oce_softc *sc, bus_size_t off)
741 {
742 	bus_space_barrier(sc->sc_db_iot, sc->sc_db_ioh, off, 4,
743 	    BUS_SPACE_BARRIER_READ);
744 	return (bus_space_read_4(sc->sc_db_iot, sc->sc_db_ioh, off));
745 }
746 
747 static inline void
748 oce_write_cfg(struct oce_softc *sc, bus_size_t off, uint32_t val)
749 {
750 	bus_space_write_4(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, val);
751 	bus_space_barrier(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, 4,
752 	    BUS_SPACE_BARRIER_WRITE);
753 }
754 
755 static inline void
756 oce_write_csr(struct oce_softc *sc, bus_size_t off, uint32_t val)
757 {
758 	bus_space_write_4(sc->sc_csr_iot, sc->sc_csr_ioh, off, val);
759 	bus_space_barrier(sc->sc_csr_iot, sc->sc_csr_ioh, off, 4,
760 	    BUS_SPACE_BARRIER_WRITE);
761 }
762 
763 static inline void
764 oce_write_db(struct oce_softc *sc, bus_size_t off, uint32_t val)
765 {
766 	bus_space_write_4(sc->sc_db_iot, sc->sc_db_ioh, off, val);
767 	bus_space_barrier(sc->sc_db_iot, sc->sc_db_ioh, off, 4,
768 	    BUS_SPACE_BARRIER_WRITE);
769 }
770 
771 static inline void
772 oce_intr_enable(struct oce_softc *sc)
773 {
774 	uint32_t reg;
775 
776 	reg = oce_read_cfg(sc, PCI_INTR_CTRL);
777 	oce_write_cfg(sc, PCI_INTR_CTRL, reg | HOSTINTR_MASK);
778 }
779 
780 static inline void
781 oce_intr_disable(struct oce_softc *sc)
782 {
783 	uint32_t reg;
784 
785 	reg = oce_read_cfg(sc, PCI_INTR_CTRL);
786 	oce_write_cfg(sc, PCI_INTR_CTRL, reg & ~HOSTINTR_MASK);
787 }
788 
789 void
790 oce_attachhook(void *arg)
791 {
792 	struct oce_softc *sc = arg;
793 
794 	oce_get_link_status(sc);
795 
796 	oce_arm_cq(sc->sc_mq->cq, 0, TRUE);
797 
798 	/*
799 	 * We need to get MCC async events. So enable intrs and arm
800 	 * first EQ, Other EQs will be armed after interface is UP
801 	 */
802 	oce_intr_enable(sc);
803 	oce_arm_eq(sc->sc_eq[0], 0, TRUE, FALSE);
804 
805 	/*
806 	 * Send first mcc cmd and after that we get gracious
807 	 * MCC notifications from FW
808 	 */
809 	oce_first_mcc(sc);
810 }
811 
812 void
813 oce_attach_ifp(struct oce_softc *sc)
814 {
815 	struct ifnet *ifp = &sc->sc_ac.ac_if;
816 
817 	ifmedia_init(&sc->sc_media, IFM_IMASK, oce_media_change,
818 	    oce_media_status);
819 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
820 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
821 
822 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
823 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
824 	ifp->if_ioctl = oce_ioctl;
825 	ifp->if_start = oce_start;
826 	ifp->if_watchdog = oce_watchdog;
827 	ifp->if_hardmtu = OCE_MAX_MTU;
828 	ifp->if_softc = sc;
829 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_size - 1);
830 	IFQ_SET_READY(&ifp->if_snd);
831 
832 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
833 	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
834 
835 #if NVLAN > 0
836 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
837 #endif
838 
839 #ifdef OCE_TSO
840 	ifp->if_capabilities |= IFCAP_TSO;
841 	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
842 #endif
843 #ifdef OCE_LRO
844 	ifp->if_capabilities |= IFCAP_LRO;
845 #endif
846 
847 	if_attach(ifp);
848 	ether_ifattach(ifp);
849 }
850 
851 int
852 oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
853 {
854 	struct oce_softc *sc = ifp->if_softc;
855 	struct ifaddr *ifa = (struct ifaddr *)data;
856 	struct ifreq *ifr = (struct ifreq *)data;
857 	int s, error = 0;
858 
859 	s = splnet();
860 
861 	switch (command) {
862 	case SIOCSIFADDR:
863 		ifp->if_flags |= IFF_UP;
864 		if (!(ifp->if_flags & IFF_RUNNING))
865 			oce_init(sc);
866 #ifdef INET
867 		if (ifa->ifa_addr->sa_family == AF_INET)
868 			arp_ifinit(&sc->sc_ac, ifa);
869 #endif
870 		break;
871 	case SIOCSIFFLAGS:
872 		if (ifp->if_flags & IFF_UP) {
873 			if (ifp->if_flags & IFF_RUNNING)
874 				error = ENETRESET;
875 			else
876 				oce_init(sc);
877 		} else {
878 			if (ifp->if_flags & IFF_RUNNING)
879 				oce_stop(sc);
880 		}
881 		break;
882 	case SIOCGIFMEDIA:
883 	case SIOCSIFMEDIA:
884 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
885 		break;
886 	case SIOCGIFRXR:
887 		error = oce_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
888 		break;
889 	default:
890 		error = ether_ioctl(ifp, &sc->sc_ac, command, data);
891 		break;
892 	}
893 
894 	if (error == ENETRESET) {
895 		if (ifp->if_flags & IFF_RUNNING)
896 			oce_iff(sc);
897 		error = 0;
898 	}
899 
900 	splx(s);
901 
902 	return (error);
903 }
904 
905 int
906 oce_rxrinfo(struct oce_softc *sc, struct if_rxrinfo *ifri)
907 {
908 	struct if_rxring_info *ifr, ifr1;
909 	struct oce_rq *rq;
910 	int error, i;
911 	u_int n = 0;
912 
913 	if (sc->sc_nrq > 1) {
914 		if ((ifr = malloc(sc->sc_nrq * sizeof(*ifr), M_DEVBUF,
915 		    M_WAITOK | M_ZERO)) == NULL)
916 			return (ENOMEM);
917 	} else
918 		ifr = &ifr1;
919 
920 	OCE_RQ_FOREACH(sc, rq, i) {
921 		ifr[n].ifr_size = MCLBYTES;
922 		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "/%d", i);
923 		ifr[n].ifr_info = rq->rxring;
924 		n++;
925 	}
926 
927 	error = if_rxr_info_ioctl(ifri, sc->sc_nrq, ifr);
928 
929 	if (sc->sc_nrq > 1)
930 		free(ifr, M_DEVBUF, sc->sc_nrq * sizeof(*ifr));
931 	return (error);
932 }
933 
934 
935 void
936 oce_iff(struct oce_softc *sc)
937 {
938 	uint8_t multi[OCE_MAX_MC_FILTER_SIZE][ETHER_ADDR_LEN];
939 	struct arpcom *ac = &sc->sc_ac;
940 	struct ifnet *ifp = &ac->ac_if;
941 	struct ether_multi *enm;
942 	struct ether_multistep step;
943 	int naddr = 0, promisc = 0;
944 
945 	ifp->if_flags &= ~IFF_ALLMULTI;
946 
947 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
948 	    ac->ac_multicnt >= OCE_MAX_MC_FILTER_SIZE) {
949 		ifp->if_flags |= IFF_ALLMULTI;
950 		promisc = 1;
951 	} else {
952 		ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
953 		while (enm != NULL) {
954 			memcpy(multi[naddr++], enm->enm_addrlo, ETHER_ADDR_LEN);
955 			ETHER_NEXT_MULTI(step, enm);
956 		}
957 		oce_update_mcast(sc, multi, naddr);
958 	}
959 
960 	oce_set_promisc(sc, promisc);
961 }
962 
963 void
964 oce_link_status(struct oce_softc *sc)
965 {
966 	struct ifnet *ifp = &sc->sc_ac.ac_if;
967 	int link_state = LINK_STATE_DOWN;
968 
969 	ifp->if_baudrate = 0;
970 	if (sc->sc_link_up) {
971 		link_state = LINK_STATE_FULL_DUPLEX;
972 
973 		switch (sc->sc_link_speed) {
974 		case 1:
975 			ifp->if_baudrate = IF_Mbps(10);
976 			break;
977 		case 2:
978 			ifp->if_baudrate = IF_Mbps(100);
979 			break;
980 		case 3:
981 			ifp->if_baudrate = IF_Gbps(1);
982 			break;
983 		case 4:
984 			ifp->if_baudrate = IF_Gbps(10);
985 			break;
986 		}
987 	}
988 	if (ifp->if_link_state != link_state) {
989 		ifp->if_link_state = link_state;
990 		if_link_state_change(ifp);
991 	}
992 }
993 
994 void
995 oce_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
996 {
997 	struct oce_softc *sc = ifp->if_softc;
998 
999 	ifmr->ifm_status = IFM_AVALID;
1000 	ifmr->ifm_active = IFM_ETHER;
1001 
1002 	if (oce_get_link_status(sc) == 0)
1003 		oce_link_status(sc);
1004 
1005 	if (!sc->sc_link_up) {
1006 		ifmr->ifm_active |= IFM_NONE;
1007 		return;
1008 	}
1009 
1010 	ifmr->ifm_status |= IFM_ACTIVE;
1011 
1012 	switch (sc->sc_link_speed) {
1013 	case 1: /* 10 Mbps */
1014 		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1015 		break;
1016 	case 2: /* 100 Mbps */
1017 		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1018 		break;
1019 	case 3: /* 1 Gbps */
1020 		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1021 		break;
1022 	case 4: /* 10 Gbps */
1023 		ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1024 		break;
1025 	}
1026 
1027 	if (sc->sc_fc & IFM_ETH_RXPAUSE)
1028 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
1029 	if (sc->sc_fc & IFM_ETH_TXPAUSE)
1030 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
1031 }
1032 
1033 int
1034 oce_media_change(struct ifnet *ifp)
1035 {
1036 	return (0);
1037 }
1038 
1039 void
1040 oce_tick(void *arg)
1041 {
1042 	struct oce_softc *sc = arg;
1043 	int s;
1044 
1045 	s = splnet();
1046 
1047 	if (oce_update_stats(sc) == 0)
1048 		timeout_add_sec(&sc->sc_tick, 1);
1049 
1050 	splx(s);
1051 }
1052 
1053 void
1054 oce_init(void *arg)
1055 {
1056 	struct oce_softc *sc = arg;
1057 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1058 	struct oce_eq *eq;
1059 	struct oce_rq *rq;
1060 	struct oce_wq *wq;
1061 	int i;
1062 
1063 	oce_stop(sc);
1064 
1065 	DELAY(10);
1066 
1067 	oce_macaddr_set(sc);
1068 
1069 	oce_iff(sc);
1070 
1071 	/* Enable VLAN promiscuous mode */
1072 	if (oce_config_vlan(sc, NULL, 0, 1, 1))
1073 		goto error;
1074 
1075 	if (oce_set_flow_control(sc, IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE))
1076 		goto error;
1077 
1078 	OCE_RQ_FOREACH(sc, rq, i) {
1079 		rq->mtu = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1080 		    ETHER_VLAN_ENCAP_LEN;
1081 		if (oce_new_rq(sc, rq)) {
1082 			printf("%s: failed to create rq\n",
1083 			    sc->sc_dev.dv_xname);
1084 			goto error;
1085 		}
1086 		rq->ring->index	 = 0;
1087 
1088 		/* oce splits jumbos into 2k chunks... */
1089 		if_rxr_init(&rq->rxring, 8, rq->nitems);
1090 
1091 		if (!oce_alloc_rx_bufs(rq)) {
1092 			printf("%s: failed to allocate rx buffers\n",
1093 			    sc->sc_dev.dv_xname);
1094 			goto error;
1095 		}
1096 	}
1097 
1098 #ifdef OCE_RSS
1099 	/* RSS config */
1100 	if (sc->sc_rss_enable) {
1101 		if (oce_config_rss(sc, (uint8_t)sc->sc_if_id, 1)) {
1102 			printf("%s: failed to configure RSS\n",
1103 			    sc->sc_dev.dv_xname);
1104 			goto error;
1105 		}
1106 	}
1107 #endif
1108 
1109 	OCE_RQ_FOREACH(sc, rq, i)
1110 		oce_arm_cq(rq->cq, 0, TRUE);
1111 
1112 	OCE_WQ_FOREACH(sc, wq, i)
1113 		oce_arm_cq(wq->cq, 0, TRUE);
1114 
1115 	oce_arm_cq(sc->sc_mq->cq, 0, TRUE);
1116 
1117 	OCE_EQ_FOREACH(sc, eq, i)
1118 		oce_arm_eq(eq, 0, TRUE, FALSE);
1119 
1120 	if (oce_get_link_status(sc) == 0)
1121 		oce_link_status(sc);
1122 
1123 	ifp->if_flags |= IFF_RUNNING;
1124 	ifp->if_flags &= ~IFF_OACTIVE;
1125 
1126 	timeout_add_sec(&sc->sc_tick, 1);
1127 
1128 	oce_intr_enable(sc);
1129 
1130 	return;
1131 error:
1132 	oce_stop(sc);
1133 }
1134 
1135 void
1136 oce_stop(struct oce_softc *sc)
1137 {
1138 	struct mbx_delete_nic_rq cmd;
1139 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1140 	struct oce_rq *rq;
1141 	struct oce_wq *wq;
1142 	struct oce_eq *eq;
1143 	int i;
1144 
1145 	timeout_del(&sc->sc_tick);
1146 	timeout_del(&sc->sc_rxrefill);
1147 
1148 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1149 
1150 	/* Stop intrs and finish any bottom halves pending */
1151 	oce_intr_disable(sc);
1152 
1153 	/* Invalidate any pending cq and eq entries */
1154 	OCE_EQ_FOREACH(sc, eq, i)
1155 		oce_drain_eq(eq);
1156 	OCE_RQ_FOREACH(sc, rq, i) {
1157 		/* destroy the work queue in the firmware */
1158 		memset(&cmd, 0, sizeof(cmd));
1159 		cmd.params.req.rq_id = htole16(rq->id);
1160 		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_RQ,
1161 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
1162 		DELAY(1000);
1163 		oce_drain_rq(rq);
1164 		oce_free_posted_rxbuf(rq);
1165 	}
1166 	OCE_WQ_FOREACH(sc, wq, i)
1167 		oce_drain_wq(wq);
1168 }
1169 
1170 void
1171 oce_watchdog(struct ifnet *ifp)
1172 {
1173 	printf("%s: watchdog timeout -- resetting\n", ifp->if_xname);
1174 
1175 	oce_init(ifp->if_softc);
1176 
1177 	ifp->if_oerrors++;
1178 }
1179 
1180 void
1181 oce_start(struct ifnet *ifp)
1182 {
1183 	struct oce_softc *sc = ifp->if_softc;
1184 	struct mbuf *m;
1185 	int pkts = 0;
1186 
1187 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1188 		return;
1189 
1190 	for (;;) {
1191 		IFQ_DEQUEUE(&ifp->if_snd, m);
1192 		if (m == NULL)
1193 			break;
1194 
1195 		if (oce_encap(sc, &m, 0)) {
1196 			ifp->if_flags |= IFF_OACTIVE;
1197 			break;
1198 		}
1199 
1200 #if NBPFILTER > 0
1201 		if (ifp->if_bpf)
1202 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1203 #endif
1204 		pkts++;
1205 	}
1206 
1207 	/* Set a timeout in case the chip goes out to lunch */
1208 	if (pkts)
1209 		ifp->if_timer = 5;
1210 }
1211 
1212 int
1213 oce_encap(struct oce_softc *sc, struct mbuf **mpp, int wqidx)
1214 {
1215 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1216 	struct mbuf *m = *mpp;
1217 	struct oce_wq *wq = sc->sc_wq[wqidx];
1218 	struct oce_pkt *pkt = NULL;
1219 	struct oce_nic_hdr_wqe *nhe;
1220 	struct oce_nic_frag_wqe *nfe;
1221 	int i, nwqe, err;
1222 
1223 #ifdef OCE_TSO
1224 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1225 		/* consolidate packet buffers for TSO/LSO segment offload */
1226 		m = oce_tso(sc, mpp);
1227 		if (m == NULL)
1228 			goto error;
1229 	}
1230 #endif
1231 
1232 	if ((pkt = oce_pkt_get(&wq->pkt_free)) == NULL)
1233 		goto error;
1234 
1235 	err = bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, m, BUS_DMA_NOWAIT);
1236 	if (err == EFBIG) {
1237 		if (m_defrag(m, M_DONTWAIT) ||
1238 		    bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, m,
1239 			BUS_DMA_NOWAIT))
1240 			goto error;
1241 		*mpp = m;
1242 	} else if (err != 0)
1243 		goto error;
1244 
1245 	pkt->nsegs = pkt->map->dm_nsegs;
1246 
1247 	nwqe = pkt->nsegs + 1;
1248 	if (IS_BE(sc)) {
1249 		/* BE2 and BE3 require even number of WQEs */
1250 		if (nwqe & 1)
1251 			nwqe++;
1252 	}
1253 
1254 	/* Fail if there's not enough free WQEs */
1255 	if (nwqe >= wq->ring->nitems - wq->ring->nused) {
1256 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
1257 		goto error;
1258 	}
1259 
1260 	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1261 	    BUS_DMASYNC_PREWRITE);
1262 	pkt->mbuf = m;
1263 
1264 	/* TX work queue entry for the header */
1265 	nhe = oce_ring_get(wq->ring);
1266 	memset(nhe, 0, sizeof(*nhe));
1267 
1268 	nhe->u0.s.complete = 1;
1269 	nhe->u0.s.event = 1;
1270 	nhe->u0.s.crc = 1;
1271 	nhe->u0.s.forward = 0;
1272 	nhe->u0.s.ipcs = (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) ? 1 : 0;
1273 	nhe->u0.s.udpcs = (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) ? 1 : 0;
1274 	nhe->u0.s.tcpcs = (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) ? 1 : 0;
1275 	nhe->u0.s.num_wqe = nwqe;
1276 	nhe->u0.s.total_length = m->m_pkthdr.len;
1277 
1278 #if NVLAN > 0
1279 	if (m->m_flags & M_VLANTAG) {
1280 		nhe->u0.s.vlan = 1; /* Vlan present */
1281 		nhe->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
1282 	}
1283 #endif
1284 
1285 #ifdef OCE_TSO
1286 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1287 		if (m->m_pkthdr.tso_segsz) {
1288 			nhe->u0.s.lso = 1;
1289 			nhe->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
1290 		}
1291 		if (!IS_BE(sc))
1292 			nhe->u0.s.ipcs = 1;
1293 	}
1294 #endif
1295 
1296 	oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_PREREAD |
1297 	    BUS_DMASYNC_PREWRITE);
1298 
1299 	wq->ring->nused++;
1300 
1301 	/* TX work queue entries for data chunks */
1302 	for (i = 0; i < pkt->nsegs; i++) {
1303 		nfe = oce_ring_get(wq->ring);
1304 		memset(nfe, 0, sizeof(*nfe));
1305 		nfe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[i].ds_addr);
1306 		nfe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[i].ds_addr);
1307 		nfe->u0.s.frag_len = pkt->map->dm_segs[i].ds_len;
1308 		wq->ring->nused++;
1309 	}
1310 	if (nwqe > (pkt->nsegs + 1)) {
1311 		nfe = oce_ring_get(wq->ring);
1312 		memset(nfe, 0, sizeof(*nfe));
1313 		wq->ring->nused++;
1314 		pkt->nsegs++;
1315 	}
1316 
1317 	oce_pkt_put(&wq->pkt_list, pkt);
1318 
1319 	ifp->if_opackets++;
1320 
1321 	oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_POSTREAD |
1322 	    BUS_DMASYNC_POSTWRITE);
1323 
1324 	oce_write_db(sc, PD_TXULP_DB, wq->id | (nwqe << 16));
1325 
1326 	return (0);
1327 
1328 error:
1329 	if (pkt)
1330 		oce_pkt_put(&wq->pkt_free, pkt);
1331 	m_freem(*mpp);
1332 	*mpp = NULL;
1333 	return (1);
1334 }
1335 
1336 #ifdef OCE_TSO
1337 struct mbuf *
1338 oce_tso(struct oce_softc *sc, struct mbuf **mpp)
1339 {
1340 	struct mbuf *m;
1341 #ifdef INET
1342 	struct ip *ip;
1343 #endif
1344 #ifdef INET6
1345 	struct ip6_hdr *ip6;
1346 #endif
1347 	struct ether_vlan_header *eh;
1348 	struct tcphdr *th;
1349 	uint16_t etype;
1350 	int total_len = 0, ehdrlen = 0;
1351 
1352 	m = *mpp;
1353 
1354 	if (M_WRITABLE(m) == 0) {
1355 		m = m_dup(*mpp, M_DONTWAIT);
1356 		if (!m)
1357 			return (NULL);
1358 		m_freem(*mpp);
1359 		*mpp = m;
1360 	}
1361 
1362 	eh = mtod(m, struct ether_vlan_header *);
1363 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1364 		etype = ntohs(eh->evl_proto);
1365 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1366 	} else {
1367 		etype = ntohs(eh->evl_encap_proto);
1368 		ehdrlen = ETHER_HDR_LEN;
1369 	}
1370 
1371 	switch (etype) {
1372 #ifdef INET
1373 	case ETHERTYPE_IP:
1374 		ip = (struct ip *)(m->m_data + ehdrlen);
1375 		if (ip->ip_p != IPPROTO_TCP)
1376 			return (NULL);
1377 		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1378 
1379 		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1380 		break;
1381 #endif
1382 #ifdef INET6
1383 	case ETHERTYPE_IPV6:
1384 		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1385 		if (ip6->ip6_nxt != IPPROTO_TCP)
1386 			return NULL;
1387 		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1388 
1389 		total_len = ehdrlen + sizeof(struct ip6_hdr) +
1390 		    (th->th_off << 2);
1391 		break;
1392 #endif
1393 	default:
1394 		return (NULL);
1395 	}
1396 
1397 	m = m_pullup(m, total_len);
1398 	if (!m)
1399 		return (NULL);
1400 	*mpp = m;
1401 	return (m);
1402 
1403 }
1404 #endif /* OCE_TSO */
1405 
1406 int
1407 oce_intr(void *arg)
1408 {
1409 	struct oce_softc *sc = arg;
1410 	struct oce_eq *eq = sc->sc_eq[0];
1411 	struct oce_eqe *eqe;
1412 	struct oce_cq *cq = NULL;
1413 	int i, neqe = 0;
1414 
1415 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTREAD);
1416 
1417 	OCE_RING_FOREACH(eq->ring, eqe, eqe->evnt != 0) {
1418 		eqe->evnt = 0;
1419 		neqe++;
1420 	}
1421 
1422 	/* Spurious? */
1423 	if (!neqe) {
1424 		oce_arm_eq(eq, 0, TRUE, FALSE);
1425 		return (0);
1426 	}
1427 
1428 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_PREWRITE);
1429 
1430  	/* Clear EQ entries, but dont arm */
1431 	oce_arm_eq(eq, neqe, FALSE, TRUE);
1432 
1433 	/* Process TX, RX and MCC completion queues */
1434 	for (i = 0; i < eq->cq_valid; i++) {
1435 		cq = eq->cq[i];
1436 		(*cq->cq_intr)(cq->cb_arg);
1437 		oce_arm_cq(cq, 0, TRUE);
1438 	}
1439 
1440 	oce_arm_eq(eq, 0, TRUE, FALSE);
1441 	return (1);
1442 }
1443 
1444 /* Handle the Completion Queue for transmit */
1445 void
1446 oce_intr_wq(void *arg)
1447 {
1448 	struct oce_wq *wq = (struct oce_wq *)arg;
1449 	struct oce_cq *cq = wq->cq;
1450 	struct oce_nic_tx_cqe *cqe;
1451 	struct oce_softc *sc = wq->sc;
1452 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1453 	int ncqe = 0;
1454 
1455 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
1456 	OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
1457 		oce_txeof(wq);
1458 		WQ_CQE_INVALIDATE(cqe);
1459 		ncqe++;
1460 	}
1461 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
1462 
1463 	if (ifp->if_flags & IFF_OACTIVE) {
1464 		if (wq->ring->nused < (wq->ring->nitems / 2)) {
1465 			ifp->if_flags &= ~IFF_OACTIVE;
1466 			oce_start(ifp);
1467 		}
1468 	}
1469 	if (wq->ring->nused == 0)
1470 		ifp->if_timer = 0;
1471 
1472 	if (ncqe)
1473 		oce_arm_cq(cq, ncqe, FALSE);
1474 }
1475 
1476 void
1477 oce_txeof(struct oce_wq *wq)
1478 {
1479 	struct oce_softc *sc = wq->sc;
1480 	struct oce_pkt *pkt;
1481 	struct mbuf *m;
1482 
1483 	if ((pkt = oce_pkt_get(&wq->pkt_list)) == NULL) {
1484 		printf("%s: missing descriptor in txeof\n",
1485 		    sc->sc_dev.dv_xname);
1486 		return;
1487 	}
1488 
1489 	wq->ring->nused -= pkt->nsegs + 1;
1490 	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1491 	    BUS_DMASYNC_POSTWRITE);
1492 	bus_dmamap_unload(sc->sc_dmat, pkt->map);
1493 
1494 	m = pkt->mbuf;
1495 	m_freem(m);
1496 	pkt->mbuf = NULL;
1497 	oce_pkt_put(&wq->pkt_free, pkt);
1498 }
1499 
1500 /* Handle the Completion Queue for receive */
1501 void
1502 oce_intr_rq(void *arg)
1503 {
1504 	struct oce_rq *rq = (struct oce_rq *)arg;
1505 	struct oce_cq *cq = rq->cq;
1506 	struct oce_softc *sc = rq->sc;
1507 	struct oce_nic_rx_cqe *cqe;
1508 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1509 	int maxrx, ncqe = 0;
1510 
1511 	maxrx = IS_XE201(sc) ? 8 : OCE_MAX_RQ_COMPL;
1512 
1513 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
1514 
1515 	OCE_RING_FOREACH(cq->ring, cqe, RQ_CQE_VALID(cqe) && ncqe <= maxrx) {
1516 		if (cqe->u0.s.error == 0) {
1517 			if (cqe->u0.s.pkt_size == 0)
1518 				/* partial DMA workaround for Lancer */
1519 				oce_rxeoc(rq, cqe);
1520 			else
1521 				oce_rxeof(rq, cqe);
1522 		} else {
1523 			ifp->if_ierrors++;
1524 			if (IS_XE201(sc))
1525 				/* Lancer A0 no buffer workaround */
1526 				oce_rxeoc(rq, cqe);
1527 			else
1528 				/* Post L3/L4 errors to stack.*/
1529 				oce_rxeof(rq, cqe);
1530 		}
1531 #ifdef OCE_LRO
1532 		if (IF_LRO_ENABLED(ifp) && rq->lro_pkts_queued >= 16)
1533 			oce_flush_lro(rq);
1534 #endif
1535 		RQ_CQE_INVALIDATE(cqe);
1536 		ncqe++;
1537 	}
1538 
1539 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
1540 
1541 #ifdef OCE_LRO
1542 	if (IF_LRO_ENABLED(ifp))
1543 		oce_flush_lro(rq);
1544 #endif
1545 
1546 	if (ncqe) {
1547 		oce_arm_cq(cq, ncqe, FALSE);
1548 		if (!oce_alloc_rx_bufs(rq))
1549 			timeout_add(&sc->sc_rxrefill, 1);
1550 	}
1551 }
1552 
1553 void
1554 oce_rxeof(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1555 {
1556 	struct oce_softc *sc = rq->sc;
1557 	struct oce_pkt *pkt = NULL;
1558 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1559 	struct mbuf *m = NULL, *tail = NULL;
1560 	int i, len, frag_len;
1561 	uint16_t vtag;
1562 
1563 	len = cqe->u0.s.pkt_size;
1564 
1565 	 /* Get vlan_tag value */
1566 	if (IS_BE(sc))
1567 		vtag = ntohs(cqe->u0.s.vlan_tag);
1568 	else
1569 		vtag = cqe->u0.s.vlan_tag;
1570 
1571 	for (i = 0; i < cqe->u0.s.num_fragments; i++) {
1572 		if ((pkt = oce_pkt_get(&rq->pkt_list)) == NULL) {
1573 			printf("%s: missing descriptor in rxeof\n",
1574 			    sc->sc_dev.dv_xname);
1575 			goto exit;
1576 		}
1577 
1578 		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1579 		    BUS_DMASYNC_POSTREAD);
1580 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
1581 		if_rxr_put(&rq->rxring, 1);
1582 
1583 		frag_len = (len > rq->fragsize) ? rq->fragsize : len;
1584 		pkt->mbuf->m_len = frag_len;
1585 
1586 		if (tail != NULL) {
1587 			/* additional fragments */
1588 			pkt->mbuf->m_flags &= ~M_PKTHDR;
1589 			tail->m_next = pkt->mbuf;
1590 			tail = pkt->mbuf;
1591 		} else {
1592 			/* first fragment, fill out most of the header */
1593 			pkt->mbuf->m_pkthdr.len = len;
1594 			pkt->mbuf->m_pkthdr.csum_flags = 0;
1595 			if (cqe->u0.s.ip_cksum_pass) {
1596 				if (!cqe->u0.s.ip_ver) { /* IPV4 */
1597 					pkt->mbuf->m_pkthdr.csum_flags =
1598 					    M_IPV4_CSUM_IN_OK;
1599 				}
1600 			}
1601 			if (cqe->u0.s.l4_cksum_pass) {
1602 				pkt->mbuf->m_pkthdr.csum_flags |=
1603 				    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
1604 			}
1605 			m = tail = pkt->mbuf;
1606 		}
1607 		pkt->mbuf = NULL;
1608 		oce_pkt_put(&rq->pkt_free, pkt);
1609 		len -= frag_len;
1610 	}
1611 
1612 	if (m) {
1613 		if (!oce_port_valid(sc, cqe)) {
1614 			 m_freem(m);
1615 			 goto exit;
1616 		}
1617 
1618 		m->m_pkthdr.rcvif = ifp;
1619 
1620 #if NVLAN > 0
1621 		/* This determines if vlan tag is valid */
1622 		if (oce_vtp_valid(sc, cqe)) {
1623 			if (sc->sc_fmode & FNM_FLEX10_MODE) {
1624 				/* FLEX10. If QnQ is not set, neglect VLAN */
1625 				if (cqe->u0.s.qnq) {
1626 					m->m_pkthdr.ether_vtag = vtag;
1627 					m->m_flags |= M_VLANTAG;
1628 				}
1629 			} else if (sc->sc_pvid != (vtag & VLAN_VID_MASK))  {
1630 				/*
1631 				 * In UMC mode generally pvid will be striped.
1632 				 * But in some cases we have seen it comes
1633 				 * with pvid. So if pvid == vlan, neglect vlan.
1634 				 */
1635 				m->m_pkthdr.ether_vtag = vtag;
1636 				m->m_flags |= M_VLANTAG;
1637 			}
1638 		}
1639 #endif
1640 
1641 		ifp->if_ipackets++;
1642 
1643 #ifdef OCE_LRO
1644 		/* Try to queue to LRO */
1645 		if (IF_LRO_ENABLED(ifp) && !(m->m_flags & M_VLANTAG) &&
1646 		    cqe->u0.s.ip_cksum_pass && cqe->u0.s.l4_cksum_pass &&
1647 		    !cqe->u0.s.ip_ver && rq->lro.lro_cnt != 0) {
1648 
1649 			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1650 				rq->lro_pkts_queued ++;
1651 				goto exit;
1652 			}
1653 			/* If LRO posting fails then try to post to STACK */
1654 		}
1655 #endif
1656 
1657 #if NBPFILTER > 0
1658 		if (ifp->if_bpf)
1659 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
1660 #endif
1661 
1662 		ether_input_mbuf(ifp, m);
1663 	}
1664 exit:
1665 	return;
1666 }
1667 
1668 void
1669 oce_rxeoc(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1670 {
1671 	struct oce_softc *sc = rq->sc;
1672 	struct oce_pkt *pkt;
1673 	int i, num_frags = cqe->u0.s.num_fragments;
1674 
1675 	if (IS_XE201(sc) && cqe->u0.s.error) {
1676 		/*
1677 		 * Lancer A0 workaround:
1678 		 * num_frags will be 1 more than actual in case of error
1679 		 */
1680 		if (num_frags)
1681 			num_frags--;
1682 	}
1683 	for (i = 0; i < num_frags; i++) {
1684 		if ((pkt = oce_pkt_get(&rq->pkt_list)) == NULL) {
1685 			printf("%s: missing descriptor in rxeoc\n",
1686 			    sc->sc_dev.dv_xname);
1687 			return;
1688 		}
1689 		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1690 		    BUS_DMASYNC_POSTREAD);
1691 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
1692 		if_rxr_put(&rq->rxring, 1);
1693 		m_freem(pkt->mbuf);
1694 		oce_pkt_put(&rq->pkt_free, pkt);
1695 	}
1696 }
1697 
1698 int
1699 oce_vtp_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
1700 {
1701 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1702 
1703 	if (IS_BE(sc) && ISSET(sc->sc_flags, OCE_F_BE3_NATIVE)) {
1704 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1705 		return (cqe_v1->u0.s.vlan_tag_present);
1706 	}
1707 	return (cqe->u0.s.vlan_tag_present);
1708 }
1709 
1710 int
1711 oce_port_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
1712 {
1713 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1714 
1715 	if (IS_BE(sc) && ISSET(sc->sc_flags, OCE_F_BE3_NATIVE)) {
1716 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1717 		if (sc->sc_port != cqe_v1->u0.s.port)
1718 			return (0);
1719 	}
1720 	return (1);
1721 }
1722 
1723 #ifdef OCE_LRO
1724 void
1725 oce_flush_lro(struct oce_rq *rq)
1726 {
1727 	struct oce_softc *sc = rq->sc;
1728 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1729 	struct lro_ctrl	*lro = &rq->lro;
1730 	struct lro_entry *queued;
1731 
1732 	if (!IF_LRO_ENABLED(ifp))
1733 		return;
1734 
1735 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
1736 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
1737 		tcp_lro_flush(lro, queued);
1738 	}
1739 	rq->lro_pkts_queued = 0;
1740 }
1741 
1742 int
1743 oce_init_lro(struct oce_softc *sc)
1744 {
1745 	struct lro_ctrl *lro = NULL;
1746 	int i = 0, rc = 0;
1747 
1748 	for (i = 0; i < sc->sc_nrq; i++) {
1749 		lro = &sc->sc_rq[i]->lro;
1750 		rc = tcp_lro_init(lro);
1751 		if (rc != 0) {
1752 			printf("%s: LRO init failed\n",
1753 			    sc->sc_dev.dv_xname);
1754 			return rc;
1755 		}
1756 		lro->ifp = &sc->sc_ac.ac_if;
1757 	}
1758 
1759 	return (rc);
1760 }
1761 
1762 void
1763 oce_free_lro(struct oce_softc *sc)
1764 {
1765 	struct lro_ctrl *lro = NULL;
1766 	int i = 0;
1767 
1768 	for (i = 0; i < sc->sc_nrq; i++) {
1769 		lro = &sc->sc_rq[i]->lro;
1770 		if (lro)
1771 			tcp_lro_free(lro);
1772 	}
1773 }
1774 #endif /* OCE_LRO */
1775 
1776 int
1777 oce_get_buf(struct oce_rq *rq)
1778 {
1779 	struct oce_softc *sc = rq->sc;
1780 	struct oce_pkt *pkt;
1781 	struct oce_nic_rqe *rqe;
1782 
1783 	if ((pkt = oce_pkt_get(&rq->pkt_free)) == NULL)
1784 		return (0);
1785 
1786 	pkt->mbuf = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
1787 	if (pkt->mbuf == NULL) {
1788 		oce_pkt_put(&rq->pkt_free, pkt);
1789 		return (0);
1790 	}
1791 
1792 	pkt->mbuf->m_len = pkt->mbuf->m_pkthdr.len = MCLBYTES;
1793 	m_adj(pkt->mbuf, ETHER_ALIGN);
1794 
1795 	if (bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, pkt->mbuf,
1796 	    BUS_DMA_NOWAIT)) {
1797 		m_freem(pkt->mbuf);
1798 		pkt->mbuf = NULL;
1799 		oce_pkt_put(&rq->pkt_free, pkt);
1800 		return (0);
1801 	}
1802 
1803 	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1804 	    BUS_DMASYNC_PREREAD);
1805 
1806 	oce_dma_sync(&rq->ring->dma, BUS_DMASYNC_PREREAD |
1807 	    BUS_DMASYNC_PREWRITE);
1808 
1809 	rqe = oce_ring_get(rq->ring);
1810 	rqe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[0].ds_addr);
1811 	rqe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[0].ds_addr);
1812 
1813 	oce_dma_sync(&rq->ring->dma, BUS_DMASYNC_POSTREAD |
1814 	    BUS_DMASYNC_POSTWRITE);
1815 
1816 	oce_pkt_put(&rq->pkt_list, pkt);
1817 
1818 	return (1);
1819 }
1820 
1821 int
1822 oce_alloc_rx_bufs(struct oce_rq *rq)
1823 {
1824 	struct oce_softc *sc = rq->sc;
1825 	int i, nbufs = 0;
1826 	u_int slots;
1827 
1828 	for (slots = if_rxr_get(&rq->rxring, rq->nitems); slots > 0; slots--) {
1829 		if (oce_get_buf(rq) == 0)
1830 			break;
1831 
1832 		nbufs++;
1833 	}
1834 	if_rxr_put(&rq->rxring, slots);
1835 
1836 	if (!nbufs)
1837 		return (0);
1838 	for (i = nbufs / OCE_MAX_RQ_POSTS; i > 0; i--) {
1839 		oce_write_db(sc, PD_RXULP_DB, rq->id |
1840 		    (OCE_MAX_RQ_POSTS << 24));
1841 		nbufs -= OCE_MAX_RQ_POSTS;
1842 	}
1843 	if (nbufs > 0)
1844 		oce_write_db(sc, PD_RXULP_DB, rq->id | (nbufs << 24));
1845 	return (1);
1846 }
1847 
1848 void
1849 oce_refill_rx(void *arg)
1850 {
1851 	struct oce_softc *sc = arg;
1852 	struct oce_rq *rq;
1853 	int i, s;
1854 
1855 	s = splnet();
1856 	OCE_RQ_FOREACH(sc, rq, i) {
1857 		if (!oce_alloc_rx_bufs(rq))
1858 			timeout_add(&sc->sc_rxrefill, 5);
1859 	}
1860 	splx(s);
1861 }
1862 
1863 /* Handle the Completion Queue for the Mailbox/Async notifications */
1864 void
1865 oce_intr_mq(void *arg)
1866 {
1867 	struct oce_mq *mq = (struct oce_mq *)arg;
1868 	struct oce_softc *sc = mq->sc;
1869 	struct oce_cq *cq = mq->cq;
1870 	struct oce_mq_cqe *cqe;
1871 	struct oce_async_cqe_link_state *acqe;
1872 	struct oce_async_event_grp5_pvid_state *gcqe;
1873 	int evtype, optype, ncqe = 0;
1874 
1875 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
1876 
1877 	OCE_RING_FOREACH(cq->ring, cqe, MQ_CQE_VALID(cqe)) {
1878 		if (cqe->u0.s.async_event) {
1879 			evtype = cqe->u0.s.event_type;
1880 			optype = cqe->u0.s.async_type;
1881 			if (evtype  == ASYNC_EVENT_CODE_LINK_STATE) {
1882 				/* Link status evt */
1883 				acqe = (struct oce_async_cqe_link_state *)cqe;
1884 				oce_link_event(sc, acqe);
1885 			} else if ((evtype == ASYNC_EVENT_GRP5) &&
1886 				   (optype == ASYNC_EVENT_PVID_STATE)) {
1887 				/* GRP5 PVID */
1888 				gcqe =
1889 				(struct oce_async_event_grp5_pvid_state *)cqe;
1890 				if (gcqe->enabled)
1891 					sc->sc_pvid =
1892 					    gcqe->tag & VLAN_VID_MASK;
1893 				else
1894 					sc->sc_pvid = 0;
1895 			}
1896 		}
1897 		MQ_CQE_INVALIDATE(cqe);
1898 		ncqe++;
1899 	}
1900 
1901 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
1902 
1903 	if (ncqe)
1904 		oce_arm_cq(cq, ncqe, FALSE);
1905 }
1906 
1907 void
1908 oce_link_event(struct oce_softc *sc, struct oce_async_cqe_link_state *acqe)
1909 {
1910 	/* Update Link status */
1911 	sc->sc_link_up = ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
1912 	    ASYNC_EVENT_LINK_UP);
1913 	/* Update speed */
1914 	sc->sc_link_speed = acqe->u0.s.speed;
1915 	oce_link_status(sc);
1916 }
1917 
1918 int
1919 oce_init_queues(struct oce_softc *sc)
1920 {
1921 	struct oce_wq *wq;
1922 	struct oce_rq *rq;
1923 	int i;
1924 
1925 	sc->sc_nrq = 1;
1926 	sc->sc_nwq = 1;
1927 
1928 	/* Create network interface on card */
1929 	if (oce_create_iface(sc, sc->sc_macaddr))
1930 		goto error;
1931 
1932 	/* create all of the event queues */
1933 	for (i = 0; i < sc->sc_nintr; i++) {
1934 		sc->sc_eq[i] = oce_create_eq(sc);
1935 		if (!sc->sc_eq[i])
1936 			goto error;
1937 	}
1938 
1939 	/* alloc tx queues */
1940 	OCE_WQ_FOREACH(sc, wq, i) {
1941 		sc->sc_wq[i] = oce_create_wq(sc, sc->sc_eq[i]);
1942 		if (!sc->sc_wq[i])
1943 			goto error;
1944 	}
1945 
1946 	/* alloc rx queues */
1947 	OCE_RQ_FOREACH(sc, rq, i) {
1948 		sc->sc_rq[i] = oce_create_rq(sc, sc->sc_eq[i > 0 ? i - 1 : 0],
1949 		    i > 0 ? sc->sc_rss_enable : 0);
1950 		if (!sc->sc_rq[i])
1951 			goto error;
1952 	}
1953 
1954 	/* alloc mailbox queue */
1955 	sc->sc_mq = oce_create_mq(sc, sc->sc_eq[0]);
1956 	if (!sc->sc_mq)
1957 		goto error;
1958 
1959 	return (0);
1960 error:
1961 	oce_release_queues(sc);
1962 	return (1);
1963 }
1964 
1965 void
1966 oce_release_queues(struct oce_softc *sc)
1967 {
1968 	struct oce_wq *wq;
1969 	struct oce_rq *rq;
1970 	struct oce_eq *eq;
1971 	int i;
1972 
1973 	OCE_RQ_FOREACH(sc, rq, i) {
1974 		if (rq)
1975 			oce_destroy_rq(sc->sc_rq[i]);
1976 	}
1977 
1978 	OCE_WQ_FOREACH(sc, wq, i) {
1979 		if (wq)
1980 			oce_destroy_wq(sc->sc_wq[i]);
1981 	}
1982 
1983 	if (sc->sc_mq)
1984 		oce_destroy_mq(sc->sc_mq);
1985 
1986 	OCE_EQ_FOREACH(sc, eq, i) {
1987 		if (eq)
1988 			oce_destroy_eq(sc->sc_eq[i]);
1989 	}
1990 }
1991 
1992 /**
1993  * @brief 		Function to create a WQ for NIC Tx
1994  * @param sc 		software handle to the device
1995  * @returns		the pointer to the WQ created or NULL on failure
1996  */
1997 struct oce_wq *
1998 oce_create_wq(struct oce_softc *sc, struct oce_eq *eq)
1999 {
2000 	struct oce_wq *wq;
2001 	struct oce_cq *cq;
2002 	struct oce_pkt *pkt;
2003 	int i;
2004 
2005 	if (sc->sc_tx_ring_size < 256 || sc->sc_tx_ring_size > 2048)
2006 		return (NULL);
2007 
2008 	wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
2009 	if (!wq)
2010 		return (NULL);
2011 
2012 	wq->ring = oce_create_ring(sc, sc->sc_tx_ring_size, NIC_WQE_SIZE, 8);
2013 	if (!wq->ring) {
2014 		free(wq, M_DEVBUF, 0);
2015 		return (NULL);
2016 	}
2017 
2018 	cq = oce_create_cq(sc, eq, CQ_LEN_512, sizeof(struct oce_nic_tx_cqe),
2019 	    1, 0, 3);
2020 	if (!cq) {
2021 		oce_destroy_ring(sc, wq->ring);
2022 		free(wq, M_DEVBUF, 0);
2023 		return (NULL);
2024 	}
2025 
2026 	wq->id = -1;
2027 	wq->sc = sc;
2028 
2029 	wq->cq = cq;
2030 	wq->nitems = sc->sc_tx_ring_size;
2031 
2032 	SIMPLEQ_INIT(&wq->pkt_free);
2033 	SIMPLEQ_INIT(&wq->pkt_list);
2034 
2035 	for (i = 0; i < sc->sc_tx_ring_size / 2; i++) {
2036 		pkt = oce_pkt_alloc(sc, OCE_MAX_TX_SIZE, OCE_MAX_TX_ELEMENTS,
2037 		    PAGE_SIZE);
2038 		if (pkt == NULL) {
2039 			oce_destroy_wq(wq);
2040 			return (NULL);
2041 		}
2042 		oce_pkt_put(&wq->pkt_free, pkt);
2043 	}
2044 
2045 	if (oce_new_wq(sc, wq)) {
2046 		oce_destroy_wq(wq);
2047 		return (NULL);
2048 	}
2049 
2050 	eq->cq[eq->cq_valid] = cq;
2051 	eq->cq_valid++;
2052 	cq->cb_arg = wq;
2053 	cq->cq_intr = oce_intr_wq;
2054 
2055 	return (wq);
2056 }
2057 
2058 void
2059 oce_drain_wq(struct oce_wq *wq)
2060 {
2061 	struct oce_cq *cq = wq->cq;
2062 	struct oce_nic_tx_cqe *cqe;
2063 	int ncqe = 0;
2064 
2065 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2066 	OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
2067 		WQ_CQE_INVALIDATE(cqe);
2068 		ncqe++;
2069 	}
2070 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2071 	oce_arm_cq(cq, ncqe, FALSE);
2072 }
2073 
2074 void
2075 oce_destroy_wq(struct oce_wq *wq)
2076 {
2077 	struct mbx_delete_nic_wq cmd;
2078 	struct oce_softc *sc = wq->sc;
2079 	struct oce_pkt *pkt;
2080 
2081 	if (wq->id >= 0) {
2082 		memset(&cmd, 0, sizeof(cmd));
2083 		cmd.params.req.wq_id = htole16(wq->id);
2084 		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_WQ, OCE_MBX_VER_V0,
2085 		    &cmd, sizeof(cmd));
2086 	}
2087 	if (wq->cq != NULL)
2088 		oce_destroy_cq(wq->cq);
2089 	if (wq->ring != NULL)
2090 		oce_destroy_ring(sc, wq->ring);
2091 	while ((pkt = oce_pkt_get(&wq->pkt_free)) != NULL)
2092 		oce_pkt_free(sc, pkt);
2093 	free(wq, M_DEVBUF, 0);
2094 }
2095 
2096 /**
2097  * @brief 		function to allocate receive queue resources
2098  * @param sc		software handle to the device
2099  * @param eq		pointer to associated event queue
2100  * @param rss		is-rss-queue flag
2101  * @returns		the pointer to the RQ created or NULL on failure
2102  */
2103 struct oce_rq *
2104 oce_create_rq(struct oce_softc *sc, struct oce_eq *eq, int rss)
2105 {
2106 	struct oce_rq *rq;
2107 	struct oce_cq *cq;
2108 	struct oce_pkt *pkt;
2109 	int i;
2110 
2111 	/* Hardware doesn't support any other value */
2112 	if (sc->sc_rx_ring_size != 1024)
2113 		return (NULL);
2114 
2115 	rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
2116 	if (!rq)
2117 		return (NULL);
2118 
2119 	rq->ring = oce_create_ring(sc, sc->sc_rx_ring_size,
2120 	    sizeof(struct oce_nic_rqe), 2);
2121 	if (!rq->ring) {
2122 		free(rq, M_DEVBUF, 0);
2123 		return (NULL);
2124 	}
2125 
2126 	cq = oce_create_cq(sc, eq, CQ_LEN_1024, sizeof(struct oce_nic_rx_cqe),
2127 	    1, 0, 3);
2128 	if (!cq) {
2129 		oce_destroy_ring(sc, rq->ring);
2130 		free(rq, M_DEVBUF, 0);
2131 		return (NULL);
2132 	}
2133 
2134 	rq->id = -1;
2135 	rq->sc = sc;
2136 
2137 	rq->nitems = sc->sc_rx_ring_size;
2138 	rq->fragsize = OCE_RX_BUF_SIZE;
2139 	rq->rss = rss;
2140 
2141 	SIMPLEQ_INIT(&rq->pkt_free);
2142 	SIMPLEQ_INIT(&rq->pkt_list);
2143 
2144 	for (i = 0; i < sc->sc_rx_ring_size; i++) {
2145 		pkt = oce_pkt_alloc(sc, OCE_RX_BUF_SIZE, 1, OCE_RX_BUF_SIZE);
2146 		if (pkt == NULL) {
2147 			oce_destroy_rq(rq);
2148 			return (NULL);
2149 		}
2150 		oce_pkt_put(&rq->pkt_free, pkt);
2151 	}
2152 
2153 	rq->cq = cq;
2154 	eq->cq[eq->cq_valid] = cq;
2155 	eq->cq_valid++;
2156 	cq->cb_arg = rq;
2157 	cq->cq_intr = oce_intr_rq;
2158 
2159 	/* RX queue is created in oce_init */
2160 
2161 	return (rq);
2162 }
2163 
2164 void
2165 oce_drain_rq(struct oce_rq *rq)
2166 {
2167 	struct oce_nic_rx_cqe *cqe;
2168 	struct oce_cq *cq = rq->cq;
2169 	int ncqe = 0;
2170 
2171 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2172 	OCE_RING_FOREACH(cq->ring, cqe, RQ_CQE_VALID(cqe)) {
2173 		RQ_CQE_INVALIDATE(cqe);
2174 		ncqe++;
2175 	}
2176 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2177 	oce_arm_cq(cq, ncqe, FALSE);
2178 }
2179 
2180 void
2181 oce_destroy_rq(struct oce_rq *rq)
2182 {
2183 	struct mbx_delete_nic_rq cmd;
2184 	struct oce_softc *sc = rq->sc;
2185 	struct oce_pkt *pkt;
2186 
2187 	if (rq->id >= 0) {
2188 		memset(&cmd, 0, sizeof(cmd));
2189 		cmd.params.req.rq_id = htole16(rq->id);
2190 		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_RQ, OCE_MBX_VER_V0,
2191 		    &cmd, sizeof(cmd));
2192 	}
2193 	if (rq->cq != NULL)
2194 		oce_destroy_cq(rq->cq);
2195 	if (rq->ring != NULL)
2196 		oce_destroy_ring(sc, rq->ring);
2197 	while ((pkt = oce_pkt_get(&rq->pkt_free)) != NULL)
2198 		oce_pkt_free(sc, pkt);
2199 	free(rq, M_DEVBUF, 0);
2200 }
2201 
2202 struct oce_eq *
2203 oce_create_eq(struct oce_softc *sc)
2204 {
2205 	struct oce_eq *eq;
2206 
2207 	/* allocate an eq */
2208 	eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
2209 	if (eq == NULL)
2210 		return (NULL);
2211 
2212 	eq->ring = oce_create_ring(sc, EQ_LEN_1024, EQE_SIZE_4, 8);
2213 	if (!eq->ring) {
2214 		free(eq, M_DEVBUF, 0);
2215 		return (NULL);
2216 	}
2217 
2218 	eq->id = -1;
2219 	eq->sc = sc;
2220 	eq->nitems = EQ_LEN_1024;	/* length of event queue */
2221 	eq->isize = EQE_SIZE_4; 	/* size of a queue item */
2222 	eq->delay = OCE_DEFAULT_EQD;	/* event queue delay */
2223 
2224 	if (oce_new_eq(sc, eq)) {
2225 		oce_destroy_ring(sc, eq->ring);
2226 		free(eq, M_DEVBUF, 0);
2227 		return (NULL);
2228 	}
2229 
2230 	return (eq);
2231 }
2232 
2233 /**
2234  * @brief		Function to arm an EQ so that it can generate events
2235  * @param eq		pointer to event queue structure
2236  * @param neqe		number of EQEs to arm
2237  * @param rearm		rearm bit enable/disable
2238  * @param clearint	bit to clear the interrupt condition because of which
2239  *			EQEs are generated
2240  */
2241 static inline void
2242 oce_arm_eq(struct oce_eq *eq, int neqe, int rearm, int clearint)
2243 {
2244 	oce_write_db(eq->sc, PD_EQ_DB, eq->id | PD_EQ_DB_EVENT |
2245 	    (clearint << 9) | (neqe << 16) | (rearm << 29));
2246 }
2247 
2248 void
2249 oce_drain_eq(struct oce_eq *eq)
2250 {
2251 	struct oce_eqe *eqe;
2252 	int neqe = 0;
2253 
2254 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTREAD);
2255 	OCE_RING_FOREACH(eq->ring, eqe, eqe->evnt != 0) {
2256 		eqe->evnt = 0;
2257 		neqe++;
2258 	}
2259 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_PREWRITE);
2260 	oce_arm_eq(eq, neqe, FALSE, TRUE);
2261 }
2262 
2263 void
2264 oce_destroy_eq(struct oce_eq *eq)
2265 {
2266 	struct mbx_destroy_common_eq cmd;
2267 	struct oce_softc *sc = eq->sc;
2268 
2269 	if (eq->id >= 0) {
2270 		memset(&cmd, 0, sizeof(cmd));
2271 		cmd.params.req.id = htole16(eq->id);
2272 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_EQ,
2273 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2274 	}
2275 	if (eq->ring != NULL)
2276 		oce_destroy_ring(sc, eq->ring);
2277 	free(eq, M_DEVBUF, 0);
2278 }
2279 
2280 struct oce_mq *
2281 oce_create_mq(struct oce_softc *sc, struct oce_eq *eq)
2282 {
2283 	struct oce_mq *mq = NULL;
2284 	struct oce_cq *cq;
2285 
2286 	/* allocate the mq */
2287 	mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
2288 	if (!mq)
2289 		return (NULL);
2290 
2291 	mq->ring = oce_create_ring(sc, 128, sizeof(struct oce_mbx), 8);
2292 	if (!mq->ring) {
2293 		free(mq, M_DEVBUF, 0);
2294 		return (NULL);
2295 	}
2296 
2297 	cq = oce_create_cq(sc, eq, CQ_LEN_256, sizeof(struct oce_mq_cqe),
2298 	    1, 0, 0);
2299 	if (!cq) {
2300 		oce_destroy_ring(sc, mq->ring);
2301 		free(mq, M_DEVBUF, 0);
2302 		return (NULL);
2303 	}
2304 
2305 	mq->id = -1;
2306 	mq->sc = sc;
2307 	mq->cq = cq;
2308 
2309 	mq->nitems = 128;
2310 
2311 	if (oce_new_mq(sc, mq)) {
2312 		oce_destroy_cq(mq->cq);
2313 		oce_destroy_ring(sc, mq->ring);
2314 		free(mq, M_DEVBUF, 0);
2315 		return (NULL);
2316 	}
2317 
2318 	eq->cq[eq->cq_valid] = cq;
2319 	eq->cq_valid++;
2320 	mq->cq->eq = eq;
2321 	mq->cq->cb_arg = mq;
2322 	mq->cq->cq_intr = oce_intr_mq;
2323 
2324 	return (mq);
2325 }
2326 
2327 void
2328 oce_drain_mq(struct oce_mq *mq)
2329 {
2330 	struct oce_cq *cq = mq->cq;
2331 	struct oce_mq_cqe *cqe;
2332 	int ncqe = 0;
2333 
2334 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2335 	OCE_RING_FOREACH(cq->ring, cqe, MQ_CQE_VALID(cqe)) {
2336 		MQ_CQE_INVALIDATE(cqe);
2337 		ncqe++;
2338 	}
2339 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2340 	oce_arm_cq(cq, ncqe, FALSE);
2341 }
2342 
2343 void
2344 oce_destroy_mq(struct oce_mq *mq)
2345 {
2346 	struct mbx_destroy_common_mq cmd;
2347 	struct oce_softc *sc = mq->sc;
2348 
2349 	if (mq->id >= 0) {
2350 		memset(&cmd, 0, sizeof(cmd));
2351 		cmd.params.req.id = htole16(mq->id);
2352 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_MQ,
2353 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2354 	}
2355 	if (mq->ring != NULL)
2356 		oce_destroy_ring(sc, mq->ring);
2357 	if (mq->cq != NULL)
2358 		oce_destroy_cq(mq->cq);
2359 	free(mq, M_DEVBUF, 0);
2360 }
2361 
2362 /**
2363  * @brief		Function to create a completion queue
2364  * @param sc		software handle to the device
2365  * @param eq		optional eq to be associated with to the cq
2366  * @param nitems	length of completion queue
2367  * @param isize		size of completion queue items
2368  * @param eventable	event table
2369  * @param nodelay	no delay flag
2370  * @param ncoalesce	no coalescence flag
2371  * @returns 		pointer to the cq created, NULL on failure
2372  */
2373 struct oce_cq *
2374 oce_create_cq(struct oce_softc *sc, struct oce_eq *eq, int nitems, int isize,
2375     int eventable, int nodelay, int ncoalesce)
2376 {
2377 	struct oce_cq *cq = NULL;
2378 
2379 	cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
2380 	if (!cq)
2381 		return (NULL);
2382 
2383 	cq->ring = oce_create_ring(sc, nitems, isize, 4);
2384 	if (!cq->ring) {
2385 		free(cq, M_DEVBUF, 0);
2386 		return (NULL);
2387 	}
2388 
2389 	cq->sc = sc;
2390 	cq->eq = eq;
2391 	cq->nitems = nitems;
2392 	cq->nodelay = nodelay;
2393 	cq->ncoalesce = ncoalesce;
2394 	cq->eventable = eventable;
2395 
2396 	if (oce_new_cq(sc, cq)) {
2397 		oce_destroy_ring(sc, cq->ring);
2398 		free(cq, M_DEVBUF, 0);
2399 		return (NULL);
2400 	}
2401 
2402 	sc->sc_cq[sc->sc_ncq++] = cq;
2403 
2404 	return (cq);
2405 }
2406 
2407 void
2408 oce_destroy_cq(struct oce_cq *cq)
2409 {
2410 	struct mbx_destroy_common_cq cmd;
2411 	struct oce_softc *sc = cq->sc;
2412 
2413 	if (cq->id >= 0) {
2414 		memset(&cmd, 0, sizeof(cmd));
2415 		cmd.params.req.id = htole16(cq->id);
2416 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_CQ,
2417 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2418 	}
2419 	if (cq->ring != NULL)
2420 		oce_destroy_ring(sc, cq->ring);
2421 	free(cq, M_DEVBUF, 0);
2422 }
2423 
2424 /**
2425  * @brief		Function to arm a CQ with CQEs
2426  * @param cq		pointer to the completion queue structure
2427  * @param ncqe		number of CQEs to arm
2428  * @param rearm		rearm bit enable/disable
2429  */
2430 static inline void
2431 oce_arm_cq(struct oce_cq *cq, int ncqe, int rearm)
2432 {
2433 	oce_write_db(cq->sc, PD_CQ_DB, cq->id | (ncqe << 16) | (rearm << 29));
2434 }
2435 
2436 void
2437 oce_free_posted_rxbuf(struct oce_rq *rq)
2438 {
2439 	struct oce_softc *sc = rq->sc;
2440 	struct oce_pkt *pkt;
2441 
2442 	while ((pkt = oce_pkt_get(&rq->pkt_list)) != NULL) {
2443 		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
2444 		    BUS_DMASYNC_POSTREAD);
2445 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
2446 		if (pkt->mbuf != NULL) {
2447 			m_freem(pkt->mbuf);
2448 			pkt->mbuf = NULL;
2449 		}
2450 		oce_pkt_put(&rq->pkt_free, pkt);
2451 		if_rxr_put(&rq->rxring, 1);
2452 	}
2453 }
2454 
2455 int
2456 oce_dma_alloc(struct oce_softc *sc, bus_size_t size, struct oce_dma_mem *dma)
2457 {
2458 	int rc;
2459 
2460 	memset(dma, 0, sizeof(struct oce_dma_mem));
2461 
2462 	dma->tag = sc->sc_dmat;
2463 	rc = bus_dmamap_create(dma->tag, size, 1, size, 0, BUS_DMA_NOWAIT,
2464 	    &dma->map);
2465 	if (rc != 0) {
2466 		printf("%s: failed to allocate DMA handle",
2467 		    sc->sc_dev.dv_xname);
2468 		goto fail_0;
2469 	}
2470 
2471 	rc = bus_dmamem_alloc(dma->tag, size, PAGE_SIZE, 0, &dma->segs, 1,
2472 	    &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
2473 	if (rc != 0) {
2474 		printf("%s: failed to allocate DMA memory",
2475 		    sc->sc_dev.dv_xname);
2476 		goto fail_1;
2477 	}
2478 
2479 	rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
2480 	    &dma->vaddr, BUS_DMA_NOWAIT);
2481 	if (rc != 0) {
2482 		printf("%s: failed to map DMA memory", sc->sc_dev.dv_xname);
2483 		goto fail_2;
2484 	}
2485 
2486 	rc = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, NULL,
2487 	    BUS_DMA_NOWAIT);
2488 	if (rc != 0) {
2489 		printf("%s: failed to load DMA memory", sc->sc_dev.dv_xname);
2490 		goto fail_3;
2491 	}
2492 
2493 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2494 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2495 
2496 	dma->paddr = dma->map->dm_segs[0].ds_addr;
2497 	dma->size = size;
2498 
2499 	return (0);
2500 
2501 fail_3:
2502 	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
2503 fail_2:
2504 	bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2505 fail_1:
2506 	bus_dmamap_destroy(dma->tag, dma->map);
2507 fail_0:
2508 	return (rc);
2509 }
2510 
2511 void
2512 oce_dma_free(struct oce_softc *sc, struct oce_dma_mem *dma)
2513 {
2514 	if (dma->tag == NULL)
2515 		return;
2516 
2517 	if (dma->map != NULL) {
2518 		oce_dma_sync(dma, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2519 		bus_dmamap_unload(dma->tag, dma->map);
2520 
2521 		if (dma->vaddr != 0) {
2522 			bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2523 			dma->vaddr = 0;
2524 		}
2525 
2526 		bus_dmamap_destroy(dma->tag, dma->map);
2527 		dma->map = NULL;
2528 		dma->tag = NULL;
2529 	}
2530 }
2531 
2532 struct oce_ring *
2533 oce_create_ring(struct oce_softc *sc, int nitems, int isize, int maxsegs)
2534 {
2535 	struct oce_dma_mem *dma;
2536 	struct oce_ring *ring;
2537 	bus_size_t size = nitems * isize;
2538 	int rc;
2539 
2540 	if (size > maxsegs * PAGE_SIZE)
2541 		return (NULL);
2542 
2543 	ring = malloc(sizeof(struct oce_ring), M_DEVBUF, M_NOWAIT | M_ZERO);
2544 	if (ring == NULL)
2545 		return (NULL);
2546 
2547 	ring->isize = isize;
2548 	ring->nitems = nitems;
2549 
2550 	dma = &ring->dma;
2551 	dma->tag = sc->sc_dmat;
2552 	rc = bus_dmamap_create(dma->tag, size, maxsegs, PAGE_SIZE, 0,
2553 	    BUS_DMA_NOWAIT, &dma->map);
2554 	if (rc != 0) {
2555 		printf("%s: failed to allocate DMA handle",
2556 		    sc->sc_dev.dv_xname);
2557 		goto fail_0;
2558 	}
2559 
2560 	rc = bus_dmamem_alloc(dma->tag, size, 0, 0, &dma->segs, maxsegs,
2561 	    &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
2562 	if (rc != 0) {
2563 		printf("%s: failed to allocate DMA memory",
2564 		    sc->sc_dev.dv_xname);
2565 		goto fail_1;
2566 	}
2567 
2568 	rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
2569 	    &dma->vaddr, BUS_DMA_NOWAIT);
2570 	if (rc != 0) {
2571 		printf("%s: failed to map DMA memory", sc->sc_dev.dv_xname);
2572 		goto fail_2;
2573 	}
2574 
2575 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2576 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2577 
2578 	dma->paddr = 0;
2579 	dma->size = size;
2580 
2581 	return (ring);
2582 
2583 fail_2:
2584 	bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2585 fail_1:
2586 	bus_dmamap_destroy(dma->tag, dma->map);
2587 fail_0:
2588 	free(ring, M_DEVBUF, 0);
2589 	return (NULL);
2590 }
2591 
2592 void
2593 oce_destroy_ring(struct oce_softc *sc, struct oce_ring *ring)
2594 {
2595 	oce_dma_free(sc, &ring->dma);
2596 	free(ring, M_DEVBUF, 0);
2597 }
2598 
2599 int
2600 oce_load_ring(struct oce_softc *sc, struct oce_ring *ring,
2601     struct oce_pa *pa, int maxsegs)
2602 {
2603 	struct oce_dma_mem *dma = &ring->dma;
2604 	int i;
2605 
2606 	if (bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
2607 	    ring->isize * ring->nitems, NULL, BUS_DMA_NOWAIT)) {
2608 		printf("%s: failed to load a ring map\n", sc->sc_dev.dv_xname);
2609 		return (0);
2610 	}
2611 
2612 	if (dma->map->dm_nsegs > maxsegs) {
2613 		printf("%s: too many segments\n", sc->sc_dev.dv_xname);
2614 		return (0);
2615 	}
2616 
2617 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2618 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2619 
2620 	for (i = 0; i < dma->map->dm_nsegs; i++)
2621 		pa[i].addr = dma->map->dm_segs[i].ds_addr;
2622 
2623 	return (dma->map->dm_nsegs);
2624 }
2625 
2626 static inline void *
2627 oce_ring_get(struct oce_ring *ring)
2628 {
2629 	int index = ring->index;
2630 
2631 	if (++ring->index == ring->nitems)
2632 		ring->index = 0;
2633 	return ((void *)(ring->dma.vaddr + index * ring->isize));
2634 }
2635 
2636 static inline void *
2637 oce_ring_first(struct oce_ring *ring)
2638 {
2639 	return ((void *)(ring->dma.vaddr + ring->index * ring->isize));
2640 }
2641 
2642 static inline void *
2643 oce_ring_next(struct oce_ring *ring)
2644 {
2645 	if (++ring->index == ring->nitems)
2646 		ring->index = 0;
2647 	return ((void *)(ring->dma.vaddr + ring->index * ring->isize));
2648 }
2649 
2650 struct oce_pkt *
2651 oce_pkt_alloc(struct oce_softc *sc, size_t size, int nsegs, int maxsegsz)
2652 {
2653 	struct oce_pkt *pkt;
2654 
2655 	if ((pkt = pool_get(oce_pkt_pool, PR_NOWAIT | PR_ZERO)) == NULL)
2656 		return (NULL);
2657 
2658 	if (bus_dmamap_create(sc->sc_dmat, size, nsegs, maxsegsz, 0,
2659 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &pkt->map)) {
2660 		pool_put(oce_pkt_pool, pkt);
2661 		return (NULL);
2662 	}
2663 
2664 	return (pkt);
2665 }
2666 
2667 void
2668 oce_pkt_free(struct oce_softc *sc, struct oce_pkt *pkt)
2669 {
2670 	if (pkt->map) {
2671 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
2672 		bus_dmamap_destroy(sc->sc_dmat, pkt->map);
2673 	}
2674 	pool_put(oce_pkt_pool, pkt);
2675 }
2676 
2677 static inline struct oce_pkt *
2678 oce_pkt_get(struct oce_pkt_list *lst)
2679 {
2680 	struct oce_pkt *pkt;
2681 
2682 	pkt = SIMPLEQ_FIRST(lst);
2683 	if (pkt == NULL)
2684 		return (NULL);
2685 
2686 	SIMPLEQ_REMOVE_HEAD(lst, entry);
2687 
2688 	return (pkt);
2689 }
2690 
2691 static inline void
2692 oce_pkt_put(struct oce_pkt_list *lst, struct oce_pkt *pkt)
2693 {
2694 	SIMPLEQ_INSERT_TAIL(lst, pkt, entry);
2695 }
2696 
2697 /**
2698  * @brief Wait for FW to become ready and reset it
2699  * @param sc		software handle to the device
2700  */
2701 int
2702 oce_init_fw(struct oce_softc *sc)
2703 {
2704 	struct ioctl_common_function_reset cmd;
2705 	uint32_t reg;
2706 	int err = 0, tmo = 60000;
2707 
2708 	/* read semaphore CSR */
2709 	reg = oce_read_csr(sc, MPU_EP_SEMAPHORE(sc));
2710 
2711 	/* if host is ready then wait for fw ready else send POST */
2712 	if ((reg & MPU_EP_SEM_STAGE_MASK) <= POST_STAGE_AWAITING_HOST_RDY) {
2713 		reg = (reg & ~MPU_EP_SEM_STAGE_MASK) | POST_STAGE_CHIP_RESET;
2714 		oce_write_csr(sc, MPU_EP_SEMAPHORE(sc), reg);
2715 	}
2716 
2717 	/* wait for FW to become ready */
2718 	for (;;) {
2719 		if (--tmo == 0)
2720 			break;
2721 
2722 		DELAY(1000);
2723 
2724 		reg = oce_read_csr(sc, MPU_EP_SEMAPHORE(sc));
2725 		if (reg & MPU_EP_SEM_ERROR) {
2726 			printf(": POST failed: %#x\n", reg);
2727 			return (ENXIO);
2728 		}
2729 		if ((reg & MPU_EP_SEM_STAGE_MASK) == POST_STAGE_ARMFW_READY) {
2730 			/* reset FW */
2731 			if (ISSET(sc->sc_flags, OCE_F_RESET_RQD)) {
2732 				memset(&cmd, 0, sizeof(cmd));
2733 				err = oce_cmd(sc, SUBSYS_COMMON,
2734 				    OPCODE_COMMON_FUNCTION_RESET,
2735 				    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2736 			}
2737 			return (err);
2738 		}
2739 	}
2740 
2741 	printf(": POST timed out: %#x\n", reg);
2742 
2743 	return (ENXIO);
2744 }
2745 
2746 static inline int
2747 oce_mbox_wait(struct oce_softc *sc)
2748 {
2749 	int i;
2750 
2751 	for (i = 0; i < 20000; i++) {
2752 		if (oce_read_db(sc, PD_MPU_MBOX_DB) & PD_MPU_MBOX_DB_READY)
2753 			return (0);
2754 		DELAY(100);
2755 	}
2756 	return (ETIMEDOUT);
2757 }
2758 
2759 /**
2760  * @brief Mailbox dispatch
2761  * @param sc		software handle to the device
2762  */
2763 int
2764 oce_mbox_dispatch(struct oce_softc *sc)
2765 {
2766 	uint32_t pa, reg;
2767 	int err;
2768 
2769 	pa = (uint32_t)((uint64_t)OCE_MEM_DVA(&sc->sc_mbx) >> 34);
2770 	reg = PD_MPU_MBOX_DB_HI | (pa << PD_MPU_MBOX_DB_ADDR_SHIFT);
2771 
2772 	if ((err = oce_mbox_wait(sc)) != 0)
2773 		goto out;
2774 
2775 	oce_write_db(sc, PD_MPU_MBOX_DB, reg);
2776 
2777 	pa = (uint32_t)((uint64_t)OCE_MEM_DVA(&sc->sc_mbx) >> 4) & 0x3fffffff;
2778 	reg = pa << PD_MPU_MBOX_DB_ADDR_SHIFT;
2779 
2780 	if ((err = oce_mbox_wait(sc)) != 0)
2781 		goto out;
2782 
2783 	oce_write_db(sc, PD_MPU_MBOX_DB, reg);
2784 
2785 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_POSTWRITE);
2786 
2787 	if ((err = oce_mbox_wait(sc)) != 0)
2788 		goto out;
2789 
2790 out:
2791 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_PREREAD);
2792 	return (err);
2793 }
2794 
2795 /**
2796  * @brief Function to initialize the hw with host endian information
2797  * @param sc		software handle to the device
2798  * @returns		0 on success, ETIMEDOUT on failure
2799  */
2800 int
2801 oce_mbox_init(struct oce_softc *sc)
2802 {
2803 	struct oce_bmbx *bmbx = OCE_MEM_KVA(&sc->sc_mbx);
2804 	uint8_t *ptr = (uint8_t *)&bmbx->mbx;
2805 
2806 	if (!ISSET(sc->sc_flags, OCE_F_MBOX_ENDIAN_RQD))
2807 		return (0);
2808 
2809 	/* Endian Signature */
2810 	*ptr++ = 0xff;
2811 	*ptr++ = 0x12;
2812 	*ptr++ = 0x34;
2813 	*ptr++ = 0xff;
2814 	*ptr++ = 0xff;
2815 	*ptr++ = 0x56;
2816 	*ptr++ = 0x78;
2817 	*ptr = 0xff;
2818 
2819 	return (oce_mbox_dispatch(sc));
2820 }
2821 
2822 int
2823 oce_cmd(struct oce_softc *sc, int subsys, int opcode, int version,
2824     void *payload, int length)
2825 {
2826 	struct oce_bmbx *bmbx = OCE_MEM_KVA(&sc->sc_mbx);
2827 	struct oce_mbx *mbx = &bmbx->mbx;
2828 	struct mbx_hdr *hdr;
2829 	caddr_t epayload = NULL;
2830 	int err;
2831 
2832 	if (length > OCE_MBX_PAYLOAD)
2833 		epayload = OCE_MEM_KVA(&sc->sc_pld);
2834 	if (length > OCE_MAX_PAYLOAD)
2835 		return (EINVAL);
2836 
2837 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2838 
2839 	memset(mbx, 0, sizeof(struct oce_mbx));
2840 
2841 	mbx->payload_length = length;
2842 
2843 	if (epayload) {
2844 		mbx->flags = OCE_MBX_F_SGE;
2845 		oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_PREREAD);
2846 		memcpy(epayload, payload, length);
2847 		mbx->pld.sgl[0].addr = OCE_MEM_DVA(&sc->sc_pld);
2848 		mbx->pld.sgl[0].length = length;
2849 		hdr = (struct mbx_hdr *)epayload;
2850 	} else {
2851 		mbx->flags = OCE_MBX_F_EMBED;
2852 		memcpy(mbx->pld.data, payload, length);
2853 		hdr = (struct mbx_hdr *)&mbx->pld.data;
2854 	}
2855 
2856 	hdr->subsys = subsys;
2857 	hdr->opcode = opcode;
2858 	hdr->version = version;
2859 	hdr->length = length - sizeof(*hdr);
2860 	if (opcode == OPCODE_COMMON_FUNCTION_RESET)
2861 		hdr->timeout = 2 * OCE_MBX_TIMEOUT;
2862 	else
2863 		hdr->timeout = OCE_MBX_TIMEOUT;
2864 
2865 	if (epayload)
2866 		oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_PREWRITE);
2867 
2868 	err = oce_mbox_dispatch(sc);
2869 	if (err == 0) {
2870 		if (epayload) {
2871 			oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_POSTWRITE);
2872 			memcpy(payload, epayload, length);
2873 		} else
2874 			memcpy(payload, &mbx->pld.data, length);
2875 	} else
2876 		printf("%s: mailbox timeout, subsys %d op %d ver %d "
2877 		    "%spayload lenght %d\n", sc->sc_dev.dv_xname, subsys,
2878 		    opcode, version, epayload ? "ext " : "",
2879 		    length);
2880 	return (err);
2881 }
2882 
2883 /**
2884  * @brief	Firmware will send gracious notifications during
2885  *		attach only after sending first mcc commnad. We
2886  *		use MCC queue only for getting async and mailbox
2887  *		for sending cmds. So to get gracious notifications
2888  *		atleast send one dummy command on mcc.
2889  */
2890 void
2891 oce_first_mcc(struct oce_softc *sc)
2892 {
2893 	struct oce_mbx *mbx;
2894 	struct oce_mq *mq = sc->sc_mq;
2895 	struct mbx_hdr *hdr;
2896 	struct mbx_get_common_fw_version *cmd;
2897 
2898 	mbx = oce_ring_get(mq->ring);
2899 	memset(mbx, 0, sizeof(struct oce_mbx));
2900 
2901 	cmd = (struct mbx_get_common_fw_version *)&mbx->pld.data;
2902 
2903 	hdr = &cmd->hdr;
2904 	hdr->subsys = SUBSYS_COMMON;
2905 	hdr->opcode = OPCODE_COMMON_GET_FW_VERSION;
2906 	hdr->version = OCE_MBX_VER_V0;
2907 	hdr->timeout = OCE_MBX_TIMEOUT;
2908 	hdr->length = sizeof(*cmd) - sizeof(*hdr);
2909 
2910 	mbx->flags = OCE_MBX_F_EMBED;
2911 	mbx->payload_length = sizeof(*cmd);
2912 	oce_dma_sync(&mq->ring->dma, BUS_DMASYNC_PREREAD |
2913 	    BUS_DMASYNC_PREWRITE);
2914 	oce_write_db(sc, PD_MQ_DB, mq->id | (1 << 16));
2915 }
2916 
2917 int
2918 oce_get_fw_config(struct oce_softc *sc)
2919 {
2920 	struct mbx_common_query_fw_config cmd;
2921 	int err;
2922 
2923 	memset(&cmd, 0, sizeof(cmd));
2924 
2925 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2926 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2927 	if (err)
2928 		return (err);
2929 
2930 	sc->sc_port = cmd.params.rsp.port_id;
2931 	sc->sc_fmode = cmd.params.rsp.function_mode;
2932 
2933 	return (0);
2934 }
2935 
2936 int
2937 oce_check_native_mode(struct oce_softc *sc)
2938 {
2939 	struct mbx_common_set_function_cap cmd;
2940 	int err;
2941 
2942 	memset(&cmd, 0, sizeof(cmd));
2943 
2944 	cmd.params.req.valid_capability_flags = CAP_SW_TIMESTAMPS |
2945 	    CAP_BE3_NATIVE_ERX_API;
2946 	cmd.params.req.capability_flags = CAP_BE3_NATIVE_ERX_API;
2947 
2948 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_FUNCTIONAL_CAPS,
2949 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2950 	if (err)
2951 		return (err);
2952 
2953 	if (cmd.params.rsp.capability_flags & CAP_BE3_NATIVE_ERX_API)
2954 		SET(sc->sc_flags, OCE_F_BE3_NATIVE);
2955 
2956 	return (0);
2957 }
2958 
2959 /**
2960  * @brief Function for creating a network interface.
2961  * @param sc		software handle to the device
2962  * @returns		0 on success, error otherwise
2963  */
2964 int
2965 oce_create_iface(struct oce_softc *sc, uint8_t *macaddr)
2966 {
2967 	struct mbx_create_common_iface cmd;
2968 	uint32_t caps, caps_en;
2969 	int err = 0;
2970 
2971 	/* interface capabilities to give device when creating interface */
2972 	caps = MBX_RX_IFACE_BROADCAST | MBX_RX_IFACE_UNTAGGED |
2973 	    MBX_RX_IFACE_PROMISC | MBX_RX_IFACE_MCAST_PROMISC |
2974 	    MBX_RX_IFACE_RSS;
2975 
2976 	/* capabilities to enable by default (others set dynamically) */
2977 	caps_en = MBX_RX_IFACE_BROADCAST | MBX_RX_IFACE_UNTAGGED;
2978 
2979 	if (!IS_XE201(sc)) {
2980 		/* LANCER A0 workaround */
2981 		caps |= MBX_RX_IFACE_PASS_L3L4_ERR;
2982 		caps_en |= MBX_RX_IFACE_PASS_L3L4_ERR;
2983 	}
2984 
2985 	/* enable capabilities controlled via driver startup parameters */
2986 	if (sc->sc_rss_enable)
2987 		caps_en |= MBX_RX_IFACE_RSS;
2988 
2989 	memset(&cmd, 0, sizeof(cmd));
2990 
2991 	cmd.params.req.version = 0;
2992 	cmd.params.req.cap_flags = htole32(caps);
2993 	cmd.params.req.enable_flags = htole32(caps_en);
2994 	if (macaddr != NULL) {
2995 		memcpy(&cmd.params.req.mac_addr[0], macaddr, ETHER_ADDR_LEN);
2996 		cmd.params.req.mac_invalid = 0;
2997 	} else
2998 		cmd.params.req.mac_invalid = 1;
2999 
3000 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_IFACE,
3001 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3002 	if (err)
3003 		return (err);
3004 
3005 	sc->sc_if_id = letoh32(cmd.params.rsp.if_id);
3006 
3007 	if (macaddr != NULL)
3008 		sc->sc_pmac_id = letoh32(cmd.params.rsp.pmac_id);
3009 
3010 	return (0);
3011 }
3012 
3013 /**
3014  * @brief Function to send the mbx command to configure vlan
3015  * @param sc 		software handle to the device
3016  * @param vtags		array of vlan tags
3017  * @param nvtags	number of elements in array
3018  * @param untagged	boolean TRUE/FLASE
3019  * @param promisc	flag to enable/disable VLAN promiscuous mode
3020  * @returns		0 on success, EIO on failure
3021  */
3022 int
3023 oce_config_vlan(struct oce_softc *sc, struct normal_vlan *vtags, int nvtags,
3024     int untagged, int promisc)
3025 {
3026 	struct mbx_common_config_vlan cmd;
3027 
3028 	memset(&cmd, 0, sizeof(cmd));
3029 
3030 	cmd.params.req.if_id = sc->sc_if_id;
3031 	cmd.params.req.promisc = promisc;
3032 	cmd.params.req.untagged = untagged;
3033 	cmd.params.req.num_vlans = nvtags;
3034 
3035 	if (!promisc)
3036 		memcpy(cmd.params.req.tags.normal_vlans, vtags,
3037 			nvtags * sizeof(struct normal_vlan));
3038 
3039 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CONFIG_IFACE_VLAN,
3040 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3041 }
3042 
3043 /**
3044  * @brief Function to set flow control capability in the hardware
3045  * @param sc 		software handle to the device
3046  * @param flags		flow control flags to set
3047  * @returns		0 on success, EIO on failure
3048  */
3049 int
3050 oce_set_flow_control(struct oce_softc *sc, uint flags)
3051 {
3052 	struct mbx_common_get_set_flow_control cmd;
3053 	int err;
3054 
3055 	memset(&cmd, 0, sizeof(cmd));
3056 
3057 	cmd.rx_flow_control = flags & IFM_ETH_RXPAUSE ? 1 : 0;
3058 	cmd.tx_flow_control = flags & IFM_ETH_TXPAUSE ? 1 : 0;
3059 
3060 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_FLOW_CONTROL,
3061 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3062 	if (err)
3063 		return (err);
3064 
3065 	memset(&cmd, 0, sizeof(cmd));
3066 
3067 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_GET_FLOW_CONTROL,
3068 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3069 	if (err)
3070 		return (err);
3071 
3072 	sc->sc_fc  = cmd.rx_flow_control ? IFM_ETH_RXPAUSE : 0;
3073 	sc->sc_fc |= cmd.tx_flow_control ? IFM_ETH_TXPAUSE : 0;
3074 
3075 	return (0);
3076 }
3077 
3078 #ifdef OCE_RSS
3079 /**
3080  * @brief Function to set flow control capability in the hardware
3081  * @param sc 		software handle to the device
3082  * @param enable	0=disable, OCE_RSS_xxx flags otherwise
3083  * @returns		0 on success, EIO on failure
3084  */
3085 int
3086 oce_config_rss(struct oce_softc *sc, int enable)
3087 {
3088 	struct mbx_config_nic_rss cmd;
3089 	uint8_t *tbl = &cmd.params.req.cputable;
3090 	int i, j;
3091 
3092 	memset(&cmd, 0, sizeof(cmd));
3093 
3094 	if (enable)
3095 		cmd.params.req.enable_rss = RSS_ENABLE_IPV4 | RSS_ENABLE_IPV6 |
3096 		    RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_TCP_IPV6);
3097 	cmd.params.req.flush = OCE_FLUSH;
3098 	cmd.params.req.if_id = htole32(sc->sc_if_id);
3099 
3100 	arc4random_buf(cmd.params.req.hash, sizeof(cmd.params.req.hash));
3101 
3102 	/*
3103 	 * Initialize the RSS CPU indirection table.
3104 	 *
3105 	 * The table is used to choose the queue to place incoming packets.
3106 	 * Incoming packets are hashed.  The lowest bits in the hash result
3107 	 * are used as the index into the CPU indirection table.
3108 	 * Each entry in the table contains the RSS CPU-ID returned by the NIC
3109 	 * create.  Based on the CPU ID, the receive completion is routed to
3110 	 * the corresponding RSS CQs.  (Non-RSS packets are always completed
3111 	 * on the default (0) CQ).
3112 	 */
3113 	for (i = 0, j = 0; j < sc->sc_nrq; j++) {
3114 		if (sc->sc_rq[j]->cfg.is_rss_queue)
3115 			tbl[i++] = sc->sc_rq[j]->rss_cpuid;
3116 	}
3117 	if (i > 0)
3118 		cmd->params.req.cpu_tbl_sz_log2 = htole16(ilog2(i));
3119 	else
3120 		return (ENXIO);
3121 
3122 	return (oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CONFIG_RSS, OCE_MBX_VER_V0,
3123 	    &cmd, sizeof(cmd)));
3124 }
3125 #endif	/* OCE_RSS */
3126 
3127 /**
3128  * @brief Function for hardware update multicast filter
3129  * @param sc		software handle to the device
3130  * @param multi		table of multicast addresses
3131  * @param naddr		number of multicast addresses in the table
3132  */
3133 int
3134 oce_update_mcast(struct oce_softc *sc,
3135     uint8_t multi[][ETHER_ADDR_LEN], int naddr)
3136 {
3137 	struct mbx_set_common_iface_multicast cmd;
3138 
3139 	memset(&cmd, 0, sizeof(cmd));
3140 
3141 	memcpy(&cmd.params.req.mac[0], &multi[0], naddr * ETHER_ADDR_LEN);
3142 	cmd.params.req.num_mac = htole16(naddr);
3143 	cmd.params.req.if_id = sc->sc_if_id;
3144 
3145 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_IFACE_MULTICAST,
3146 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3147 }
3148 
3149 /**
3150  * @brief RXF function to enable/disable device promiscuous mode
3151  * @param sc		software handle to the device
3152  * @param enable	enable/disable flag
3153  * @returns		0 on success, EIO on failure
3154  * @note
3155  *	The OPCODE_NIC_CONFIG_PROMISCUOUS command deprecated for Lancer.
3156  *	This function uses the COMMON_SET_IFACE_RX_FILTER command instead.
3157  */
3158 int
3159 oce_set_promisc(struct oce_softc *sc, int enable)
3160 {
3161 	struct mbx_set_common_iface_rx_filter cmd;
3162 	struct iface_rx_filter_ctx *req;
3163 
3164 	memset(&cmd, 0, sizeof(cmd));
3165 
3166 	req = &cmd.params.req;
3167 	req->if_id = sc->sc_if_id;
3168 
3169 	if (enable)
3170 		req->iface_flags = req->iface_flags_mask =
3171 		    MBX_RX_IFACE_PROMISC | MBX_RX_IFACE_VLAN_PROMISC;
3172 
3173 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_IFACE_RX_FILTER,
3174 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3175 }
3176 
3177 /**
3178  * @brief Function to query the link status from the hardware
3179  * @param sc 		software handle to the device
3180  * @param[out] link	pointer to the structure returning link attributes
3181  * @returns		0 on success, EIO on failure
3182  */
3183 int
3184 oce_get_link_status(struct oce_softc *sc)
3185 {
3186 	struct mbx_query_common_link_config cmd;
3187 	int err;
3188 
3189 	memset(&cmd, 0, sizeof(cmd));
3190 
3191 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_LINK_CONFIG,
3192 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3193 	if (err)
3194 		return (err);
3195 
3196 	sc->sc_link_up = (letoh32(cmd.params.rsp.logical_link_status) ==
3197 	    NTWK_LOGICAL_LINK_UP);
3198 
3199 	if (cmd.params.rsp.mac_speed < 5)
3200 		sc->sc_link_speed = cmd.params.rsp.mac_speed;
3201 	else
3202 		sc->sc_link_speed = 0;
3203 
3204 	return (0);
3205 }
3206 
3207 void
3208 oce_macaddr_set(struct oce_softc *sc)
3209 {
3210 	uint32_t old_pmac_id = sc->sc_pmac_id;
3211 	int status = 0;
3212 
3213 	if (!memcmp(sc->sc_macaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN))
3214 		return;
3215 
3216 	status = oce_macaddr_add(sc, sc->sc_ac.ac_enaddr, &sc->sc_pmac_id);
3217 	if (!status)
3218 		status = oce_macaddr_del(sc, old_pmac_id);
3219 	else
3220 		printf("%s: failed to set MAC address\n", sc->sc_dev.dv_xname);
3221 }
3222 
3223 int
3224 oce_macaddr_get(struct oce_softc *sc, uint8_t *macaddr)
3225 {
3226 	struct mbx_query_common_iface_mac cmd;
3227 	int err;
3228 
3229 	memset(&cmd, 0, sizeof(cmd));
3230 
3231 	cmd.params.req.type = MAC_ADDRESS_TYPE_NETWORK;
3232 	cmd.params.req.permanent = 1;
3233 
3234 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_IFACE_MAC,
3235 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3236 	if (err == 0)
3237 		memcpy(macaddr, &cmd.params.rsp.mac.mac_addr[0],
3238 		    ETHER_ADDR_LEN);
3239 	return (err);
3240 }
3241 
3242 int
3243 oce_macaddr_add(struct oce_softc *sc, uint8_t *enaddr, uint32_t *pmac)
3244 {
3245 	struct mbx_add_common_iface_mac cmd;
3246 	int err;
3247 
3248 	memset(&cmd, 0, sizeof(cmd));
3249 
3250 	cmd.params.req.if_id = htole16(sc->sc_if_id);
3251 	memcpy(cmd.params.req.mac_address, enaddr, ETHER_ADDR_LEN);
3252 
3253 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_ADD_IFACE_MAC,
3254 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3255 	if (err == 0)
3256 		*pmac = letoh32(cmd.params.rsp.pmac_id);
3257 	return (err);
3258 }
3259 
3260 int
3261 oce_macaddr_del(struct oce_softc *sc, uint32_t pmac)
3262 {
3263 	struct mbx_del_common_iface_mac cmd;
3264 
3265 	memset(&cmd, 0, sizeof(cmd));
3266 
3267 	cmd.params.req.if_id = htole16(sc->sc_if_id);
3268 	cmd.params.req.pmac_id = htole32(pmac);
3269 
3270 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DEL_IFACE_MAC,
3271 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3272 }
3273 
3274 int
3275 oce_new_rq(struct oce_softc *sc, struct oce_rq *rq)
3276 {
3277 	struct mbx_create_nic_rq cmd;
3278 	int err, npages;
3279 
3280 	memset(&cmd, 0, sizeof(cmd));
3281 
3282 	npages = oce_load_ring(sc, rq->ring, &cmd.params.req.pages[0],
3283 	    nitems(cmd.params.req.pages));
3284 	if (!npages) {
3285 		printf("%s: failed to load the rq ring\n", __func__);
3286 		return (1);
3287 	}
3288 
3289 	if (IS_XE201(sc)) {
3290 		cmd.params.req.frag_size = rq->fragsize / 2048;
3291 		cmd.params.req.page_size = 1;
3292 	} else
3293 		cmd.params.req.frag_size = ilog2(rq->fragsize);
3294 	cmd.params.req.num_pages = npages;
3295 	cmd.params.req.cq_id = rq->cq->id;
3296 	cmd.params.req.if_id = htole32(sc->sc_if_id);
3297 	cmd.params.req.max_frame_size = htole16(rq->mtu);
3298 	cmd.params.req.is_rss_queue = htole32(rq->rss);
3299 
3300 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CREATE_RQ,
3301 	    IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0, &cmd,
3302 	    sizeof(cmd));
3303 	if (err)
3304 		return (err);
3305 
3306 	rq->id = letoh16(cmd.params.rsp.rq_id);
3307 	rq->rss_cpuid = cmd.params.rsp.rss_cpuid;
3308 
3309 	return (0);
3310 }
3311 
3312 int
3313 oce_new_wq(struct oce_softc *sc, struct oce_wq *wq)
3314 {
3315 	struct mbx_create_nic_wq cmd;
3316 	int err, npages;
3317 
3318 	memset(&cmd, 0, sizeof(cmd));
3319 
3320 	npages = oce_load_ring(sc, wq->ring, &cmd.params.req.pages[0],
3321 	    nitems(cmd.params.req.pages));
3322 	if (!npages) {
3323 		printf("%s: failed to load the wq ring\n", __func__);
3324 		return (1);
3325 	}
3326 
3327 	if (IS_XE201(sc))
3328 		cmd.params.req.if_id = sc->sc_if_id;
3329 	cmd.params.req.nic_wq_type = NIC_WQ_TYPE_STANDARD;
3330 	cmd.params.req.num_pages = npages;
3331 	cmd.params.req.wq_size = ilog2(wq->nitems) + 1;
3332 	cmd.params.req.cq_id = htole16(wq->cq->id);
3333 	cmd.params.req.ulp_num = 1;
3334 
3335 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CREATE_WQ,
3336 	    IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0, &cmd,
3337 	    sizeof(cmd));
3338 	if (err)
3339 		return (err);
3340 
3341 	wq->id = letoh16(cmd.params.rsp.wq_id);
3342 
3343 	return (0);
3344 }
3345 
3346 int
3347 oce_new_mq(struct oce_softc *sc, struct oce_mq *mq)
3348 {
3349 	struct mbx_create_common_mq_ex cmd;
3350 	union oce_mq_ext_ctx *ctx;
3351 	int err, npages;
3352 
3353 	memset(&cmd, 0, sizeof(cmd));
3354 
3355 	npages = oce_load_ring(sc, mq->ring, &cmd.params.req.pages[0],
3356 	    nitems(cmd.params.req.pages));
3357 	if (!npages) {
3358 		printf("%s: failed to load the mq ring\n", __func__);
3359 		return (-1);
3360 	}
3361 
3362 	ctx = &cmd.params.req.context;
3363 	ctx->v0.num_pages = npages;
3364 	ctx->v0.cq_id = mq->cq->id;
3365 	ctx->v0.ring_size = ilog2(mq->nitems) + 1;
3366 	ctx->v0.valid = 1;
3367 	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
3368 	ctx->v0.async_evt_bitmap = 0xffffffff;
3369 
3370 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_MQ_EXT,
3371 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3372 	if (err)
3373 		return (err);
3374 
3375 	mq->id = letoh16(cmd.params.rsp.mq_id);
3376 
3377 	return (0);
3378 }
3379 
3380 int
3381 oce_new_eq(struct oce_softc *sc, struct oce_eq *eq)
3382 {
3383 	struct mbx_create_common_eq cmd;
3384 	int err, npages;
3385 
3386 	memset(&cmd, 0, sizeof(cmd));
3387 
3388 	npages = oce_load_ring(sc, eq->ring, &cmd.params.req.pages[0],
3389 	    nitems(cmd.params.req.pages));
3390 	if (!npages) {
3391 		printf("%s: failed to load the eq ring\n", __func__);
3392 		return (-1);
3393 	}
3394 
3395 	cmd.params.req.ctx.num_pages = htole16(npages);
3396 	cmd.params.req.ctx.valid = 1;
3397 	cmd.params.req.ctx.size = (eq->isize == 4) ? 0 : 1;
3398 	cmd.params.req.ctx.count = ilog2(eq->nitems / 256);
3399 	cmd.params.req.ctx.armed = 0;
3400 	cmd.params.req.ctx.delay_mult = htole32(eq->delay);
3401 
3402 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_EQ,
3403 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3404 	if (err)
3405 		return (err);
3406 
3407 	eq->id = letoh16(cmd.params.rsp.eq_id);
3408 
3409 	return (0);
3410 }
3411 
3412 int
3413 oce_new_cq(struct oce_softc *sc, struct oce_cq *cq)
3414 {
3415 	struct mbx_create_common_cq cmd;
3416 	union oce_cq_ctx *ctx;
3417 	int err, npages;
3418 
3419 	memset(&cmd, 0, sizeof(cmd));
3420 
3421 	npages = oce_load_ring(sc, cq->ring, &cmd.params.req.pages[0],
3422 	    nitems(cmd.params.req.pages));
3423 	if (!npages) {
3424 		printf("%s: failed to load the cq ring\n", __func__);
3425 		return (-1);
3426 	}
3427 
3428 	ctx = &cmd.params.req.cq_ctx;
3429 
3430 	if (IS_XE201(sc)) {
3431 		ctx->v2.num_pages = htole16(npages);
3432 		ctx->v2.page_size = 1; /* for 4K */
3433 		ctx->v2.eventable = cq->eventable;
3434 		ctx->v2.valid = 1;
3435 		ctx->v2.count = ilog2(cq->nitems / 256);
3436 		ctx->v2.nodelay = cq->nodelay;
3437 		ctx->v2.coalesce_wm = cq->ncoalesce;
3438 		ctx->v2.armed = 0;
3439 		ctx->v2.eq_id = cq->eq->id;
3440 		if (ctx->v2.count == 3) {
3441 			if (cq->nitems > (4*1024)-1)
3442 				ctx->v2.cqe_count = (4*1024)-1;
3443 			else
3444 				ctx->v2.cqe_count = cq->nitems;
3445 		}
3446 	} else {
3447 		ctx->v0.num_pages = htole16(npages);
3448 		ctx->v0.eventable = cq->eventable;
3449 		ctx->v0.valid = 1;
3450 		ctx->v0.count = ilog2(cq->nitems / 256);
3451 		ctx->v0.nodelay = cq->nodelay;
3452 		ctx->v0.coalesce_wm = cq->ncoalesce;
3453 		ctx->v0.armed = 0;
3454 		ctx->v0.eq_id = cq->eq->id;
3455 	}
3456 
3457 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_CQ,
3458 	    IS_XE201(sc) ? OCE_MBX_VER_V2 : OCE_MBX_VER_V0, &cmd,
3459 	    sizeof(cmd));
3460 	if (err)
3461 		return (err);
3462 
3463 	cq->id = letoh16(cmd.params.rsp.cq_id);
3464 
3465 	return (0);
3466 }
3467 
3468 static inline int
3469 oce_update_stats(struct oce_softc *sc)
3470 {
3471 	struct ifnet *ifp = &sc->sc_ac.ac_if;
3472 	uint64_t rxe, txe;
3473 	int err;
3474 
3475 	if (ISSET(sc->sc_flags, OCE_F_BE2))
3476 		err = oce_stats_be2(sc, &rxe, &txe);
3477 	else if (ISSET(sc->sc_flags, OCE_F_BE3))
3478 		err = oce_stats_be3(sc, &rxe, &txe);
3479 	else
3480 		err = oce_stats_xe(sc, &rxe, &txe);
3481 	if (err)
3482 		return (err);
3483 
3484 	ifp->if_ierrors += (rxe > sc->sc_rx_errors) ?
3485 	    rxe - sc->sc_rx_errors : sc->sc_rx_errors - rxe;
3486 	sc->sc_rx_errors = rxe;
3487 	ifp->if_oerrors += (txe > sc->sc_tx_errors) ?
3488 	    txe - sc->sc_tx_errors : sc->sc_tx_errors - txe;
3489 	sc->sc_tx_errors = txe;
3490 
3491 	return (0);
3492 }
3493 
3494 int
3495 oce_stats_be2(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3496 {
3497 	struct mbx_get_nic_stats_v0 cmd;
3498 	struct oce_pmem_stats *ms;
3499 	struct oce_rxf_stats_v0 *rs;
3500 	struct oce_port_rxf_stats_v0 *ps;
3501 	int err;
3502 
3503 	memset(&cmd, 0, sizeof(cmd));
3504 
3505 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_STATS, OCE_MBX_VER_V0,
3506 	    &cmd, sizeof(cmd));
3507 	if (err)
3508 		return (err);
3509 
3510 	ms = &cmd.params.rsp.stats.pmem;
3511 	rs = &cmd.params.rsp.stats.rxf;
3512 	ps = &rs->port[sc->sc_port];
3513 
3514 	*rxe = ps->rx_crc_errors + ps->rx_in_range_errors +
3515 	    ps->rx_frame_too_long + ps->rx_dropped_runt +
3516 	    ps->rx_ip_checksum_errs + ps->rx_tcp_checksum_errs +
3517 	    ps->rx_udp_checksum_errs + ps->rxpp_fifo_overflow_drop +
3518 	    ps->rx_dropped_tcp_length + ps->rx_dropped_too_small +
3519 	    ps->rx_dropped_too_short + ps->rx_out_range_errors +
3520 	    ps->rx_dropped_header_too_small + ps->rx_input_fifo_overflow_drop +
3521 	    ps->rx_alignment_symbol_errors;
3522 	if (sc->sc_if_id)
3523 		*rxe += rs->port1_jabber_events;
3524 	else
3525 		*rxe += rs->port0_jabber_events;
3526 	*rxe += ms->eth_red_drops;
3527 
3528 	*txe = 0; /* hardware doesn't provide any extra tx error statistics */
3529 
3530 	return (0);
3531 }
3532 
3533 int
3534 oce_stats_be3(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3535 {
3536 	struct mbx_get_nic_stats cmd;
3537 	struct oce_pmem_stats *ms;
3538 	struct oce_rxf_stats_v1 *rs;
3539 	struct oce_port_rxf_stats_v1 *ps;
3540 	int err;
3541 
3542 	memset(&cmd, 0, sizeof(cmd));
3543 
3544 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_STATS, OCE_MBX_VER_V1,
3545 	    &cmd, sizeof(cmd));
3546 	if (err)
3547 		return (err);
3548 
3549 	ms = &cmd.params.rsp.stats.pmem;
3550 	rs = &cmd.params.rsp.stats.rxf;
3551 	ps = &rs->port[sc->sc_port];
3552 
3553 	*rxe = ps->rx_crc_errors + ps->rx_in_range_errors +
3554 	    ps->rx_frame_too_long + ps->rx_dropped_runt +
3555 	    ps->rx_ip_checksum_errs + ps->rx_tcp_checksum_errs +
3556 	    ps->rx_udp_checksum_errs + ps->rxpp_fifo_overflow_drop +
3557 	    ps->rx_dropped_tcp_length + ps->rx_dropped_too_small +
3558 	    ps->rx_dropped_too_short + ps->rx_out_range_errors +
3559 	    ps->rx_dropped_header_too_small + ps->rx_input_fifo_overflow_drop +
3560 	    ps->rx_alignment_symbol_errors + ps->jabber_events;
3561 	*rxe += ms->eth_red_drops;
3562 
3563 	*txe = 0; /* hardware doesn't provide any extra tx error statistics */
3564 
3565 	return (0);
3566 }
3567 
3568 int
3569 oce_stats_xe(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3570 {
3571 	struct mbx_get_pport_stats cmd;
3572 	struct oce_pport_stats *pps;
3573 	int err;
3574 
3575 	memset(&cmd, 0, sizeof(cmd));
3576 
3577 	cmd.params.req.reset_stats = 0;
3578 	cmd.params.req.port_number = sc->sc_if_id;
3579 
3580 	err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_PPORT_STATS,
3581 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3582 	if (err)
3583 		return (err);
3584 
3585 	pps = &cmd.params.rsp.pps;
3586 
3587 	*rxe = pps->rx_discards + pps->rx_errors + pps->rx_crc_errors +
3588 	    pps->rx_alignment_errors + pps->rx_symbol_errors +
3589 	    pps->rx_frames_too_long + pps->rx_internal_mac_errors +
3590 	    pps->rx_undersize_pkts + pps->rx_oversize_pkts + pps->rx_jabbers +
3591 	    pps->rx_control_frames_unknown_opcode + pps->rx_in_range_errors +
3592 	    pps->rx_out_of_range_errors + pps->rx_ip_checksum_errors +
3593 	    pps->rx_tcp_checksum_errors + pps->rx_udp_checksum_errors +
3594 	    pps->rx_fifo_overflow + pps->rx_input_fifo_overflow +
3595 	    pps->rx_drops_too_many_frags + pps->rx_drops_mtu;
3596 
3597 	*txe = pps->tx_discards + pps->tx_errors + pps->tx_internal_mac_errors;
3598 
3599 	return (0);
3600 }
3601