xref: /openbsd-src/sys/dev/pci/if_bnxt.c (revision 46035553bfdd96e63c94e32da0210227ec2e3cf1)
1 /*	$OpenBSD: if_bnxt.c,v 1.28 2020/12/12 11:48:53 jan Exp $	*/
2 /*-
3  * Broadcom NetXtreme-C/E network driver.
4  *
5  * Copyright (c) 2016 Broadcom, All Rights Reserved.
6  * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
21  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27  * THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 /*
31  * Copyright (c) 2018 Jonathan Matthew <jmatthew@openbsd.org>
32  *
33  * Permission to use, copy, modify, and distribute this software for any
34  * purpose with or without fee is hereby granted, provided that the above
35  * copyright notice and this permission notice appear in all copies.
36  *
37  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
38  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
39  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
40  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
41  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
42  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
43  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
44  */
45 
46 
47 #include "bpfilter.h"
48 #include "vlan.h"
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/mbuf.h>
53 #include <sys/kernel.h>
54 #include <sys/malloc.h>
55 #include <sys/device.h>
56 #include <sys/stdint.h>
57 #include <sys/sockio.h>
58 #include <sys/atomic.h>
59 
60 #include <machine/bus.h>
61 
62 #include <dev/pci/pcireg.h>
63 #include <dev/pci/pcivar.h>
64 #include <dev/pci/pcidevs.h>
65 
66 #include <dev/pci/if_bnxtreg.h>
67 
68 #include <net/if.h>
69 #include <net/if_media.h>
70 
71 #if NBPFILTER > 0
72 #include <net/bpf.h>
73 #endif
74 
75 #include <netinet/in.h>
76 #include <netinet/if_ether.h>
77 
/*
 * BAR offsets of the two PCI memory regions the driver maps: the HWRM
 * BAR is the firmware command channel, the doorbell BAR is written to
 * tell the chip about ring updates.
 */
#define BNXT_HWRM_BAR		0x10
#define BNXT_DOORBELL_BAR	0x18

/* fixed logical ring ids; each ring's doorbell offset is id * 0x80 */
#define BNXT_RX_RING_ID		0
#define BNXT_AG_RING_ID		1
#define BNXT_TX_RING_ID		3

#define BNXT_MAX_QUEUE		8
#define BNXT_MAX_MTU		9500
#define BNXT_AG_BUFFER_SIZE	8192

/* number of pages backing the completion ring */
#define BNXT_CP_PAGES		4

#define BNXT_MAX_TX_SEGS	32	/* a bit much? */
#define BNXT_TX_SLOTS(bs)	((bs)->bs_map->dm_nsegs + 1)

#define BNXT_HWRM_SHORT_REQ_LEN	sizeof(struct hwrm_short_input)

/*
 * Serialize access to the HWRM command channel.  The macro argument is
 * parenthesized and used consistently; LOCK_INIT previously referenced
 * the caller's "sc" directly instead of its _sc argument.
 */
#define BNXT_HWRM_LOCK_INIT(_sc, _name)	\
	mtx_init_flags(&(_sc)->sc_lock, IPL_NET, _name, 0)
#define BNXT_HWRM_LOCK(_sc) 		mtx_enter(&(_sc)->sc_lock)
#define BNXT_HWRM_UNLOCK(_sc) 		mtx_leave(&(_sc)->sc_lock)
#define BNXT_HWRM_LOCK_DESTROY(_sc)	/* nothing */
#define BNXT_HWRM_LOCK_ASSERT(_sc)	MUTEX_ASSERT_LOCKED(&(_sc)->sc_lock)

/* sc_flags bits */
#define BNXT_FLAG_VF            0x0001
#define BNXT_FLAG_NPAR          0x0002
#define BNXT_FLAG_WOL_CAP       0x0004
#define BNXT_FLAG_SHORT_CMD     0x0008
#define BNXT_FLAG_MSIX          0x0010

/* NVRam stuff has a five minute timeout */
#define BNXT_NVM_TIMEO	(5 * 60 * 1000)

/*
 * Advance a completion ring consumer index, toggling the expected
 * valid bit when the index wraps.  The trailing semicolon after
 * "while (0)" is deliberately kept for compatibility with existing
 * call sites that may rely on it.
 */
#define NEXT_CP_CONS_V(_ring, _cons, _v_bit)		\
do {	 						\
	if (++(_cons) == (_ring)->ring_size)		\
		((_cons) = 0, (_v_bit) = !(_v_bit));	\
} while (0);
117 
/*
 * One class-of-service queue id/profile pair; sc_q_info holds up to
 * BNXT_MAX_QUEUE of these, presumably filled from the firmware's
 * queue_qportcfg response — confirm in bnxt_hwrm_queue_qportcfg().
 */
118 struct bnxt_cos_queue {
119 	uint8_t			id;
120 	uint8_t			profile;
121 };
122 
/* common descriptor ring state shared by the tx, rx, ag and cp rings */
123 struct bnxt_ring {
124 	uint64_t		paddr;		/* DMA address of ring memory */
125 	uint64_t		doorbell;	/* offset into doorbell BAR (id * 0x80) */
126 	caddr_t			vaddr;		/* kernel virtual address of ring memory */
127 	uint32_t		ring_size;	/* number of descriptors */
128 	uint16_t		id;		/* logical id chosen by the driver */
129 	uint16_t		phys_id;	/* firmware-assigned id; HWRM_NA_SIGNATURE until allocated */
130 };
131 
/*
 * Completion ring: the hardware posts completion events here and the
 * driver consumes them, tracking a consumer index plus a valid bit
 * that flips each time the ring wraps (see NEXT_CP_CONS_V).
 */
132 struct bnxt_cp_ring {
133 	struct bnxt_ring	ring;
134 	void			*irq;
135 	struct bnxt_softc	*softc;
136 	uint32_t		cons;		/* current consumer index */
137 	int			v_bit;		/* expected valid bit at cons */
138 	uint32_t		commit_cons;	/* cons saved by bnxt_cpr_commit() for rollback */
139 	int			commit_v_bit;	/* v_bit saved by bnxt_cpr_commit() */
140 	struct ctx_hw_stats	*stats;
141 	uint32_t		stats_ctx_id;	/* firmware statistics context id */
142 };
143 
/*
 * Set of firmware ring ids tied together into a ring group; passed to
 * bnxt_hwrm_ring_grp_alloc() and referenced by the vnic.
 */
144 struct bnxt_grp_info {
145 	uint32_t		grp_id;
146 	uint16_t		stats_ctx;
147 	uint16_t		rx_ring_id;
148 	uint16_t		cp_ring_id;
149 	uint16_t		ag_ring_id;
150 };
151 
/*
 * Virtual NIC configuration handed to the HWRM vnic calls; ids start
 * as HWRM_NA_SIGNATURE and are filled in by the firmware on
 * allocation (see bnxt_up()).
 */
152 struct bnxt_vnic_info {
153 	uint16_t		id;
154 	uint16_t		def_ring_grp;	/* default ring group id */
155 	uint16_t		cos_rule;
156 	uint16_t		lb_rule;
157 	uint16_t		mru;		/* max receive unit, set to BNXT_MAX_MTU */
158 
159 	uint32_t		flags;
160 #define BNXT_VNIC_FLAG_DEFAULT		0x01
161 #define BNXT_VNIC_FLAG_BD_STALL		0x02
162 #define BNXT_VNIC_FLAG_VLAN_STRIP	0x04
163 
	/* presumably set by bnxt_hwrm_set_filter() — confirm */
164 	uint64_t		filter_id;
165 	uint32_t		flow_id;
166 
167 	uint16_t		rss_id;
168 	/* rss things */
169 };
170 
/* per-descriptor state: the dma map and the mbuf currently attached to it */
171 struct bnxt_slot {
172 	bus_dmamap_t		bs_map;
173 	struct mbuf		*bs_m;
174 };
175 
/*
 * Wrapper around a single-segment DMA allocation; created by
 * bnxt_dmamem_alloc() and released by bnxt_dmamem_free().  The
 * accessor macros below return the map, length, device (bus) address
 * and kernel virtual address respectively.
 */
176 struct bnxt_dmamem {
177 	bus_dmamap_t		bdm_map;
178 	bus_dma_segment_t	bdm_seg;
179 	size_t			bdm_size;
180 	caddr_t			bdm_kva;
181 };
182 #define BNXT_DMA_MAP(_bdm)	((_bdm)->bdm_map)
183 #define BNXT_DMA_LEN(_bdm)	((_bdm)->bdm_size)
184 #define BNXT_DMA_DVA(_bdm)	((u_int64_t)(_bdm)->bdm_map->dm_segs[0].ds_addr)
185 #define BNXT_DMA_KVA(_bdm)	((void *)(_bdm)->bdm_kva)
186 
/* per-device private state */
187 struct bnxt_softc {
188 	struct device		sc_dev;
189 	struct arpcom		sc_ac;
190 	struct ifmedia		sc_media;
191 
	/* serializes HWRM firmware commands (see BNXT_HWRM_LOCK) */
192 	struct mutex		sc_lock;
193 
194 	pci_chipset_tag_t	sc_pc;
195 	pcitag_t		sc_tag;
196 	bus_dma_tag_t		sc_dmat;
197 
	/* mapping of the HWRM (firmware command) BAR */
198 	bus_space_tag_t		sc_hwrm_t;
199 	bus_space_handle_t	sc_hwrm_h;
200 	bus_size_t		sc_hwrm_s;
201 
202 	struct bnxt_dmamem	*sc_cmd_resp;	/* HWRM response buffer */
203 	uint16_t		sc_cmd_seq;
204 	uint16_t		sc_max_req_len;
205 	uint32_t		sc_cmd_timeo;
206 	uint32_t		sc_flags;	/* BNXT_FLAG_* */
207 
	/* mapping of the doorbell BAR */
208 	bus_space_tag_t		sc_db_t;
209 	bus_space_handle_t	sc_db_h;
210 	bus_size_t		sc_db_s;
211 
212 	void			*sc_ih;		/* interrupt handle */
213 
214 	int			sc_hwrm_ver;
215 	int			sc_max_tc;
216 	struct bnxt_cos_queue	sc_q_info[BNXT_MAX_QUEUE];
217 
218 	struct bnxt_vnic_info	sc_vnic;
219 	struct bnxt_dmamem	*sc_stats_ctx_mem;
220 
221 	struct bnxt_cp_ring	sc_cp_ring;
222 	struct bnxt_dmamem	*sc_cp_ring_mem;
223 
224 	/* rx */
225 	struct bnxt_dmamem	*sc_rx_ring_mem;	/* rx and ag */
226 	struct bnxt_dmamem	*sc_rx_mcast;
227 	struct bnxt_ring	sc_rx_ring;
228 	struct bnxt_ring	sc_rx_ag_ring;
229 	struct bnxt_grp_info	sc_ring_group;
230 	struct if_rxring	sc_rxr[2];	/* [0] rx, [1] ag */
231 	struct bnxt_slot	*sc_rx_slots;
232 	struct bnxt_slot	*sc_rx_ag_slots;
233 	int			sc_rx_prod;
234 	int			sc_rx_cons;
235 	int			sc_rx_ag_prod;
236 	int			sc_rx_ag_cons;
237 	struct timeout		sc_rx_refill;
238 
239 	/* tx */
240 	struct bnxt_dmamem	*sc_tx_ring_mem;
241 	struct bnxt_ring	sc_tx_ring;
242 	struct bnxt_slot	*sc_tx_slots;
243 	int			sc_tx_prod;
244 	int			sc_tx_cons;
245 	int			sc_tx_ring_prod;
246 	int			sc_tx_ring_cons;
247 };
248 #define DEVNAME(_sc)	((_sc)->sc_dev.dv_xname)
249 
/* PCI ids of the supported NetXtreme-C/E devices */
250 const struct pci_matchid bnxt_devices[] = {
251 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57301 },
252 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57302 },
253 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57304 },
254 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57311 },
255 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57312 },
256 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57314 },
257 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57402 },
258 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57404 },
259 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57406 },
260 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57407 },
261 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57412 },
262 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57414 },
263 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57416 },
264 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57416_SFP },
265 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57417 },
266 	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57417_SFP }
267 };
268 
269 int		bnxt_match(struct device *, void *, void *);
270 void		bnxt_attach(struct device *, struct device *, void *);
271 
272 void		bnxt_up(struct bnxt_softc *);
273 void		bnxt_down(struct bnxt_softc *);
274 void		bnxt_iff(struct bnxt_softc *);
275 int		bnxt_ioctl(struct ifnet *, u_long, caddr_t);
276 int		bnxt_rxrinfo(struct bnxt_softc *, struct if_rxrinfo *);
277 void		bnxt_start(struct ifqueue *);
278 int		bnxt_intr(void *);
279 void		bnxt_watchdog(struct ifnet *);
280 void		bnxt_media_status(struct ifnet *, struct ifmediareq *);
281 int		bnxt_media_change(struct ifnet *);
282 int		bnxt_media_autonegotiate(struct bnxt_softc *);
283 
284 struct cmpl_base *bnxt_cpr_next_cmpl(struct bnxt_softc *, struct bnxt_cp_ring *);
285 void		bnxt_cpr_commit(struct bnxt_softc *, struct bnxt_cp_ring *);
286 void		bnxt_cpr_rollback(struct bnxt_softc *, struct bnxt_cp_ring *);
287 
288 void		bnxt_mark_cpr_invalid(struct bnxt_cp_ring *);
289 void		bnxt_write_cp_doorbell(struct bnxt_softc *, struct bnxt_ring *,
290 		    int);
291 void		bnxt_write_cp_doorbell_index(struct bnxt_softc *,
292 		    struct bnxt_ring *, uint32_t, int);
293 void		bnxt_write_rx_doorbell(struct bnxt_softc *, struct bnxt_ring *,
294 		    int);
295 void		bnxt_write_tx_doorbell(struct bnxt_softc *, struct bnxt_ring *,
296 		    int);
297 
298 int		bnxt_rx_fill(struct bnxt_softc *);
299 u_int		bnxt_rx_fill_slots(struct bnxt_softc *, struct bnxt_ring *, void *,
300 		    struct bnxt_slot *, uint *, int, uint16_t, u_int);
301 void		bnxt_refill(void *);
302 int		bnxt_rx(struct bnxt_softc *, struct bnxt_cp_ring *,
303 		    struct mbuf_list *, int *, int *, struct cmpl_base *);
304 
305 void		bnxt_txeof(struct bnxt_softc *, int *, struct cmpl_base *);
306 
307 int		_hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
308 int		hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
309 void		bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *, void *, uint16_t);
310 int 		bnxt_hwrm_err_map(uint16_t err);
311 
312 /* HWRM Function Prototypes */
313 int		bnxt_hwrm_ring_alloc(struct bnxt_softc *, uint8_t,
314 		    struct bnxt_ring *, uint16_t, uint32_t, int);
315 int		bnxt_hwrm_ring_free(struct bnxt_softc *, uint8_t,
316 		    struct bnxt_ring *);
317 int		bnxt_hwrm_ver_get(struct bnxt_softc *);
318 int		bnxt_hwrm_queue_qportcfg(struct bnxt_softc *);
319 int		bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *);
320 int		bnxt_hwrm_func_qcaps(struct bnxt_softc *);
321 int		bnxt_hwrm_func_qcfg(struct bnxt_softc *);
322 int		bnxt_hwrm_func_reset(struct bnxt_softc *);
323 int		bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *, uint16_t *);
324 int		bnxt_hwrm_vnic_ctx_free(struct bnxt_softc *, uint16_t *);
325 int		bnxt_hwrm_vnic_cfg(struct bnxt_softc *,
326 		    struct bnxt_vnic_info *);
327 int		bnxt_hwrm_vnic_cfg_placement(struct bnxt_softc *,
328 		    struct bnxt_vnic_info *vnic);
329 int		bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *,
330 		    struct bnxt_cp_ring *, uint64_t);
331 int		bnxt_hwrm_stat_ctx_free(struct bnxt_softc *,
332 		    struct bnxt_cp_ring *);
333 int		bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *,
334 		    struct bnxt_grp_info *);
335 int		bnxt_hwrm_ring_grp_free(struct bnxt_softc *,
336 		    struct bnxt_grp_info *);
337 int		bnxt_hwrm_vnic_alloc(struct bnxt_softc *,
338 		    struct bnxt_vnic_info *);
339 int		bnxt_hwrm_vnic_free(struct bnxt_softc *,
340 		    struct bnxt_vnic_info *);
341 int		bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *,
342 		    uint32_t, uint32_t, uint64_t, uint32_t);
343 int		bnxt_hwrm_set_filter(struct bnxt_softc *,
344 		    struct bnxt_vnic_info *);
345 int		bnxt_hwrm_free_filter(struct bnxt_softc *,
346 		    struct bnxt_vnic_info *);
347 int		bnxt_cfg_async_cr(struct bnxt_softc *);
348 int		bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *, uint16_t *,
349 		    uint16_t *, uint32_t *, uint32_t *, uint32_t *, uint32_t *);
350 int		bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *,
351 		    struct ifmediareq *);
352 int		bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *);
353 int		bnxt_get_sffpage(struct bnxt_softc *, struct if_sffpage *);
354 
355 /* not used yet: */
356 #if 0
357 int bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool shutdown);
358 
359 int bnxt_hwrm_port_qstats(struct bnxt_softc *softc);
360 
361 int bnxt_hwrm_rss_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic,
362     uint32_t hash_type);
363 
364 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc);
365 void bnxt_validate_hw_lro_settings(struct bnxt_softc *softc);
366 int bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor,
367     uint8_t *selfreset);
368 int bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type,
369     uint8_t *selfreset);
370 int bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year,
371     uint8_t *month, uint8_t *day, uint8_t *hour, uint8_t *minute,
372     uint8_t *second, uint16_t *millisecond, uint16_t *zone);
373 int bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year,
374     uint8_t month, uint8_t day, uint8_t hour, uint8_t minute, uint8_t second,
375     uint16_t millisecond, uint16_t zone);
376 
377 #endif
378 
379 
/* autoconf attachment glue */
380 struct cfattach bnxt_ca = {
381 	sizeof(struct bnxt_softc), bnxt_match, bnxt_attach
382 };
383 
384 struct cfdriver bnxt_cd = {
385 	NULL, "bnxt", DV_IFNET
386 };
387 
388 struct bnxt_dmamem *
389 bnxt_dmamem_alloc(struct bnxt_softc *sc, size_t size)
390 {
391 	struct bnxt_dmamem *m;
392 	int nsegs;
393 
394 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
395 	if (m == NULL)
396 		return (NULL);
397 
398 	m->bdm_size = size;
399 
400 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
401 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->bdm_map) != 0)
402 		goto bdmfree;
403 
404 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->bdm_seg, 1,
405 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
406 		goto destroy;
407 
408 	if (bus_dmamem_map(sc->sc_dmat, &m->bdm_seg, nsegs, size, &m->bdm_kva,
409 	    BUS_DMA_NOWAIT) != 0)
410 		goto free;
411 
412 	if (bus_dmamap_load(sc->sc_dmat, m->bdm_map, m->bdm_kva, size, NULL,
413 	    BUS_DMA_NOWAIT) != 0)
414 		goto unmap;
415 
416 	return (m);
417 
418 unmap:
419 	bus_dmamem_unmap(sc->sc_dmat, m->bdm_kva, m->bdm_size);
420 free:
421 	bus_dmamem_free(sc->sc_dmat, &m->bdm_seg, 1);
422 destroy:
423 	bus_dmamap_destroy(sc->sc_dmat, m->bdm_map);
424 bdmfree:
425 	free(m, M_DEVBUF, sizeof *m);
426 
427 	return (NULL);
428 }
429 
430 void
431 bnxt_dmamem_free(struct bnxt_softc *sc, struct bnxt_dmamem *m)
432 {
	/*
	 * Undo bnxt_dmamem_alloc() in reverse order: unmap the kva,
	 * release the memory, destroy the map, free the wrapper.
	 * NOTE(review): the map is destroyed while still loaded — no
	 * bus_dmamap_unload() is done first; confirm this is safe on
	 * all archs this driver targets.
	 */
433 	bus_dmamem_unmap(sc->sc_dmat, m->bdm_kva, m->bdm_size);
434 	bus_dmamem_free(sc->sc_dmat, &m->bdm_seg, 1);
435 	bus_dmamap_destroy(sc->sc_dmat, m->bdm_map);
436 	free(m, M_DEVBUF, sizeof *m);
437 }
438 
439 int
440 bnxt_match(struct device *parent, void *match, void *aux)
441 {
442 	return (pci_matchbyid(aux, bnxt_devices, nitems(bnxt_devices)));
443 }
444 
445 void
446 bnxt_attach(struct device *parent, struct device *self, void *aux)
447 {
448 	struct bnxt_softc *sc = (struct bnxt_softc *)self;
449 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input aggint;
450 	struct ifnet *ifp = &sc->sc_ac.ac_if;
451 	struct pci_attach_args *pa = aux;
452 	pci_intr_handle_t ih;
453 	const char *intrstr;
454 	u_int memtype;
455 
456 	/* enable busmaster? */
457 
458 	sc->sc_pc = pa->pa_pc;
459 	sc->sc_tag = pa->pa_tag;
460 	sc->sc_dmat = pa->pa_dmat;
461 
462 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNXT_HWRM_BAR);
463 	if (pci_mapreg_map(pa, BNXT_HWRM_BAR, memtype, 0, &sc->sc_hwrm_t,
464 	    &sc->sc_hwrm_h, NULL, &sc->sc_hwrm_s, 0)) {
465 		printf(": failed to map hwrm\n");
466 		return;
467 	}
468 
469 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNXT_DOORBELL_BAR);
470 	if (pci_mapreg_map(pa, BNXT_DOORBELL_BAR, memtype, 0, &sc->sc_db_t,
471 	    &sc->sc_db_h, NULL, &sc->sc_db_s, 0)) {
472 		printf(": failed to map doorbell\n");
473 		goto unmap_1;
474 	}
475 
476 	BNXT_HWRM_LOCK_INIT(sc, DEVNAME(sc));
477 	sc->sc_cmd_resp = bnxt_dmamem_alloc(sc, PAGE_SIZE);
478 	if (sc->sc_cmd_resp == NULL) {
479 		printf(": failed to allocate command response buffer\n");
480 		goto unmap_2;
481 	}
482 
483 	if (bnxt_hwrm_ver_get(sc) != 0) {
484 		printf(": failed to query version info\n");
485 		goto free_resp;
486 	}
487 
488 	if (bnxt_hwrm_nvm_get_dev_info(sc, NULL, NULL, NULL, NULL, NULL, NULL)
489 	    != 0) {
490 		printf(": failed to get nvram info\n");
491 		goto free_resp;
492 	}
493 
494 	if (bnxt_hwrm_func_drv_rgtr(sc) != 0) {
495 		printf(": failed to register driver with firmware\n");
496 		goto free_resp;
497 	}
498 
499 	if (bnxt_hwrm_func_rgtr_async_events(sc) != 0) {
500 		printf(": failed to register async events\n");
501 		goto free_resp;
502 	}
503 
504 	if (bnxt_hwrm_func_qcaps(sc) != 0) {
505 		printf(": failed to get queue capabilities\n");
506 		goto free_resp;
507 	}
508 
509 	/*
510 	 * devices advertise msi support, but there's no way to tell a
511 	 * completion queue to use msi mode, only legacy or msi-x.
512 	 */
513 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
514 		sc->sc_flags |= BNXT_FLAG_MSIX;
515 	} else if (pci_intr_map(pa, &ih) != 0) {
516 		printf(": unable to map interrupt\n");
517 		goto free_resp;
518 	}
519 	intrstr = pci_intr_string(sc->sc_pc, ih);
520 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_NET | IPL_MPSAFE,
521 	    bnxt_intr, sc, DEVNAME(sc));
522 	if (sc->sc_ih == NULL) {
523 		printf(": unable to establish interrupt");
524 		if (intrstr != NULL)
525 			printf(" at %s", intrstr);
526 		printf("\n");
527 		goto deintr;
528 	}
529 	printf("%s, address %s\n", intrstr, ether_sprintf(sc->sc_ac.ac_enaddr));
530 
531 	if (bnxt_hwrm_func_qcfg(sc) != 0) {
532 		printf("%s: failed to query function config\n", DEVNAME(sc));
533 		goto deintr;
534 	}
535 
536 	if (bnxt_hwrm_queue_qportcfg(sc) != 0) {
537 		printf("%s: failed to query port config\n", DEVNAME(sc));
538 		goto deintr;
539 	}
540 
541 	if (bnxt_hwrm_func_reset(sc) != 0) {
542 		printf("%s: reset failed\n", DEVNAME(sc));
543 		goto deintr;
544 	}
545 
546 	sc->sc_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
547 	sc->sc_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
548 	sc->sc_cp_ring.softc = sc;
549 	sc->sc_cp_ring.ring.id = 0;
550 	sc->sc_cp_ring.ring.doorbell = sc->sc_cp_ring.ring.id * 0x80;
551 	sc->sc_cp_ring.ring.ring_size = (PAGE_SIZE * BNXT_CP_PAGES) /
552 	    sizeof(struct cmpl_base);
553 	sc->sc_cp_ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE * BNXT_CP_PAGES);
554 	if (sc->sc_cp_ring_mem == NULL) {
555 		printf("%s: failed to allocate completion queue memory\n",
556 		    DEVNAME(sc));
557 		goto deintr;
558 	}
559 	sc->sc_cp_ring.ring.vaddr = BNXT_DMA_KVA(sc->sc_cp_ring_mem);
560 	sc->sc_cp_ring.ring.paddr = BNXT_DMA_DVA(sc->sc_cp_ring_mem);
561 	sc->sc_cp_ring.cons = UINT32_MAX;
562 	sc->sc_cp_ring.v_bit = 1;
563 	bnxt_mark_cpr_invalid(&sc->sc_cp_ring);
564 	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
565 	    &sc->sc_cp_ring.ring, (uint16_t)HWRM_NA_SIGNATURE,
566 	    HWRM_NA_SIGNATURE, 1) != 0) {
567 		printf("%s: failed to allocate completion queue\n",
568 		    DEVNAME(sc));
569 		goto free_cp_mem;
570 	}
571 	if (bnxt_cfg_async_cr(sc) != 0) {
572 		printf("%s: failed to set async completion ring\n",
573 		    DEVNAME(sc));
574 		goto free_cp_mem;
575 	}
576 	bnxt_write_cp_doorbell(sc, &sc->sc_cp_ring.ring, 1);
577 
578 	/*
579 	 * set interrupt aggregation parameters for around 10k interrupts
580 	 * per second.  the timers are in units of 80usec, and the counters
581 	 * are based on the minimum rx ring size of 32.
582 	 */
583 	memset(&aggint, 0, sizeof(aggint));
584         bnxt_hwrm_cmd_hdr_init(sc, &aggint,
585 	    HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
586 	aggint.ring_id = htole16(sc->sc_cp_ring.ring.phys_id);
587 	aggint.num_cmpl_dma_aggr = htole16(32);
588 	aggint.num_cmpl_dma_aggr_during_int  = aggint.num_cmpl_dma_aggr;
589 	aggint.cmpl_aggr_dma_tmr = htole16((1000000000 / 20000) / 80);
590 	aggint.cmpl_aggr_dma_tmr_during_int = aggint.cmpl_aggr_dma_tmr;
591 	aggint.int_lat_tmr_min = htole16((1000000000 / 20000) / 80);
592 	aggint.int_lat_tmr_max = htole16((1000000000 / 10000) / 80);
593 	aggint.num_cmpl_aggr_int = htole16(16);
594 	if (hwrm_send_message(sc, &aggint, sizeof(aggint)))
595 		goto free_cp_mem;
596 
597 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
598 	ifp->if_softc = sc;
599 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
600 	ifp->if_xflags = IFXF_MPSAFE;
601 	ifp->if_ioctl = bnxt_ioctl;
602 	ifp->if_qstart = bnxt_start;
603 	ifp->if_watchdog = bnxt_watchdog;
604 	ifp->if_hardmtu = BNXT_MAX_MTU;
605 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
606 	    IFCAP_CSUM_UDPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv6 |
607 	    IFCAP_CSUM_TCPv6;
608 #if NVLAN > 0
609 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
610 #endif
611 	ifq_set_maxlen(&ifp->if_snd, 1024);	/* ? */
612 
613 	ifmedia_init(&sc->sc_media, IFM_IMASK, bnxt_media_change,
614 	    bnxt_media_status);
615 
616 	if_attach(ifp);
617 	ether_ifattach(ifp);
618 
619 	timeout_set(&sc->sc_rx_refill, bnxt_refill, sc);
620 
621 	bnxt_media_autonegotiate(sc);
622 	bnxt_hwrm_port_phy_qcfg(sc, NULL);
623 	return;
624 
625 free_cp_mem:
626 	bnxt_dmamem_free(sc, sc->sc_cp_ring_mem);
627 deintr:
628 	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
629 	sc->sc_ih = NULL;
630 free_resp:
631 	bnxt_dmamem_free(sc, sc->sc_cmd_resp);
632 unmap_2:
633 	bus_space_unmap(sc->sc_hwrm_t, sc->sc_hwrm_h, sc->sc_hwrm_s);
634 	sc->sc_hwrm_s = 0;
635 unmap_1:
636 	bus_space_unmap(sc->sc_db_t, sc->sc_db_h, sc->sc_db_s);
637 	sc->sc_db_s = 0;
638 }
639 
640 void
641 bnxt_free_slots(struct bnxt_softc *sc, struct bnxt_slot *slots, int allocated,
642     int total)
643 {
644 	struct bnxt_slot *bs;
645 
646 	int i = allocated;
647 	while (i-- > 0) {
648 		bs = &slots[i];
649 		bus_dmamap_destroy(sc->sc_dmat, bs->bs_map);
650 	}
651 	free(slots, M_DEVBUF, total * sizeof(*bs));
652 }
653 
654 void
655 bnxt_up(struct bnxt_softc *sc)
656 {
657 	struct ifnet *ifp = &sc->sc_ac.ac_if;
658 	struct bnxt_slot *bs;
659 	int i;
660 
661 	sc->sc_stats_ctx_mem = bnxt_dmamem_alloc(sc,
662 	    sizeof(struct ctx_hw_stats));
663 	if (sc->sc_stats_ctx_mem == NULL) {
664 		printf("%s: failed to allocate stats contexts\n", DEVNAME(sc));
665 		return;
666 	}
667 
668 	sc->sc_tx_ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE);
669 	if (sc->sc_tx_ring_mem == NULL) {
670 		printf("%s: failed to allocate tx ring\n", DEVNAME(sc));
671 		goto free_stats;
672 	}
673 
674 	sc->sc_rx_ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE * 2);
675 	if (sc->sc_rx_ring_mem == NULL) {
676 		printf("%s: failed to allocate rx ring\n", DEVNAME(sc));
677 		goto free_tx;
678 	}
679 
680 	sc->sc_rx_mcast = bnxt_dmamem_alloc(sc, PAGE_SIZE);
681 	if (sc->sc_rx_mcast == NULL) {
682 		printf("%s: failed to allocate multicast address table\n",
683 		    DEVNAME(sc));
684 		goto free_rx;
685 	}
686 
687 	if (bnxt_hwrm_stat_ctx_alloc(sc, &sc->sc_cp_ring,
688 	    BNXT_DMA_DVA(sc->sc_stats_ctx_mem)) != 0) {
689 		printf("%s: failed to set up stats context\n", DEVNAME(sc));
690 		goto free_mc;
691 	}
692 
693 	sc->sc_tx_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
694 	sc->sc_tx_ring.id = BNXT_TX_RING_ID;
695 	sc->sc_tx_ring.doorbell = sc->sc_tx_ring.id * 0x80;
696 	sc->sc_tx_ring.ring_size = PAGE_SIZE / sizeof(struct tx_bd_short);
697 	sc->sc_tx_ring.vaddr = BNXT_DMA_KVA(sc->sc_tx_ring_mem);
698 	sc->sc_tx_ring.paddr = BNXT_DMA_DVA(sc->sc_tx_ring_mem);
699 	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
700 	    &sc->sc_tx_ring, sc->sc_cp_ring.ring.phys_id,
701 	    HWRM_NA_SIGNATURE, 1) != 0) {
702 		printf("%s: failed to set up tx ring\n",
703 		    DEVNAME(sc));
704 		goto dealloc_stats;
705 	}
706 	bnxt_write_tx_doorbell(sc, &sc->sc_tx_ring, 0);
707 
708 	sc->sc_rx_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
709 	sc->sc_rx_ring.id = BNXT_RX_RING_ID;
710 	sc->sc_rx_ring.doorbell = sc->sc_rx_ring.id * 0x80;
711 	sc->sc_rx_ring.ring_size = PAGE_SIZE / sizeof(struct rx_prod_pkt_bd);
712 	sc->sc_rx_ring.vaddr = BNXT_DMA_KVA(sc->sc_rx_ring_mem);
713 	sc->sc_rx_ring.paddr = BNXT_DMA_DVA(sc->sc_rx_ring_mem);
714 	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
715 	    &sc->sc_rx_ring, sc->sc_cp_ring.ring.phys_id,
716 	    HWRM_NA_SIGNATURE, 1) != 0) {
717 		printf("%s: failed to set up rx ring\n",
718 		    DEVNAME(sc));
719 		goto dealloc_tx;
720 	}
721 	bnxt_write_rx_doorbell(sc, &sc->sc_rx_ring, 0);
722 
723 	sc->sc_rx_ag_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
724 	sc->sc_rx_ag_ring.id = BNXT_AG_RING_ID;
725 	sc->sc_rx_ag_ring.doorbell = sc->sc_rx_ag_ring.id * 0x80;
726 	sc->sc_rx_ag_ring.ring_size = PAGE_SIZE / sizeof(struct rx_prod_pkt_bd);
727 	sc->sc_rx_ag_ring.vaddr = BNXT_DMA_KVA(sc->sc_rx_ring_mem) + PAGE_SIZE;
728 	sc->sc_rx_ag_ring.paddr = BNXT_DMA_DVA(sc->sc_rx_ring_mem) + PAGE_SIZE;
729 	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
730 	    &sc->sc_rx_ag_ring, sc->sc_cp_ring.ring.phys_id,
731 	    HWRM_NA_SIGNATURE, 1) != 0) {
732 		printf("%s: failed to set up rx ag ring\n",
733 		    DEVNAME(sc));
734 		goto dealloc_rx;
735 	}
736 	bnxt_write_rx_doorbell(sc, &sc->sc_rx_ag_ring, 0);
737 
738 	sc->sc_ring_group.grp_id = HWRM_NA_SIGNATURE;
739 	sc->sc_ring_group.stats_ctx = sc->sc_cp_ring.stats_ctx_id;
740 	sc->sc_ring_group.rx_ring_id = sc->sc_rx_ring.phys_id;
741 	sc->sc_ring_group.ag_ring_id = sc->sc_rx_ag_ring.phys_id;
742 	sc->sc_ring_group.cp_ring_id = sc->sc_cp_ring.ring.phys_id;
743 	if (bnxt_hwrm_ring_grp_alloc(sc, &sc->sc_ring_group) != 0) {
744 		printf("%s: failed to allocate ring group\n",
745 		    DEVNAME(sc));
746 		goto dealloc_ag;
747 	}
748 
749 	sc->sc_vnic.rss_id = (uint16_t)HWRM_NA_SIGNATURE;
750 	if (bnxt_hwrm_vnic_ctx_alloc(sc, &sc->sc_vnic.rss_id) != 0) {
751 		printf("%s: failed to allocate vnic rss context\n",
752 		    DEVNAME(sc));
753 		goto dealloc_ring_group;
754 	}
755 
756 	sc->sc_vnic.id = (uint16_t)HWRM_NA_SIGNATURE;
757 	sc->sc_vnic.def_ring_grp = sc->sc_ring_group.grp_id;
758 	sc->sc_vnic.mru = BNXT_MAX_MTU;
759 	sc->sc_vnic.cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
760 	sc->sc_vnic.lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
761 	sc->sc_vnic.flags = BNXT_VNIC_FLAG_DEFAULT |
762 	    BNXT_VNIC_FLAG_VLAN_STRIP;
763 	if (bnxt_hwrm_vnic_alloc(sc, &sc->sc_vnic) != 0) {
764 		printf("%s: failed to allocate vnic\n", DEVNAME(sc));
765 		goto dealloc_vnic_ctx;
766 	}
767 
768 	if (bnxt_hwrm_vnic_cfg(sc, &sc->sc_vnic) != 0) {
769 		printf("%s: failed to configure vnic\n", DEVNAME(sc));
770 		goto dealloc_vnic;
771 	}
772 
773 	if (bnxt_hwrm_vnic_cfg_placement(sc, &sc->sc_vnic) != 0) {
774 		printf("%s: failed to configure vnic placement mode\n",
775 		    DEVNAME(sc));
776 		goto dealloc_vnic;
777 	}
778 
779 	sc->sc_vnic.filter_id = -1;
780 	if (bnxt_hwrm_set_filter(sc, &sc->sc_vnic) != 0) {
781 		printf("%s: failed to set vnic filter\n", DEVNAME(sc));
782 		goto dealloc_vnic;
783 	}
784 
785 	/* don't configure rss or tpa yet */
786 
787 	sc->sc_rx_slots = mallocarray(sizeof(*bs), sc->sc_rx_ring.ring_size,
788 	    M_DEVBUF, M_WAITOK | M_ZERO);
789 	if (sc->sc_rx_slots == NULL) {
790 		printf("%s: failed to allocate rx slots\n", DEVNAME(sc));
791 		goto dealloc_filter;
792 	}
793 
794 	for (i = 0; i < sc->sc_rx_ring.ring_size; i++) {
795 		bs = &sc->sc_rx_slots[i];
796 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
797 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bs->bs_map) != 0) {
798 			printf("%s: failed to allocate rx dma maps\n",
799 			    DEVNAME(sc));
800 			goto destroy_rx_slots;
801 		}
802 	}
803 
804 	sc->sc_rx_ag_slots = mallocarray(sizeof(*bs), sc->sc_rx_ag_ring.ring_size,
805 	    M_DEVBUF, M_WAITOK | M_ZERO);
806 	if (sc->sc_rx_ag_slots == NULL) {
807 		printf("%s: failed to allocate rx ag slots\n", DEVNAME(sc));
808 		goto destroy_rx_slots;
809 	}
810 
811 	for (i = 0; i < sc->sc_rx_ag_ring.ring_size; i++) {
812 		bs = &sc->sc_rx_ag_slots[i];
813 		if (bus_dmamap_create(sc->sc_dmat, BNXT_AG_BUFFER_SIZE, 1,
814 		    BNXT_AG_BUFFER_SIZE, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
815 		    &bs->bs_map) != 0) {
816 			printf("%s: failed to allocate rx ag dma maps\n",
817 			    DEVNAME(sc));
818 			goto destroy_rx_ag_slots;
819 		}
820 	}
821 
822 	sc->sc_tx_slots = mallocarray(sizeof(*bs), sc->sc_tx_ring.ring_size,
823 	    M_DEVBUF, M_WAITOK | M_ZERO);
824 	if (sc->sc_tx_slots == NULL) {
825 		printf("%s: failed to allocate tx slots\n", DEVNAME(sc));
826 		goto destroy_rx_ag_slots;
827 	}
828 
829 	for (i = 0; i < sc->sc_tx_ring.ring_size; i++) {
830 		bs = &sc->sc_tx_slots[i];
831 		if (bus_dmamap_create(sc->sc_dmat, BNXT_MAX_MTU, BNXT_MAX_TX_SEGS,
832 		    BNXT_MAX_MTU, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
833 		    &bs->bs_map) != 0) {
834 			printf("%s: failed to allocate tx dma maps\n",
835 			    DEVNAME(sc));
836 			goto destroy_tx_slots;
837 		}
838 	}
839 
840 	bnxt_iff(sc);
841 
842 	/*
843 	 * initially, the rx ring must be filled at least some distance beyond
844 	 * the current consumer index, as it looks like the firmware assumes the
845 	 * ring is full on creation, but doesn't prefetch the whole thing.
846 	 * once the whole ring has been used once, we should be able to back off
847 	 * to 2 or so slots, but we currently don't have a way of doing that.
848 	 */
849 	if_rxr_init(&sc->sc_rxr[0], 32, sc->sc_rx_ring.ring_size - 1);
850 	if_rxr_init(&sc->sc_rxr[1], 32, sc->sc_rx_ag_ring.ring_size - 1);
851 	sc->sc_rx_prod = 0;
852 	sc->sc_rx_cons = 0;
853 	sc->sc_rx_ag_prod = 0;
854 	sc->sc_rx_ag_cons = 0;
855 	bnxt_rx_fill(sc);
856 
857 	SET(ifp->if_flags, IFF_RUNNING);
858 
859 	sc->sc_tx_cons = 0;
860 	sc->sc_tx_prod = 0;
861 	sc->sc_tx_ring_cons = 0;
862 	sc->sc_tx_ring_prod = 0;
863 	ifq_clr_oactive(&ifp->if_snd);
864 	ifq_restart(&ifp->if_snd);
865 
866 	return;
867 
868 destroy_tx_slots:
869 	bnxt_free_slots(sc, sc->sc_tx_slots, i, sc->sc_tx_ring.ring_size);
870 	sc->sc_tx_slots = NULL;
871 
872 	i = sc->sc_rx_ag_ring.ring_size;
873 destroy_rx_ag_slots:
874 	bnxt_free_slots(sc, sc->sc_rx_ag_slots, i, sc->sc_rx_ag_ring.ring_size);
875 	sc->sc_rx_ag_slots = NULL;
876 
877 	i = sc->sc_rx_ring.ring_size;
878 destroy_rx_slots:
879 	bnxt_free_slots(sc, sc->sc_rx_slots, i, sc->sc_rx_ring.ring_size);
880 	sc->sc_rx_slots = NULL;
881 dealloc_filter:
882 	bnxt_hwrm_free_filter(sc, &sc->sc_vnic);
883 dealloc_vnic:
884 	bnxt_hwrm_vnic_free(sc, &sc->sc_vnic);
885 dealloc_vnic_ctx:
886 	bnxt_hwrm_vnic_ctx_free(sc, &sc->sc_vnic.rss_id);
887 dealloc_ring_group:
888 	bnxt_hwrm_ring_grp_free(sc, &sc->sc_ring_group);
889 dealloc_ag:
890 	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
891 	    &sc->sc_rx_ag_ring);
892 dealloc_tx:
893 	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
894 	    &sc->sc_tx_ring);
895 dealloc_rx:
896 	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
897 	    &sc->sc_rx_ring);
898 dealloc_stats:
899 	bnxt_hwrm_stat_ctx_free(sc, &sc->sc_cp_ring);
900 free_mc:
901 	bnxt_dmamem_free(sc, sc->sc_rx_mcast);
902 	sc->sc_rx_mcast = NULL;
903 free_rx:
904 	bnxt_dmamem_free(sc, sc->sc_rx_ring_mem);
905 	sc->sc_rx_ring_mem = NULL;
906 free_tx:
907 	bnxt_dmamem_free(sc, sc->sc_tx_ring_mem);
908 	sc->sc_tx_ring_mem = NULL;
909 free_stats:
910 	bnxt_dmamem_free(sc, sc->sc_stats_ctx_mem);
911 	sc->sc_stats_ctx_mem = NULL;
912 }
913 
/*
 * Bring the interface down: quiesce tx/rx, free all dma slots, and
 * release firmware resources in the reverse order of bnxt_up().
 * The teardown order matters; filters and the vnic must be freed
 * before the rings that back them.
 */
void
bnxt_down(struct bnxt_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	CLR(ifp->if_flags, IFF_RUNNING);

	/* wait for the interrupt handler and any tx start to finish */
	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);

	timeout_del(&sc->sc_rx_refill);

	/* empty rx ring first i guess */

	bnxt_free_slots(sc, sc->sc_tx_slots, sc->sc_tx_ring.ring_size,
	    sc->sc_tx_ring.ring_size);
	sc->sc_tx_slots = NULL;

	bnxt_free_slots(sc, sc->sc_rx_ag_slots, sc->sc_rx_ag_ring.ring_size,
	    sc->sc_rx_ag_ring.ring_size);
	sc->sc_rx_ag_slots = NULL;

	bnxt_free_slots(sc, sc->sc_rx_slots, sc->sc_rx_ring.ring_size,
	    sc->sc_rx_ring.ring_size);
	sc->sc_rx_slots = NULL;

	/* release firmware objects that reference the rings */
	bnxt_hwrm_free_filter(sc, &sc->sc_vnic);
	bnxt_hwrm_vnic_free(sc, &sc->sc_vnic);
	bnxt_hwrm_vnic_ctx_free(sc, &sc->sc_vnic.rss_id);
	bnxt_hwrm_ring_grp_free(sc, &sc->sc_ring_group);
	bnxt_hwrm_stat_ctx_free(sc, &sc->sc_cp_ring);

	/* may need to wait for 500ms here before we can free the rings */

	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
	    &sc->sc_tx_ring);
	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
	    &sc->sc_rx_ag_ring);
	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
	    &sc->sc_rx_ring);

	/* finally release the host memory backing the rings */
	bnxt_dmamem_free(sc, sc->sc_rx_mcast);
	sc->sc_rx_mcast = NULL;

	bnxt_dmamem_free(sc, sc->sc_rx_ring_mem);
	sc->sc_rx_ring_mem = NULL;

	bnxt_dmamem_free(sc, sc->sc_tx_ring_mem);
	sc->sc_tx_ring_mem = NULL;

	bnxt_dmamem_free(sc, sc->sc_stats_ctx_mem);
	sc->sc_stats_ctx_mem = NULL;
}
967 
968 void
969 bnxt_iff(struct bnxt_softc *sc)
970 {
971 	struct ifnet *ifp = &sc->sc_ac.ac_if;
972 	struct ether_multi *enm;
973 	struct ether_multistep step;
974 	char *mc_list;
975 	uint32_t rx_mask, mc_count;
976 
977 	rx_mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST
978 	    | HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST
979 	    | HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
980 
981 	mc_list = BNXT_DMA_KVA(sc->sc_rx_mcast);
982 	mc_count = 0;
983 
984 	if (ifp->if_flags & IFF_PROMISC) {
985 		SET(ifp->if_flags, IFF_ALLMULTI);
986 		rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
987 	} else if ((sc->sc_ac.ac_multirangecnt > 0) ||
988 	    (sc->sc_ac.ac_multicnt > (PAGE_SIZE / ETHER_ADDR_LEN))) {
989 		SET(ifp->if_flags, IFF_ALLMULTI);
990 		rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
991 	} else {
992 		CLR(ifp->if_flags, IFF_ALLMULTI);
993 		ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
994 		while (enm != NULL) {
995 			memcpy(mc_list, enm->enm_addrlo, ETHER_ADDR_LEN);
996 			mc_list += ETHER_ADDR_LEN;
997 			mc_count++;
998 
999 			ETHER_NEXT_MULTI(step, enm);
1000 		}
1001 	}
1002 
1003 	bnxt_hwrm_cfa_l2_set_rx_mask(sc, sc->sc_vnic.id, rx_mask,
1004 	    BNXT_DMA_DVA(sc->sc_rx_mcast), mc_count);
1005 }
1006 
1007 int
1008 bnxt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1009 {
1010 	struct bnxt_softc 	*sc = (struct bnxt_softc *)ifp->if_softc;
1011 	struct ifreq		*ifr = (struct ifreq *)data;
1012 	int			s, error = 0;
1013 
1014 	s = splnet();
1015 	switch (cmd) {
1016 	case SIOCSIFADDR:
1017 		ifp->if_flags |= IFF_UP;
1018 		/* FALLTHROUGH */
1019 
1020 	case SIOCSIFFLAGS:
1021 		if (ISSET(ifp->if_flags, IFF_UP)) {
1022 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1023 				error = ENETRESET;
1024 			else
1025 				bnxt_up(sc);
1026 		} else {
1027 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1028 				bnxt_down(sc);
1029 		}
1030 		break;
1031 
1032 	case SIOCGIFMEDIA:
1033 	case SIOCSIFMEDIA:
1034 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1035 		break;
1036 
1037 	case SIOCGIFRXR:
1038 		error = bnxt_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
1039 		break;
1040 
1041 	case SIOCGIFSFFPAGE:
1042 		error = bnxt_get_sffpage(sc, (struct if_sffpage *)data);
1043 		break;
1044 
1045 	default:
1046 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
1047 	}
1048 
1049 	if (error == ENETRESET) {
1050 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1051 		    (IFF_UP | IFF_RUNNING))
1052 			bnxt_iff(sc);
1053 		error = 0;
1054 	}
1055 
1056 	splx(s);
1057 
1058 	return (error);
1059 }
1060 
1061 int
1062 bnxt_rxrinfo(struct bnxt_softc *sc, struct if_rxrinfo *ifri)
1063 {
1064 	struct if_rxring_info ifr[2];
1065 
1066 	memset(&ifr, 0, sizeof(ifr));
1067 	ifr[0].ifr_size = MCLBYTES;
1068 	ifr[0].ifr_info = sc->sc_rxr[0];
1069 
1070 	ifr[1].ifr_size = BNXT_AG_BUFFER_SIZE;
1071 	ifr[1].ifr_info = sc->sc_rxr[1];
1072 
1073 	return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
1074 }
1075 
/*
 * Load an mbuf chain into the tx slot's dma map.  If the chain has too
 * many segments (EFBIG), compact it with m_defrag() and retry once.
 * On success bs->bs_m takes a reference to the mbuf and 0 is returned;
 * on failure 1 is returned and the caller still owns (and must free) m.
 */
int
bnxt_load_mbuf(struct bnxt_softc *sc, struct bnxt_slot *bs, struct mbuf *m)
{
	switch (bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;

	case EFBIG:
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH: defrag or reload failed */
	default:
		return (1);
	}

	bs->bs_m = m;
	return (0);
}
1097 
/*
 * Transmit start routine.  Each packet is written into the tx ring as
 * a tx_bd_short for the first dma segment, immediately followed by a
 * tx_bd_long_hi carrying checksum/vlan offload flags, then one
 * tx_bd_short per remaining segment.  sc_tx_prod indexes the slot
 * array (one entry per packet); sc_tx_ring_prod indexes ring
 * descriptors (several per packet).
 */
void
bnxt_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct tx_bd_short *txring;
	struct tx_bd_long_hi *txhi;
	struct bnxt_softc *sc = ifp->if_softc;
	struct bnxt_slot *bs;
	bus_dmamap_t map;
	struct mbuf *m;
	u_int idx, free, used, laststart;
	uint16_t txflags;
	int i;

	txring = (struct tx_bd_short *)BNXT_DMA_KVA(sc->sc_tx_ring_mem);

	/* compute how many ring descriptors are free */
	idx = sc->sc_tx_ring_prod;
	free = sc->sc_tx_ring_cons;
	if (free <= idx)
		free += sc->sc_tx_ring.ring_size;
	free -= idx;

	used = 0;

	for (;;) {
		/* +1 for tx_bd_long_hi */
		if (used + BNXT_MAX_TX_SEGS + 1 > free) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		bs = &sc->sc_tx_slots[sc->sc_tx_prod];
		if (bnxt_load_mbuf(sc, bs, m) != 0) {
			/* unmappable packet: drop it and keep going */
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		map = bs->bs_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		used += BNXT_TX_SLOTS(bs);

		/* first segment */
		laststart = idx;
		txring[idx].len = htole16(map->dm_segs[0].ds_len);
		txring[idx].opaque = sc->sc_tx_prod;
		txring[idx].addr = htole64(map->dm_segs[0].ds_addr);

		/* length hint helps the hardware schedule the packet */
		if (map->dm_mapsize < 512)
			txflags = TX_BD_LONG_FLAGS_LHINT_LT512;
		else if (map->dm_mapsize < 1024)
			txflags = TX_BD_LONG_FLAGS_LHINT_LT1K;
		else if (map->dm_mapsize < 2048)
			txflags = TX_BD_LONG_FLAGS_LHINT_LT2K;
		else
			txflags = TX_BD_LONG_FLAGS_LHINT_GTE2K;
		/* NO_CMPL is cleared on the last packet, below */
		txflags |= TX_BD_LONG_TYPE_TX_BD_LONG |
		    TX_BD_LONG_FLAGS_NO_CMPL |
		    (BNXT_TX_SLOTS(bs) << TX_BD_LONG_FLAGS_BD_CNT_SFT);
		if (map->dm_nsegs == 1)
			txflags |= TX_BD_SHORT_FLAGS_PACKET_END;
		txring[idx].flags_type = htole16(txflags);

		idx++;
		if (idx == sc->sc_tx_ring.ring_size)
			idx = 0;

		/* long tx descriptor: checksum and vlan offload flags */
		txhi = (struct tx_bd_long_hi *)&txring[idx];
		memset(txhi, 0, sizeof(*txhi));
		txflags = 0;
		if (m->m_pkthdr.csum_flags & (M_UDP_CSUM_OUT | M_TCP_CSUM_OUT))
			txflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			txflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
		txhi->lflags = htole16(txflags);

#if NVLAN > 0
		if (m->m_flags & M_VLANTAG) {
			txhi->cfa_meta = htole32(m->m_pkthdr.ether_vtag |
			    TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 |
			    TX_BD_LONG_CFA_META_KEY_VLAN_TAG);
		}
#endif

		idx++;
		if (idx == sc->sc_tx_ring.ring_size)
			idx = 0;

		/* remaining segments */
		txflags = TX_BD_SHORT_TYPE_TX_BD_SHORT;
		for (i = 1; i < map->dm_nsegs; i++) {
			if (i == map->dm_nsegs - 1)
				txflags |= TX_BD_SHORT_FLAGS_PACKET_END;
			txring[idx].flags_type = htole16(txflags);

			txring[idx].len =
			    htole16(bs->bs_map->dm_segs[i].ds_len);
			txring[idx].opaque = sc->sc_tx_prod;
			txring[idx].addr =
			    htole64(bs->bs_map->dm_segs[i].ds_addr);

			idx++;
			if (idx == sc->sc_tx_ring.ring_size)
				idx = 0;
		}

		if (++sc->sc_tx_prod >= sc->sc_tx_ring.ring_size)
			sc->sc_tx_prod = 0;
	}

	/* unset NO_CMPL on the first bd of the last packet */
	if (used != 0) {
		txring[laststart].flags_type &=
		    ~htole16(TX_BD_SHORT_FLAGS_NO_CMPL);
	}

	bnxt_write_tx_doorbell(sc, &sc->sc_tx_ring, idx);
	sc->sc_tx_ring_prod = idx;
}
1227 
1228 void
1229 bnxt_handle_async_event(struct bnxt_softc *sc, struct cmpl_base *cmpl)
1230 {
1231 	struct hwrm_async_event_cmpl *ae = (struct hwrm_async_event_cmpl *)cmpl;
1232 	uint16_t type = le16toh(ae->event_id);
1233 
1234 	switch (type) {
1235 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1236 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
1237 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
1238 		bnxt_hwrm_port_phy_qcfg(sc, NULL);
1239 		break;
1240 
1241 	default:
1242 		printf("%s: unexpected async event %x\n", DEVNAME(sc), type);
1243 		break;
1244 	}
1245 }
1246 
/*
 * Return the next completion ring entry if the hardware has finished
 * writing it, or NULL if none is available.  Validity is tracked with a
 * phase bit (v_bit) that flips each time the ring wraps: an entry is
 * valid when its CMPL_BASE_V bit matches the current phase.  The
 * ring position is only advanced when a valid entry is found.
 */
struct cmpl_base *
bnxt_cpr_next_cmpl(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
{
	struct cmpl_base *cmpl;
	uint32_t cons;
	int v_bit;

	cons = cpr->cons + 1;
	v_bit = cpr->v_bit;
	if (cons == cpr->ring.ring_size) {
		/* wrapped: the expected phase flips */
		cons = 0;
		v_bit = !v_bit;
	}
	cmpl = &((struct cmpl_base *)cpr->ring.vaddr)[cons];

	/* not yet written by hardware? */
	if ((!!(cmpl->info3_v & htole32(CMPL_BASE_V))) != (!!v_bit))
		return (NULL);

	cpr->cons = cons;
	cpr->v_bit = v_bit;
	return (cmpl);
}
1269 
/*
 * Record the current completion ring position so that
 * bnxt_cpr_rollback() can restore it if a multi-part completion
 * turns out to be only partially written.
 */
void
bnxt_cpr_commit(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
{
	cpr->commit_cons = cpr->cons;
	cpr->commit_v_bit = cpr->v_bit;
}
1276 
/*
 * Rewind the completion ring to the last committed position, so a
 * partially-consumed multi-part completion is reprocessed on the
 * next interrupt.
 */
void
bnxt_cpr_rollback(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
{
	cpr->cons = cpr->commit_cons;
	cpr->v_bit = cpr->commit_v_bit;
}
1283 
1284 
/*
 * Interrupt handler.  Drains the completion ring, dispatching each
 * entry by type (async event, rx, tx).  Rx completions span multiple
 * ring entries, so a partially-written one causes a rollback and is
 * retried on the next interrupt.  Returns 1 if any completion was
 * handled, -1 otherwise.
 */
int
bnxt_intr(void *xsc)
{
	struct bnxt_softc *sc = (struct bnxt_softc *)xsc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct bnxt_cp_ring *cpr = &sc->sc_cp_ring;
	struct cmpl_base *cmpl;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint16_t type;
	int rxfree, txfree, agfree, rv, rollback;

	/* mask the completion ring interrupt while we process it */
	bnxt_write_cp_doorbell(sc, &cpr->ring, 0);
	rxfree = 0;
	txfree = 0;
	agfree = 0;
	rv = -1;
	cmpl = bnxt_cpr_next_cmpl(sc, cpr);
	while (cmpl != NULL) {
		type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
		rollback = 0;
		switch (type) {
		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
			bnxt_handle_async_event(sc, cmpl);
			break;
		case CMPL_BASE_TYPE_RX_L2:
			/* may consume further entries; can be incomplete */
			rollback = bnxt_rx(sc, cpr, &ml, &rxfree, &agfree, cmpl);
			break;
		case CMPL_BASE_TYPE_TX_L2:
			bnxt_txeof(sc, &txfree, cmpl);
			break;
		default:
			printf("%s: unexpected completion type %u\n",
			    DEVNAME(sc), type);
		}

		if (rollback) {
			bnxt_cpr_rollback(sc, cpr);
			break;
		}
		rv = 1;
		bnxt_cpr_commit(sc, cpr);
		cmpl = bnxt_cpr_next_cmpl(sc, cpr);
	}

	/*
	 * comments in bnxtreg.h suggest we should be writing cpr->cons here,
	 * but writing cpr->cons + 1 makes it stop interrupting.
	 */
	bnxt_write_cp_doorbell_index(sc, &cpr->ring,
	    (cpr->commit_cons+1) % cpr->ring.ring_size, 1);

	if (rxfree != 0) {
		/* advance rx consumers and return the freed slots */
		sc->sc_rx_cons += rxfree;
		if (sc->sc_rx_cons >= sc->sc_rx_ring.ring_size)
			sc->sc_rx_cons -= sc->sc_rx_ring.ring_size;

		sc->sc_rx_ag_cons += agfree;
		if (sc->sc_rx_ag_cons >= sc->sc_rx_ag_ring.ring_size)
			sc->sc_rx_ag_cons -= sc->sc_rx_ag_ring.ring_size;

		if_rxr_put(&sc->sc_rxr[0], rxfree);
		if_rxr_put(&sc->sc_rxr[1], agfree);

		if (ifiq_input(&sc->sc_ac.ac_if.if_rcv, &ml)) {
			if_rxr_livelocked(&sc->sc_rxr[0]);
			if_rxr_livelocked(&sc->sc_rxr[1]);
		}

		/* refill; if a ring went empty, retry from a timeout */
		bnxt_rx_fill(sc);
		if ((sc->sc_rx_cons == sc->sc_rx_prod) ||
		    (sc->sc_rx_ag_cons == sc->sc_rx_ag_prod))
			timeout_add(&sc->sc_rx_refill, 0);
	}
	if (txfree != 0) {
		if (ifq_is_oactive(&ifp->if_snd))
			ifq_restart(&ifp->if_snd);
	}
	return (rv);
}
1364 
/* Interface watchdog entry point; intentionally does nothing. */
void
bnxt_watchdog(struct ifnet *ifp)
{
}
1369 
/*
 * ifmedia status callback: query the phy configuration from firmware,
 * which fills in ifmr and refreshes the link state as a side effect.
 */
void
bnxt_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bnxt_softc *sc = (struct bnxt_softc *)ifp->if_softc;
	bnxt_hwrm_port_phy_qcfg(sc, ifmr);
}
1376 
/*
 * Map a link speed (in bits/sec, from IF_Gbps/IF_Mbps) and a firmware
 * phy type to the corresponding IFM_* media subtype.  Returns 0 when
 * the combination has no matching media type.
 */
uint64_t
bnxt_get_media_type(uint64_t speed, int phy_type)
{
	switch (phy_type) {
	/* direct-attach copper (and unknown) */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4:
		switch (speed) {
		case IF_Gbps(1):
			return IFM_1000_T;
		case IF_Gbps(10):
			return IFM_10G_SFP_CU;
		case IF_Gbps(25):
			return IFM_25G_CR;
		case IF_Gbps(40):
			return IFM_40G_CR4;
		case IF_Gbps(50):
			return IFM_50G_CR2;
		case IF_Gbps(100):
			return IFM_100G_CR4;
		}
		break;

	/* long-reach fibre */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4:
		switch (speed) {
		case IF_Gbps(1):
			return IFM_1000_LX;
		case IF_Gbps(10):
			return IFM_10G_LR;
		case IF_Gbps(25):
			return IFM_25G_LR;
		case IF_Gbps(40):
			return IFM_40G_LR4;
		case IF_Gbps(100):
			return IFM_100G_LR4;
		}
		break;

	/* short-reach fibre */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX:
		switch (speed) {
		case IF_Gbps(1):
			return IFM_1000_SX;
		case IF_Gbps(10):
			return IFM_10G_SR;
		case IF_Gbps(25):
			return IFM_25G_SR;
		case IF_Gbps(40):
			return IFM_40G_SR4;
		case IF_Gbps(100):
			return IFM_100G_SR4;
		}
		break;

	/* extended-reach fibre */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4:
		switch (speed) {
		case IF_Gbps(10):
			return IFM_10G_ER;
		case IF_Gbps(25):
			return IFM_25G_ER;
		}
		/* missing IFM_40G_ER4, IFM_100G_ER4 */
		break;

	/* backplane KR variants */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR:
		switch (speed) {
		case IF_Gbps(10):
			return IFM_10G_KR;
		case IF_Gbps(20):
			return IFM_20G_KR2;
		case IF_Gbps(25):
			return IFM_25G_KR;
		case IF_Gbps(40):
			return IFM_40G_KR4;
		case IF_Gbps(50):
			return IFM_50G_KR2;
		case IF_Gbps(100):
			return IFM_100G_KR4;
		}
		break;

	/* backplane KX variants */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX:
		switch (speed) {
		case IF_Gbps(1):
			return IFM_1000_KX;
		case IF_Mbps(2500):
			return IFM_2500_KX;
		case IF_Gbps(10):
			return IFM_10G_KX4;
		}
		break;

	/* twisted-pair copper */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET:
		switch (speed) {
		case IF_Mbps(10):
			return IFM_10_T;
		case IF_Mbps(100):
			return IFM_100_TX;
		case IF_Gbps(1):
			return IFM_1000_T;
		case IF_Mbps(2500):
			return IFM_2500_T;
		case IF_Gbps(10):
			return IFM_10G_T;
		}
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY:
		switch (speed) {
		case IF_Gbps(1):
			return IFM_1000_SGMII;
		}
		break;

	/* active optical cables */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE:
		switch (speed) {
		case IF_Gbps(10):
			return IFM_10G_AOC;
		case IF_Gbps(25):
			return IFM_25G_AOC;
		case IF_Gbps(40):
			return IFM_40G_AOC;
		case IF_Gbps(100):
			return IFM_100G_AOC;
		}
		break;
	}

	return 0;
}
1521 
1522 void
1523 bnxt_add_media_type(struct bnxt_softc *sc, int supported_speeds, uint64_t speed, uint64_t ifmt)
1524 {
1525 	int speed_bit = 0;
1526 	switch (speed) {
1527 	case IF_Gbps(1):
1528 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB;
1529 		break;
1530 	case IF_Gbps(2):
1531 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB;
1532 		break;
1533 	case IF_Mbps(2500):
1534 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB;
1535 		break;
1536 	case IF_Gbps(10):
1537 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB;
1538 		break;
1539 	case IF_Gbps(20):
1540 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB;
1541 		break;
1542 	case IF_Gbps(25):
1543 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB;
1544 		break;
1545 	case IF_Gbps(40):
1546 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB;
1547 		break;
1548 	case IF_Gbps(50):
1549 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB;
1550 		break;
1551 	case IF_Gbps(100):
1552 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB;
1553 		break;
1554 	}
1555 	if (supported_speeds & speed_bit)
1556 		ifmedia_add(&sc->sc_media, IFM_ETHER | ifmt, 0, NULL);
1557 }
1558 
/*
 * Query the phy configuration from firmware.  Updates the interface
 * baudrate and link state, rebuilds the supported media list, and
 * (when ifmr is non-NULL) fills in the media status for SIOCGIFMEDIA.
 * Returns 0 on success or the hwrm error code.
 */
int
bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *softc, struct ifmediareq *ifmr)
{
	struct ifnet *ifp = &softc->sc_ac.ac_if;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp =
	    BNXT_DMA_KVA(softc->sc_cmd_resp);
	int link_state = LINK_STATE_DOWN;
	uint64_t speeds[] = {
		IF_Gbps(1), IF_Gbps(2), IF_Mbps(2500), IF_Gbps(10), IF_Gbps(20),
		IF_Gbps(25), IF_Gbps(40), IF_Gbps(50), IF_Gbps(100)
	};
	uint64_t media_type;
	int duplex;
	int rc = 0;
	int i;

	BNXT_HWRM_LOCK(softc);
	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_QCFG);

	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc) {
		printf("%s: failed to query port phy config\n", DEVNAME(softc));
		goto exit;
	}

	/* older firmware reports duplex in a different field */
	if (softc->sc_hwrm_ver > 0x10800)
		duplex = resp->duplex_state;
	else
		duplex = resp->duplex_cfg;

	if (resp->link == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) {
		if (duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF)
			link_state = LINK_STATE_HALF_DUPLEX;
		else
			link_state = LINK_STATE_FULL_DUPLEX;

		/* translate the firmware speed code into a baudrate */
		switch (resp->link_speed) {
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB:
			ifp->if_baudrate = IF_Mbps(10);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
			ifp->if_baudrate = IF_Mbps(100);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
			ifp->if_baudrate = IF_Gbps(1);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
			ifp->if_baudrate = IF_Gbps(2);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
			ifp->if_baudrate = IF_Mbps(2500);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
			ifp->if_baudrate = IF_Gbps(10);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
			ifp->if_baudrate = IF_Gbps(20);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
			ifp->if_baudrate = IF_Gbps(25);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
			ifp->if_baudrate = IF_Gbps(40);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
			ifp->if_baudrate = IF_Gbps(50);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
			ifp->if_baudrate = IF_Gbps(100);
			break;
		}
	}

	/* rebuild the media list from the currently supported speeds */
	ifmedia_delete_instance(&softc->sc_media, IFM_INST_ANY);
	for (i = 0; i < nitems(speeds); i++) {
		media_type = bnxt_get_media_type(speeds[i], resp->phy_type);
		if (media_type != 0)
			bnxt_add_media_type(softc, resp->support_speeds,
			    speeds[i], media_type);
	}
	ifmedia_add(&softc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&softc->sc_media, IFM_ETHER|IFM_AUTO);

	if (ifmr != NULL) {
		ifmr->ifm_status = IFM_AVALID;
		if (LINK_STATE_IS_UP(ifp->if_link_state)) {
			ifmr->ifm_status |= IFM_ACTIVE;
			ifmr->ifm_active = IFM_ETHER | IFM_AUTO;
			if (resp->pause & HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX)
				ifmr->ifm_active |= IFM_ETH_TXPAUSE;
			if (resp->pause & HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX)
				ifmr->ifm_active |= IFM_ETH_RXPAUSE;
			if (duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF)
				ifmr->ifm_active |= IFM_HDX;
			else
				ifmr->ifm_active |= IFM_FDX;

			media_type = bnxt_get_media_type(ifp->if_baudrate, resp->phy_type);
			if (media_type != 0)
				ifmr->ifm_active |= media_type;
		}
	}

exit:
	BNXT_HWRM_UNLOCK(softc);

	/* report link state changes after dropping the hwrm lock */
	if (rc == 0 && (link_state != ifp->if_link_state)) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}

	return rc;
}
1673 
/*
 * ifmedia change callback: translate the selected media subtype into a
 * forced link speed and push it to firmware, or restart autonegotiation
 * when the subtype has no fixed speed mapping (e.g. IFM_AUTO).
 * Returns an hwrm error code, EINVAL for non-ethernet media, or ENODEV
 * on NPAR (partitioned) functions which may not change phy settings.
 */
int
bnxt_media_change(struct ifnet *ifp)
{
	struct bnxt_softc *sc = (struct bnxt_softc *)ifp->if_softc;
	struct hwrm_port_phy_cfg_input req = {0};
	uint64_t link_speed;

	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return EINVAL;

	if (sc->sc_flags & BNXT_FLAG_NPAR)
		return ENODEV;

	bnxt_hwrm_cmd_hdr_init(sc, &req, HWRM_PORT_PHY_CFG);

	/* map the media subtype to a firmware forced-speed code */
	switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
	case IFM_100G_CR4:
	case IFM_100G_SR4:
	case IFM_100G_KR4:
	case IFM_100G_LR4:
	case IFM_100G_AOC:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB;
		break;

	case IFM_50G_CR2:
	case IFM_50G_KR2:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB;
		break;

	case IFM_40G_CR4:
	case IFM_40G_SR4:
	case IFM_40G_LR4:
	case IFM_40G_KR4:
	case IFM_40G_AOC:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB;
		break;

	case IFM_25G_CR:
	case IFM_25G_KR:
	case IFM_25G_SR:
	case IFM_25G_LR:
	case IFM_25G_ER:
	case IFM_25G_AOC:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB;
		break;

	case IFM_10G_LR:
	case IFM_10G_SR:
	case IFM_10G_CX4:
	case IFM_10G_T:
	case IFM_10G_SFP_CU:
	case IFM_10G_LRM:
	case IFM_10G_KX4:
	case IFM_10G_KR:
	case IFM_10G_CR1:
	case IFM_10G_ER:
	case IFM_10G_AOC:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB;
		break;

	case IFM_2500_SX:
	case IFM_2500_KX:
	case IFM_2500_T:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB;
		break;

	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_CX:
	case IFM_1000_KX:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB;
		break;

	case IFM_100_TX:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB;
		break;

	default:
		/* no fixed speed: fall back to autonegotiation below */
		link_speed = 0;
	}

	req.enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX);
	req.auto_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
	if (link_speed == 0) {
		req.auto_mode |=
		    HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		req.flags |=
		    htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
		req.enables |=
		    htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE);
	} else {
		req.force_link_speed = htole16(link_speed);
		req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE);
	}
	req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);

	return hwrm_send_message(sc, &req, sizeof(req));
}
1773 
1774 int
1775 bnxt_media_autonegotiate(struct bnxt_softc *sc)
1776 {
1777 	struct hwrm_port_phy_cfg_input req = {0};
1778 
1779 	if (sc->sc_flags & BNXT_FLAG_NPAR)
1780 		return ENODEV;
1781 
1782 	bnxt_hwrm_cmd_hdr_init(sc, &req, HWRM_PORT_PHY_CFG);
1783 	req.auto_mode |= HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
1784 	req.auto_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1785 	req.enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE |
1786 	    HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX);
1787 	req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
1788 	req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);
1789 
1790 	return hwrm_send_message(sc, &req, sizeof(req));
1791 }
1792 
1793 
1794 void
1795 bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr)
1796 {
1797 	struct cmpl_base *cmp = (void *)cpr->ring.vaddr;
1798 	int i;
1799 
1800 	for (i = 0; i < cpr->ring.ring_size; i++)
1801 		cmp[i].info3_v = !cpr->v_bit;
1802 }
1803 
1804 void
1805 bnxt_write_cp_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring,
1806     int enable)
1807 {
1808 	uint32_t val = CMPL_DOORBELL_KEY_CMPL;
1809 	if (enable == 0)
1810 		val |= CMPL_DOORBELL_MASK;
1811 
1812 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
1813 	    BUS_SPACE_BARRIER_WRITE);
1814 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, 0, sc->sc_db_s,
1815 	    BUS_SPACE_BARRIER_WRITE);
1816 	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
1817 	    htole32(val));
1818 }
1819 
1820 void
1821 bnxt_write_cp_doorbell_index(struct bnxt_softc *sc, struct bnxt_ring *ring,
1822     uint32_t index, int enable)
1823 {
1824 	uint32_t val = CMPL_DOORBELL_KEY_CMPL | CMPL_DOORBELL_IDX_VALID |
1825 	    (index & CMPL_DOORBELL_IDX_MASK);
1826 	if (enable == 0)
1827 		val |= CMPL_DOORBELL_MASK;
1828 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
1829 	    BUS_SPACE_BARRIER_WRITE);
1830 	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
1831 	    htole32(val));
1832 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, 0, sc->sc_db_s,
1833 	    BUS_SPACE_BARRIER_WRITE);
1834 }
1835 
1836 void
1837 bnxt_write_rx_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring, int index)
1838 {
1839 	uint32_t val = RX_DOORBELL_KEY_RX | index;
1840 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
1841 	    BUS_SPACE_BARRIER_WRITE);
1842 	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
1843 	    htole32(val));
1844 
1845 	/* second write isn't necessary on all hardware */
1846 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
1847 	    BUS_SPACE_BARRIER_WRITE);
1848 	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
1849 	    htole32(val));
1850 }
1851 
1852 void
1853 bnxt_write_tx_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring, int index)
1854 {
1855 	uint32_t val = TX_DOORBELL_KEY_TX | index;
1856 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
1857 	    BUS_SPACE_BARRIER_WRITE);
1858 	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
1859 	    htole32(val));
1860 
1861 	/* second write isn't necessary on all hardware */
1862 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
1863 	    BUS_SPACE_BARRIER_WRITE);
1864 	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
1865 	    htole32(val));
1866 }
1867 
1868 u_int
1869 bnxt_rx_fill_slots(struct bnxt_softc *sc, struct bnxt_ring *ring, void *ring_mem,
1870     struct bnxt_slot *slots, uint *prod, int bufsize, uint16_t bdtype,
1871     u_int nslots)
1872 {
1873 	struct rx_prod_pkt_bd *rxring;
1874 	struct bnxt_slot *bs;
1875 	struct mbuf *m;
1876 	uint p, fills;
1877 
1878 	rxring = (struct rx_prod_pkt_bd *)ring_mem;
1879 	p = *prod;
1880 	for (fills = 0; fills < nslots; fills++) {
1881 		bs = &slots[p];
1882 		m = MCLGETL(NULL, M_DONTWAIT, bufsize);
1883 		if (m == NULL)
1884 			break;
1885 
1886 		m->m_len = m->m_pkthdr.len = bufsize;
1887 		if (bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,
1888 		    BUS_DMA_NOWAIT) != 0) {
1889 			m_freem(m);
1890 			break;
1891 		}
1892 		bs->bs_m = m;
1893 
1894 		rxring[p].flags_type = htole16(bdtype);
1895 		rxring[p].len = htole16(bufsize);
1896 		rxring[p].opaque = p;
1897 		rxring[p].addr = htole64(bs->bs_map->dm_segs[0].ds_addr);
1898 
1899 		if (++p >= ring->ring_size)
1900 			p = 0;
1901 	}
1902 
1903 	if (fills != 0)
1904 		bnxt_write_rx_doorbell(sc, ring, p);
1905 	*prod = p;
1906 
1907 	return (nslots - fills);
1908 }
1909 
1910 int
1911 bnxt_rx_fill(struct bnxt_softc *sc)
1912 {
1913 	u_int slots;
1914 	int rv = 0;
1915 
1916 	slots = if_rxr_get(&sc->sc_rxr[0], sc->sc_rx_ring.ring_size);
1917 	if (slots > 0) {
1918 		slots = bnxt_rx_fill_slots(sc, &sc->sc_rx_ring,
1919 		    BNXT_DMA_KVA(sc->sc_rx_ring_mem), sc->sc_rx_slots,
1920 		    &sc->sc_rx_prod, MCLBYTES,
1921 		    RX_PROD_PKT_BD_TYPE_RX_PROD_PKT, slots);
1922 		if_rxr_put(&sc->sc_rxr[0], slots);
1923 	} else
1924 		rv = 1;
1925 
1926 	slots = if_rxr_get(&sc->sc_rxr[1],  sc->sc_rx_ag_ring.ring_size);
1927 	if (slots > 0) {
1928 		slots = bnxt_rx_fill_slots(sc, &sc->sc_rx_ag_ring,
1929 		    BNXT_DMA_KVA(sc->sc_rx_ring_mem) + PAGE_SIZE,
1930 		    sc->sc_rx_ag_slots, &sc->sc_rx_ag_prod,
1931 		    BNXT_AG_BUFFER_SIZE,
1932 		    RX_PROD_AGG_BD_TYPE_RX_PROD_AGG, slots);
1933 		if_rxr_put(&sc->sc_rxr[1], slots);
1934 	} else
1935 		rv = 1;
1936 
1937 	return (rv);
1938 }
1939 
/*
 * Timeout callback used when an rx ring could not be refilled from
 * the interrupt handler; try again, and reschedule while the normal
 * rx ring remains empty.
 */
void
bnxt_refill(void *xsc)
{
	struct bnxt_softc *sc = xsc;

	bnxt_rx_fill(sc);

	if (sc->sc_rx_cons == sc->sc_rx_prod)
		timeout_add(&sc->sc_rx_refill, 1);
}
1950 
/*
 * Process one rx packet completion.  An rx completion spans at least
 * two completion ring entries (rx_pkt_cmpl + rx_pkt_cmpl_hi), plus an
 * rx_abuf_cmpl when the packet used an aggregation buffer.  Returns 1
 * (rollback) if a later part of the completion has not been written
 * yet, so the whole thing is retried on the next interrupt; 0 when the
 * packet was consumed and queued on ml.  *slots/*agslots count freed
 * rx and aggregation ring entries for the caller.
 */
int
bnxt_rx(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr, struct mbuf_list *ml,
    int *slots, int *agslots, struct cmpl_base *cmpl)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf *m, *am;
	struct bnxt_slot *bs;
	struct rx_pkt_cmpl *rx = (struct rx_pkt_cmpl *)cmpl;
	struct rx_pkt_cmpl_hi *rxhi;
	struct rx_abuf_cmpl *ag;
	uint32_t flags;
	uint16_t errors;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return (0);

	/* second part of the rx completion */
	rxhi = (struct rx_pkt_cmpl_hi *)bnxt_cpr_next_cmpl(sc, cpr);
	if (rxhi == NULL) {
		return (1);
	}

	/* packets over 2k in size use an aggregation buffer completion too */
	ag = NULL;
	if ((rx->agg_bufs_v1 >> RX_PKT_CMPL_AGG_BUFS_SFT) != 0) {
		ag = (struct rx_abuf_cmpl *)bnxt_cpr_next_cmpl(sc, cpr);
		if (ag == NULL) {
			return (1);
		}
	}

	/* take the mbuf for this slot; opaque was set to the slot index */
	bs = &sc->sc_rx_slots[rx->opaque];
	bus_dmamap_sync(sc->sc_dmat, bs->bs_map, 0, bs->bs_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, bs->bs_map);

	m = bs->bs_m;
	bs->bs_m = NULL;
	m->m_pkthdr.len = m->m_len = letoh16(rx->len);
	(*slots)++;

	/* checksum flags */
	flags = lemtoh32(&rxhi->flags2);
	errors = lemtoh16(&rxhi->errors_v2);
	if ((flags & RX_PKT_CMPL_FLAGS2_IP_CS_CALC) != 0 &&
	    (errors & RX_PKT_CMPL_ERRORS_IP_CS_ERROR) == 0)
		m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

	if ((flags & RX_PKT_CMPL_FLAGS2_L4_CS_CALC) != 0 &&
	    (errors & RX_PKT_CMPL_ERRORS_L4_CS_ERROR) == 0)
		m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
		    M_UDP_CSUM_IN_OK;

#if NVLAN > 0
	/* hardware-stripped vlan tag, if present */
	if ((flags & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) ==
	    RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
		m->m_pkthdr.ether_vtag = lemtoh16(&rxhi->metadata);
		m->m_flags |= M_VLANTAG;
	}
#endif

	/* chain the aggregation buffer onto the packet */
	if (ag != NULL) {
		bs = &sc->sc_rx_ag_slots[ag->opaque];
		bus_dmamap_sync(sc->sc_dmat, bs->bs_map, 0,
		    bs->bs_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, bs->bs_map);

		am = bs->bs_m;
		bs->bs_m = NULL;
		am->m_len = letoh16(ag->len);
		m->m_next = am;
		m->m_pkthdr.len += am->m_len;
		(*agslots)++;
	}

	ml_enqueue(ml, m);
	return (0);
}
2029 
/*
 * Process a tx completion: reclaim every tx slot from the current
 * consumer up to and including the one named by the completion's
 * opaque value, syncing and unloading the DMA maps and freeing the
 * transmitted mbufs.  *txfree is incremented by the number of tx
 * descriptors reclaimed.
 */
void
bnxt_txeof(struct bnxt_softc *sc, int *txfree, struct cmpl_base *cmpl)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct tx_cmpl *txcmpl = (struct tx_cmpl *)cmpl;
	struct bnxt_slot *bs;
	bus_dmamap_t map;
	u_int idx, segs, last;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return;

	idx = sc->sc_tx_ring_cons;
	last = sc->sc_tx_cons;
	do {
		bs = &sc->sc_tx_slots[sc->sc_tx_cons];
		map = bs->bs_map;

		/* descriptors occupied by this packet */
		segs = BNXT_TX_SLOTS(bs);
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(bs->bs_m);
		bs->bs_m = NULL;

		/* advance the descriptor ring consumer, wrapping at ring_size */
		idx += segs;
		(*txfree) += segs;
		if (idx >= sc->sc_tx_ring.ring_size)
			idx -= sc->sc_tx_ring.ring_size;

		/* remember the slot just processed; stop once it matches opaque */
		last = sc->sc_tx_cons;
		if (++sc->sc_tx_cons >= sc->sc_tx_ring.ring_size)
			sc->sc_tx_cons = 0;

	} while (last != txcmpl->opaque);
	sc->sc_tx_ring_cons = idx;
}
2067 
/*
 * HWRM (Hardware Resource Manager) firmware command interface.
 * This section corresponds to the FreeBSD driver's bnxt_hwrm.c.
 */
2069 
2070 int
2071 bnxt_hwrm_err_map(uint16_t err)
2072 {
2073 	int rc;
2074 
2075 	switch (err) {
2076 	case HWRM_ERR_CODE_SUCCESS:
2077 		return 0;
2078 	case HWRM_ERR_CODE_INVALID_PARAMS:
2079 	case HWRM_ERR_CODE_INVALID_FLAGS:
2080 	case HWRM_ERR_CODE_INVALID_ENABLES:
2081 		return EINVAL;
2082 	case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
2083 		return EACCES;
2084 	case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
2085 		return ENOMEM;
2086 	case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
2087 		return ENOSYS;
2088 	case HWRM_ERR_CODE_FAIL:
2089 		return EIO;
2090 	case HWRM_ERR_CODE_HWRM_ERROR:
2091 	case HWRM_ERR_CODE_UNKNOWN_ERR:
2092 	default:
2093 		return EIO;
2094 	}
2095 
2096 	return rc;
2097 }
2098 
/*
 * Fill in the common HWRM request header: the request type, sentinel
 * completion ring and target ids (0xffff), and the DMA address of the
 * buffer the firmware writes its response into.  The sequence id is
 * assigned later, in _hwrm_send_message().
 */
void
bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *softc, void *request,
    uint16_t req_type)
{
	struct input *req = request;

	req->req_type = htole16(req_type);
	req->cmpl_ring = 0xffff;
	req->target_id = 0xffff;
	req->resp_addr = htole64(BNXT_DMA_DVA(softc->sc_cmd_resp));
}
2110 
/*
 * Send one HWRM command to the firmware and poll for its response.
 * The request is written word-by-word into the hwrm register window
 * (or, in short command mode, staged in DMA memory and described by a
 * hwrm_short_input), then the doorbell at offset 0x100 is rung.  The
 * firmware DMAs its reply into sc_cmd_resp and finishes by writing
 * the valid key as the last byte of the response.
 *
 * Callers must hold the HWRM lock; use hwrm_send_message() otherwise.
 * Returns 0 on success, ETIMEDOUT if the firmware does not respond
 * within sc_cmd_timeo polls, or an errno mapped from the firmware
 * error code.
 */
int
_hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
{
	struct input *req = msg;
	struct hwrm_err_output *resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
	uint32_t *data = msg;
	int i;
	uint8_t *valid;
	uint16_t err;
	uint16_t max_req_len = HWRM_MAX_REQ_LEN;
	struct hwrm_short_input short_input = {0};

	/* TODO: DMASYNC in here. */
	req->seq_id = htole16(softc->sc_cmd_seq++);
	memset(resp, 0, PAGE_SIZE);

	/*
	 * Short command mode: copy the full request into the command
	 * DMA buffer and send a short descriptor pointing at it
	 * instead of the request itself.
	 */
	if (softc->sc_flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = BNXT_DMA_KVA(softc->sc_cmd_resp);

		memcpy(short_cmd_req, req, msg_len);
		memset((uint8_t *) short_cmd_req + msg_len, 0,
		    softc->sc_max_req_len - msg_len);

		short_input.req_type = req->req_type;
		short_input.signature =
		    htole16(HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
		short_input.size = htole16(msg_len);
		short_input.req_addr =
		    htole64(BNXT_DMA_DVA(softc->sc_cmd_resp));

		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		membar_sync();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bus_space_write_4(softc->sc_hwrm_t,
				  softc->sc_hwrm_h,
				  i, *data);
		data++;
	}

	/* Clear to the end of the request buffer */
	for (i = msg_len; i < max_req_len; i += 4)
		bus_space_write_4(softc->sc_hwrm_t, softc->sc_hwrm_h,
		    i, 0);

	/* Ring channel doorbell */
	bus_space_write_4(softc->sc_hwrm_t, softc->sc_hwrm_h, 0x100,
	    htole32(1));

	/* Check if response len is updated */
	for (i = 0; i < softc->sc_cmd_timeo; i++) {
		/* sanity-bound the length the firmware reported */
		if (resp->resp_len && resp->resp_len <= 4096)
			break;
		DELAY(1000);
	}
	if (i >= softc->sc_cmd_timeo) {
		printf("%s: timeout sending %s: (timeout: %u) seq: %d\n",
		    DEVNAME(softc), GET_HWRM_REQ_TYPE(req->req_type),
		    softc->sc_cmd_timeo,
		    le16toh(req->seq_id));
		return ETIMEDOUT;
	}
	/* Last byte of resp contains the valid key */
	valid = (uint8_t *)resp + resp->resp_len - 1;
	for (i = 0; i < softc->sc_cmd_timeo; i++) {
		if (*valid == HWRM_RESP_VALID_KEY)
			break;
		DELAY(1000);
	}
	if (i >= softc->sc_cmd_timeo) {
		printf("%s: timeout sending %s: "
		    "(timeout: %u) msg {0x%x 0x%x} len:%d v: %d\n",
		    DEVNAME(softc), GET_HWRM_REQ_TYPE(req->req_type),
		    softc->sc_cmd_timeo, le16toh(req->req_type),
		    le16toh(req->seq_id), msg_len,
		    *valid);
		return ETIMEDOUT;
	}

	err = le16toh(resp->error_code);
	if (err) {
		/* HWRM_ERR_CODE_FAIL is a "normal" error, don't log */
		if (err != HWRM_ERR_CODE_FAIL) {
			printf("%s: %s command returned %s error.\n",
			    DEVNAME(softc),
			    GET_HWRM_REQ_TYPE(req->req_type),
			    GET_HWRM_ERROR_CODE(err));
		}
		return bnxt_hwrm_err_map(err);
	}

	return 0;
}
2211 
2212 
2213 int
2214 hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
2215 {
2216 	int rc;
2217 
2218 	BNXT_HWRM_LOCK(softc);
2219 	rc = _hwrm_send_message(softc, msg, msg_len);
2220 	BNXT_HWRM_UNLOCK(softc);
2221 	return rc;
2222 }
2223 
2224 
2225 int
2226 bnxt_hwrm_queue_qportcfg(struct bnxt_softc *softc)
2227 {
2228 	struct hwrm_queue_qportcfg_input req = {0};
2229 	struct hwrm_queue_qportcfg_output *resp =
2230 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
2231 
2232 	int	i, rc = 0;
2233 	uint8_t	*qptr;
2234 
2235 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_QPORTCFG);
2236 
2237 	BNXT_HWRM_LOCK(softc);
2238 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2239 	if (rc)
2240 		goto qportcfg_exit;
2241 
2242 	if (!resp->max_configurable_queues) {
2243 		rc = -EINVAL;
2244 		goto qportcfg_exit;
2245 	}
2246 	softc->sc_max_tc = resp->max_configurable_queues;
2247 	if (softc->sc_max_tc > BNXT_MAX_QUEUE)
2248 		softc->sc_max_tc = BNXT_MAX_QUEUE;
2249 
2250 	qptr = &resp->queue_id0;
2251 	for (i = 0; i < softc->sc_max_tc; i++) {
2252 		softc->sc_q_info[i].id = *qptr++;
2253 		softc->sc_q_info[i].profile = *qptr++;
2254 	}
2255 
2256 qportcfg_exit:
2257 	BNXT_HWRM_UNLOCK(softc);
2258 	return rc;
2259 }
2260 
/*
 * Query firmware and interface version information, and pick up the
 * command channel parameters: maximum request window length, default
 * command timeout, and whether short command mode must be used.
 * Prints the firmware version as part of the attach output.
 */
int
bnxt_hwrm_ver_get(struct bnxt_softc *softc)
{
	struct hwrm_ver_get_input	req = {0};
	struct hwrm_ver_get_output	*resp =
	    BNXT_DMA_KVA(softc->sc_cmd_resp);
	int				rc;
#if 0
	const char nastr[] = "<not installed>";
	const char naver[] = "<N/A>";
#endif
	uint32_t dev_caps_cfg;

	/* defaults, used until the response tells us otherwise */
	softc->sc_max_req_len = HWRM_MAX_REQ_LEN;
	softc->sc_cmd_timeo = 1000;
	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VER_GET);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	/* continues the autoconf attach line (note the leading ": ") */
	printf(": fw ver %d.%d.%d, ", resp->hwrm_fw_maj, resp->hwrm_fw_min,
	    resp->hwrm_fw_bld);

	/* packed maj.min.upd interface version, for later comparisons */
	softc->sc_hwrm_ver = (resp->hwrm_intf_maj << 16) |
	    (resp->hwrm_intf_min << 8) | resp->hwrm_intf_upd;
#if 0
	snprintf(softc->ver_info->hwrm_if_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
	    resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
	softc->ver_info->hwrm_if_major = resp->hwrm_intf_maj;
	softc->ver_info->hwrm_if_minor = resp->hwrm_intf_min;
	softc->ver_info->hwrm_if_update = resp->hwrm_intf_upd;
	snprintf(softc->ver_info->hwrm_fw_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
	    resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	strlcpy(softc->ver_info->driver_hwrm_if_ver, HWRM_VERSION_STR,
	    BNXT_VERSTR_SIZE);
	strlcpy(softc->ver_info->hwrm_fw_name, resp->hwrm_fw_name,
	    BNXT_NAME_SIZE);

	if (resp->mgmt_fw_maj == 0 && resp->mgmt_fw_min == 0 &&
	    resp->mgmt_fw_bld == 0) {
		strlcpy(softc->ver_info->mgmt_fw_ver, naver, BNXT_VERSTR_SIZE);
		strlcpy(softc->ver_info->mgmt_fw_name, nastr, BNXT_NAME_SIZE);
	}
	else {
		snprintf(softc->ver_info->mgmt_fw_ver, BNXT_VERSTR_SIZE,
		    "%d.%d.%d", resp->mgmt_fw_maj, resp->mgmt_fw_min,
		    resp->mgmt_fw_bld);
		strlcpy(softc->ver_info->mgmt_fw_name, resp->mgmt_fw_name,
		    BNXT_NAME_SIZE);
	}
	if (resp->netctrl_fw_maj == 0 && resp->netctrl_fw_min == 0 &&
	    resp->netctrl_fw_bld == 0) {
		strlcpy(softc->ver_info->netctrl_fw_ver, naver,
		    BNXT_VERSTR_SIZE);
		strlcpy(softc->ver_info->netctrl_fw_name, nastr,
		    BNXT_NAME_SIZE);
	}
	else {
		snprintf(softc->ver_info->netctrl_fw_ver, BNXT_VERSTR_SIZE,
		    "%d.%d.%d", resp->netctrl_fw_maj, resp->netctrl_fw_min,
		    resp->netctrl_fw_bld);
		strlcpy(softc->ver_info->netctrl_fw_name, resp->netctrl_fw_name,
		    BNXT_NAME_SIZE);
	}
	if (resp->roce_fw_maj == 0 && resp->roce_fw_min == 0 &&
	    resp->roce_fw_bld == 0) {
		strlcpy(softc->ver_info->roce_fw_ver, naver, BNXT_VERSTR_SIZE);
		strlcpy(softc->ver_info->roce_fw_name, nastr, BNXT_NAME_SIZE);
	}
	else {
		snprintf(softc->ver_info->roce_fw_ver, BNXT_VERSTR_SIZE,
		    "%d.%d.%d", resp->roce_fw_maj, resp->roce_fw_min,
		    resp->roce_fw_bld);
		strlcpy(softc->ver_info->roce_fw_name, resp->roce_fw_name,
		    BNXT_NAME_SIZE);
	}
	softc->ver_info->chip_num = le16toh(resp->chip_num);
	softc->ver_info->chip_rev = resp->chip_rev;
	softc->ver_info->chip_metal = resp->chip_metal;
	softc->ver_info->chip_bond_id = resp->chip_bond_id;
	softc->ver_info->chip_type = resp->chip_platform_type;
#endif

	if (resp->max_req_win_len)
		softc->sc_max_req_len = le16toh(resp->max_req_win_len);
	if (resp->def_req_timeout)
		softc->sc_cmd_timeo = le16toh(resp->def_req_timeout);

	/* use short command mode if the firmware supports and requires it */
	dev_caps_cfg = le32toh(resp->dev_caps_cfg);
	if ((dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
		softc->sc_flags |= BNXT_FLAG_SHORT_CMD;

fail:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}
2364 
2365 
2366 int
2367 bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *softc)
2368 {
2369 	struct hwrm_func_drv_rgtr_input req = {0};
2370 
2371 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);
2372 
2373 	req.enables = htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
2374 	    HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE);
2375 	req.os_type = htole16(HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD);
2376 
2377 	req.ver_maj = 6;
2378 	req.ver_min = 4;
2379 	req.ver_upd = 0;
2380 
2381 	return hwrm_send_message(softc, &req, sizeof(req));
2382 }
2383 
#if 0

/*
 * Currently compiled out: driver unregister, with an optional
 * prepare-for-shutdown flag.
 */
int
bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool shutdown)
{
	struct hwrm_func_drv_unrgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_UNRGTR);
	if (shutdown == true)
		req.flags |=
		    HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN;
	return hwrm_send_message(softc, &req, sizeof(req));
}

#endif
2399 
2400 int
2401 bnxt_hwrm_func_qcaps(struct bnxt_softc *softc)
2402 {
2403 	int rc = 0;
2404 	struct hwrm_func_qcaps_input req = {0};
2405 	struct hwrm_func_qcaps_output *resp =
2406 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
2407 	/* struct bnxt_func_info *func = &softc->func; */
2408 
2409 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCAPS);
2410 	req.fid = htole16(0xffff);
2411 
2412 	BNXT_HWRM_LOCK(softc);
2413 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2414 	if (rc)
2415 		goto fail;
2416 
2417 	if (resp->flags &
2418 	    htole32(HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED))
2419 		softc->sc_flags |= BNXT_FLAG_WOL_CAP;
2420 
2421 	memcpy(softc->sc_ac.ac_enaddr, resp->mac_address, 6);
2422 	/*
2423 	func->fw_fid = le16toh(resp->fid);
2424 	memcpy(func->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
2425 	func->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
2426 	func->max_cp_rings = le16toh(resp->max_cmpl_rings);
2427 	func->max_tx_rings = le16toh(resp->max_tx_rings);
2428 	func->max_rx_rings = le16toh(resp->max_rx_rings);
2429 	func->max_hw_ring_grps = le32toh(resp->max_hw_ring_grps);
2430 	if (!func->max_hw_ring_grps)
2431 		func->max_hw_ring_grps = func->max_tx_rings;
2432 	func->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
2433 	func->max_vnics = le16toh(resp->max_vnics);
2434 	func->max_stat_ctxs = le16toh(resp->max_stat_ctx);
2435 	if (BNXT_PF(softc)) {
2436 		struct bnxt_pf_info *pf = &softc->pf;
2437 
2438 		pf->port_id = le16toh(resp->port_id);
2439 		pf->first_vf_id = le16toh(resp->first_vf_id);
2440 		pf->max_vfs = le16toh(resp->max_vfs);
2441 		pf->max_encap_records = le32toh(resp->max_encap_records);
2442 		pf->max_decap_records = le32toh(resp->max_decap_records);
2443 		pf->max_tx_em_flows = le32toh(resp->max_tx_em_flows);
2444 		pf->max_tx_wm_flows = le32toh(resp->max_tx_wm_flows);
2445 		pf->max_rx_em_flows = le32toh(resp->max_rx_em_flows);
2446 		pf->max_rx_wm_flows = le32toh(resp->max_rx_wm_flows);
2447 	}
2448 	if (!_is_valid_ether_addr(func->mac_addr)) {
2449 		device_printf(softc->dev, "Invalid ethernet address, generating random locally administered address\n");
2450 		get_random_ether_addr(func->mac_addr);
2451 	}
2452 	*/
2453 
2454 fail:
2455 	BNXT_HWRM_UNLOCK(softc);
2456 	return rc;
2457 }
2458 
2459 
2460 int
2461 bnxt_hwrm_func_qcfg(struct bnxt_softc *softc)
2462 {
2463         struct hwrm_func_qcfg_input req = {0};
2464         /* struct hwrm_func_qcfg_output *resp =
2465 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
2466 	struct bnxt_func_qcfg *fn_qcfg = &softc->fn_qcfg; */
2467         int rc;
2468 
2469 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCFG);
2470         req.fid = htole16(0xffff);
2471 	BNXT_HWRM_LOCK(softc);
2472 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2473         if (rc)
2474 		goto fail;
2475 
2476 	/*
2477 	fn_qcfg->alloc_completion_rings = le16toh(resp->alloc_cmpl_rings);
2478 	fn_qcfg->alloc_tx_rings = le16toh(resp->alloc_tx_rings);
2479 	fn_qcfg->alloc_rx_rings = le16toh(resp->alloc_rx_rings);
2480 	fn_qcfg->alloc_vnics = le16toh(resp->alloc_vnics);
2481 	*/
2482 fail:
2483 	BNXT_HWRM_UNLOCK(softc);
2484         return rc;
2485 }
2486 
2487 
2488 int
2489 bnxt_hwrm_func_reset(struct bnxt_softc *softc)
2490 {
2491 	struct hwrm_func_reset_input req = {0};
2492 
2493 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESET);
2494 	req.enables = 0;
2495 
2496 	return hwrm_send_message(softc, &req, sizeof(req));
2497 }
2498 
2499 int
2500 bnxt_hwrm_vnic_cfg_placement(struct bnxt_softc *softc,
2501     struct bnxt_vnic_info *vnic)
2502 {
2503 	struct hwrm_vnic_plcmodes_cfg_input req = {0};
2504 
2505 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_PLCMODES_CFG);
2506 
2507 	req.flags = htole32(
2508 	    HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
2509 	req.enables = htole32(
2510 	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
2511 	req.vnic_id = htole16(vnic->id);
2512 	req.jumbo_thresh = htole16(MCLBYTES);
2513 
2514 	return hwrm_send_message(softc, &req, sizeof(req));
2515 }
2516 
/*
 * Push the vnic configuration to the firmware: the default/BD-stall/
 * vlan-strip flags from vnic->flags, plus the default ring group,
 * RSS/CoS/LB rule ids and the MRU.
 */
int
bnxt_hwrm_vnic_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
{
	struct hwrm_vnic_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_CFG);

	if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
	if (vnic->flags & BNXT_VNIC_FLAG_BD_STALL)
		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
	if (vnic->flags & BNXT_VNIC_FLAG_VLAN_STRIP)
		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
	req.enables = htole32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
	    HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
	    HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
	req.vnic_id = htole16(vnic->id);
	req.dflt_ring_grp = htole16(vnic->def_ring_grp);
	req.rss_rule = htole16(vnic->rss_id);
	req.cos_rule = htole16(vnic->cos_rule);
	req.lb_rule = htole16(vnic->lb_rule);
	req.mru = htole16(vnic->mru);

	return hwrm_send_message(softc, &req, sizeof(req));
}
2542 
/*
 * Allocate a vnic in the firmware.  vnic->id must be the
 * HWRM_NA_SIGNATURE sentinel on entry and is set to the
 * firmware-assigned id on success.
 */
int
bnxt_hwrm_vnic_alloc(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
{
	struct hwrm_vnic_alloc_input req = {0};
	struct hwrm_vnic_alloc_output *resp =
	    BNXT_DMA_KVA(softc->sc_cmd_resp);
	int rc;

	/* guard against double allocation */
	if (vnic->id != (uint16_t)HWRM_NA_SIGNATURE) {
		printf("%s: attempt to re-allocate vnic %04x\n",
		    DEVNAME(softc), vnic->id);
		return EINVAL;
	}

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_ALLOC);

	if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
		req.flags = htole32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	vnic->id = le32toh(resp->vnic_id);

fail:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}
2573 
/*
 * Free a vnic previously allocated with bnxt_hwrm_vnic_alloc().
 * On success vnic->id is reset to the HWRM_NA_SIGNATURE sentinel.
 */
int
bnxt_hwrm_vnic_free(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
{
	struct hwrm_vnic_free_input req = {0};
	int rc;

	/* guard against freeing a vnic that was never allocated */
	if (vnic->id == (uint16_t)HWRM_NA_SIGNATURE) {
		printf("%s: attempt to deallocate vnic %04x\n",
		    DEVNAME(softc), vnic->id);
		return (EINVAL);
	}

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_FREE);
	req.vnic_id = htole16(vnic->id);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc == 0)
		vnic->id = (uint16_t)HWRM_NA_SIGNATURE;
	BNXT_HWRM_UNLOCK(softc);

	return (rc);
}
2597 
/*
 * Allocate an RSS/CoS/LB context.  *ctx_id must be the
 * HWRM_NA_SIGNATURE sentinel on entry and receives the
 * firmware-assigned context id on success.
 */
int
bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *softc, uint16_t *ctx_id)
{
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
	    BNXT_DMA_KVA(softc->sc_cmd_resp);
	int rc;

	/* guard against double allocation */
	if (*ctx_id != (uint16_t)HWRM_NA_SIGNATURE) {
		printf("%s: attempt to re-allocate vnic ctx %04x\n",
		    DEVNAME(softc), *ctx_id);
		return EINVAL;
	}

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	*ctx_id = letoh16(resp->rss_cos_lb_ctx_id);

fail:
	BNXT_HWRM_UNLOCK(softc);
	return (rc);
}
2625 
/*
 * Free an RSS/CoS/LB context allocated with
 * bnxt_hwrm_vnic_ctx_alloc().  On success *ctx_id is reset to the
 * HWRM_NA_SIGNATURE sentinel.
 */
int
bnxt_hwrm_vnic_ctx_free(struct bnxt_softc *softc, uint16_t *ctx_id)
{
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
	int rc;

	/* guard against freeing a context that was never allocated */
	if (*ctx_id == (uint16_t)HWRM_NA_SIGNATURE) {
		printf("%s: attempt to deallocate vnic ctx %04x\n",
		    DEVNAME(softc), *ctx_id);
		return (EINVAL);
	}

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE);
	req.rss_cos_lb_ctx_id = htole32(*ctx_id);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc == 0)
		*ctx_id = (uint16_t)HWRM_NA_SIGNATURE;
	BNXT_HWRM_UNLOCK(softc);
	return (rc);
}
2648 
/*
 * Allocate a ring group tying together a completion ring, rx ring,
 * aggregation ring and stats context.  grp->grp_id must be the
 * HWRM_NA_SIGNATURE sentinel on entry and is set to the
 * firmware-assigned group id on success.
 */
int
bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
{
	struct hwrm_ring_grp_alloc_input req = {0};
	struct hwrm_ring_grp_alloc_output *resp;
	int rc = 0;

	/* guard against double allocation */
	if (grp->grp_id != HWRM_NA_SIGNATURE) {
		printf("%s: attempt to re-allocate ring group %04x\n",
		    DEVNAME(softc), grp->grp_id);
		return EINVAL;
	}

	resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_ALLOC);
	req.cr = htole16(grp->cp_ring_id);
	req.rr = htole16(grp->rx_ring_id);
	req.ar = htole16(grp->ag_ring_id);
	req.sc = htole16(grp->stats_ctx);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	grp->grp_id = letoh32(resp->ring_group_id);

fail:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}
2680 
/*
 * Free a ring group allocated with bnxt_hwrm_ring_grp_alloc().
 * On success grp->grp_id is reset to the HWRM_NA_SIGNATURE sentinel.
 */
int
bnxt_hwrm_ring_grp_free(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
{
	struct hwrm_ring_grp_free_input req = {0};
	int rc = 0;

	/* guard against freeing a group that was never allocated */
	if (grp->grp_id == HWRM_NA_SIGNATURE) {
		printf("%s: attempt to free ring group %04x\n",
		    DEVNAME(softc), grp->grp_id);
		return EINVAL;
	}

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_FREE);
	req.ring_group_id = htole32(grp->grp_id);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc == 0)
		grp->grp_id = HWRM_NA_SIGNATURE;

	BNXT_HWRM_UNLOCK(softc);
	return (rc);
}
2704 
2705 /*
2706  * Ring allocation message to the firmware
2707  */
2708 int
2709 bnxt_hwrm_ring_alloc(struct bnxt_softc *softc, uint8_t type,
2710     struct bnxt_ring *ring, uint16_t cmpl_ring_id, uint32_t stat_ctx_id,
2711     int irq)
2712 {
2713 	struct hwrm_ring_alloc_input req = {0};
2714 	struct hwrm_ring_alloc_output *resp;
2715 	int rc;
2716 
2717 	if (ring->phys_id != (uint16_t)HWRM_NA_SIGNATURE) {
2718 		printf("%s: attempt to re-allocate ring %04x\n",
2719 		    DEVNAME(softc), ring->phys_id);
2720 		return EINVAL;
2721 	}
2722 
2723 	resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
2724 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_ALLOC);
2725 	req.enables = htole32(0);
2726 	req.fbo = htole32(0);
2727 
2728 	if (stat_ctx_id != HWRM_NA_SIGNATURE) {
2729 		req.enables |= htole32(
2730 		    HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
2731 		req.stat_ctx_id = htole32(stat_ctx_id);
2732 	}
2733 	req.ring_type = type;
2734 	req.page_tbl_addr = htole64(ring->paddr);
2735 	req.length = htole32(ring->ring_size);
2736 	req.logical_id = htole16(ring->id);
2737 	req.cmpl_ring_id = htole16(cmpl_ring_id);
2738 	req.queue_id = htole16(softc->sc_q_info[0].id);
2739 	req.int_mode = (softc->sc_flags & BNXT_FLAG_MSIX) ?
2740 	    HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX :
2741 	    HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY;
2742 	BNXT_HWRM_LOCK(softc);
2743 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2744 	if (rc)
2745 		goto fail;
2746 
2747 	ring->phys_id = le16toh(resp->ring_id);
2748 
2749 fail:
2750 	BNXT_HWRM_UNLOCK(softc);
2751 	return rc;
2752 }
2753 
/*
 * Free a hardware ring allocated with bnxt_hwrm_ring_alloc().
 * On success ring->phys_id is reset to the HWRM_NA_SIGNATURE
 * sentinel.
 */
int
bnxt_hwrm_ring_free(struct bnxt_softc *softc, uint8_t type, struct bnxt_ring *ring)
{
	struct hwrm_ring_free_input req = {0};
	int rc;

	/* guard against freeing a ring that was never allocated */
	if (ring->phys_id == (uint16_t)HWRM_NA_SIGNATURE) {
		printf("%s: attempt to deallocate ring %04x\n",
		    DEVNAME(softc), ring->phys_id);
		return (EINVAL);
	}

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_FREE);
	req.ring_type = type;
	req.ring_id = htole16(ring->phys_id);
	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	ring->phys_id = (uint16_t)HWRM_NA_SIGNATURE;
fail:
	BNXT_HWRM_UNLOCK(softc);
	return (rc);
}
2779 
2780 
/*
 * Allocate a statistics context for a completion ring; the firmware
 * is asked to DMA statistics to 'paddr' with a requested update
 * period of 1000ms.  cpr->stats_ctx_id must be the HWRM_NA_SIGNATURE
 * sentinel on entry and is set to the firmware-assigned id on
 * success.
 */
int
bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr,
    uint64_t paddr)
{
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct hwrm_stat_ctx_alloc_output *resp;
	int rc = 0;

	/* guard against double allocation */
	if (cpr->stats_ctx_id != HWRM_NA_SIGNATURE) {
		printf("%s: attempt to re-allocate stats ctx %08x\n",
		    DEVNAME(softc), cpr->stats_ctx_id);
		return EINVAL;
	}

	resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_ALLOC);

	req.update_period_ms = htole32(1000);
	req.stats_dma_addr = htole64(paddr);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	cpr->stats_ctx_id = le32toh(resp->stat_ctx_id);

fail:
	BNXT_HWRM_UNLOCK(softc);

	return rc;
}
2813 
/*
 * Free a statistics context allocated with
 * bnxt_hwrm_stat_ctx_alloc().  On success cpr->stats_ctx_id is reset
 * to the HWRM_NA_SIGNATURE sentinel.
 */
int
bnxt_hwrm_stat_ctx_free(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr)
{
	struct hwrm_stat_ctx_free_input req = {0};
	int rc = 0;

	/* guard against freeing a context that was never allocated */
	if (cpr->stats_ctx_id == HWRM_NA_SIGNATURE) {
		printf("%s: attempt to free stats ctx %08x\n",
		    DEVNAME(softc), cpr->stats_ctx_id);
		return EINVAL;
	}

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_FREE);
	req.stat_ctx_id = htole32(cpr->stats_ctx_id);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	BNXT_HWRM_UNLOCK(softc);

	if (rc == 0)
		cpr->stats_ctx_id = HWRM_NA_SIGNATURE;

	return (rc);
}
2838 
#if 0

/*
 * Currently compiled out: fetch port rx/tx statistics into host
 * memory (references FreeBSD-only softc fields).
 */
int
bnxt_hwrm_port_qstats(struct bnxt_softc *softc)
{
	struct hwrm_port_qstats_input req = {0};
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_QSTATS);

	req.port_id = htole16(softc->pf.port_id);
	req.rx_stat_host_addr = htole64(softc->hw_rx_port_stats.idi_paddr);
	req.tx_stat_host_addr = htole64(softc->hw_tx_port_stats.idi_paddr);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	BNXT_HWRM_UNLOCK(softc);

	return rc;
}

#endif
2861 
2862 int
2863 bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *softc,
2864     uint32_t vnic_id, uint32_t rx_mask, uint64_t mc_addr, uint32_t mc_count)
2865 {
2866 	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
2867 
2868 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_SET_RX_MASK);
2869 
2870 	req.vnic_id = htole32(vnic_id);
2871 	req.mask = htole32(rx_mask);
2872 	req.mc_tbl_addr = htole64(mc_addr);
2873 	req.num_mc_entries = htole32(mc_count);
2874 	return hwrm_send_message(softc, &req, sizeof(req));
2875 }
2876 
/*
 * Allocate an L2 rx filter matching our station address exactly
 * (address mask of all ones) and direct matching packets to the
 * vnic.  The firmware-assigned filter and flow ids are stored in the
 * vnic for later freeing.  vnic->filter_id must be -1 on entry.
 */
int
bnxt_hwrm_set_filter(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
{
	struct hwrm_cfa_l2_filter_alloc_input	req = {0};
	struct hwrm_cfa_l2_filter_alloc_output	*resp;
	uint32_t enables = 0;
	int rc = 0;

	/* guard against double allocation */
	if (vnic->filter_id != -1) {
		printf("%s: attempt to re-allocate l2 ctx filter\n",
		    DEVNAME(softc));
		return EINVAL;
	}

	resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_ALLOC);

	req.flags = htole32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX
	    | HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);
	enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR
	    | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK
	    | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.enables = htole32(enables);
	req.dst_id = htole16(vnic->id);
	/* match the station address with every address bit significant */
	memcpy(req.l2_addr, softc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
	memset(&req.l2_addr_mask, 0xff, sizeof(req.l2_addr_mask));

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	vnic->filter_id = le64toh(resp->l2_filter_id);
	vnic->flow_id = le64toh(resp->flow_id);

fail:
	BNXT_HWRM_UNLOCK(softc);
	return (rc);
}
2916 
/*
 * Free the L2 rx filter allocated with bnxt_hwrm_set_filter().
 * On success vnic->filter_id is reset to the -1 sentinel.
 */
int
bnxt_hwrm_free_filter(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
{
	struct hwrm_cfa_l2_filter_free_input req = {0};
	int rc = 0;

	/* guard against freeing a filter that was never allocated */
	if (vnic->filter_id == -1) {
		printf("%s: attempt to deallocate filter %llx\n",
		     DEVNAME(softc), vnic->filter_id);
		return (EINVAL);
	}

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_FREE);
	req.l2_filter_id = htole64(vnic->filter_id);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc == 0)
		vnic->filter_id = -1;
	BNXT_HWRM_UNLOCK(softc);

	return (rc);
}
2940 
2941 
#if 0

/*
 * Currently compiled out: RSS hash configuration for a vnic
 * (references FreeBSD-only vnic fields).
 */
int
bnxt_hwrm_rss_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic,
    uint32_t hash_type)
{
	struct hwrm_vnic_rss_cfg_input	req = {0};

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_CFG);

	req.hash_type = htole32(hash_type);
	req.ring_grp_tbl_addr = htole64(vnic->rss_grp_tbl.idi_paddr);
	req.hash_key_tbl_addr = htole64(vnic->rss_hash_key_tbl.idi_paddr);
	req.rss_ctx_idx = htole16(vnic->rss_id);

	return hwrm_send_message(softc, &req, sizeof(req));
}

#endif
2961 
/*
 * Point the firmware's asynchronous event notifications at our
 * completion ring.  Only the PF path is currently taken (the
 * condition is hardwired to 1); the VF variant is retained below for
 * when VF support is added.
 */
int
bnxt_cfg_async_cr(struct bnxt_softc *softc)
{
	int rc = 0;

	if (1 /* BNXT_PF(softc) */) {
		struct hwrm_func_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_CFG);

		req.fid = htole16(0xffff);
		req.enables = htole32(HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = htole16(softc->sc_cp_ring.ring.phys_id);

		rc = hwrm_send_message(softc, &req, sizeof(req));
	} else {
		/* NOTREACHED until VF support is implemented */
		struct hwrm_func_vf_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_VF_CFG);

		req.enables = htole32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = htole16(softc->sc_cp_ring.ring.phys_id);

		rc = hwrm_send_message(softc, &req, sizeof(req));
	}
	return rc;
}
2989 
2990 #if 0
2991 
2992 void
2993 bnxt_validate_hw_lro_settings(struct bnxt_softc *softc)
2994 {
2995 	softc->hw_lro.enable = min(softc->hw_lro.enable, 1);
2996 
2997         softc->hw_lro.is_mode_gro = min(softc->hw_lro.is_mode_gro, 1);
2998 
2999 	softc->hw_lro.max_agg_segs = min(softc->hw_lro.max_agg_segs,
3000 		HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX);
3001 
3002 	softc->hw_lro.max_aggs = min(softc->hw_lro.max_aggs,
3003 		HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
3004 
3005 	softc->hw_lro.min_agg_len = min(softc->hw_lro.min_agg_len, BNXT_MAX_MTU);
3006 }
3007 
3008 int
3009 bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc)
3010 {
3011 	struct hwrm_vnic_tpa_cfg_input req = {0};
3012 	uint32_t flags;
3013 
3014 	if (softc->vnic_info.id == (uint16_t) HWRM_NA_SIGNATURE) {
3015 		return 0;
3016 	}
3017 
3018 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_TPA_CFG);
3019 
3020 	if (softc->hw_lro.enable) {
3021 		flags = HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
3022 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
3023 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
3024 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ;
3025 
3026         	if (softc->hw_lro.is_mode_gro)
3027 			flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO;
3028 		else
3029 			flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE;
3030 
3031 		req.flags = htole32(flags);
3032 
3033 		req.enables = htole32(HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
3034 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
3035 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
3036 
3037 		req.max_agg_segs = htole16(softc->hw_lro.max_agg_segs);
3038 		req.max_aggs = htole16(softc->hw_lro.max_aggs);
3039 		req.min_agg_len = htole32(softc->hw_lro.min_agg_len);
3040 	}
3041 
3042 	req.vnic_id = htole16(softc->vnic_info.id);
3043 
3044 	return hwrm_send_message(softc, &req, sizeof(req));
3045 }
3046 
3047 
3048 int
3049 bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor,
3050     uint8_t *selfreset)
3051 {
3052 	struct hwrm_fw_reset_input req = {0};
3053 	struct hwrm_fw_reset_output *resp =
3054 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
3055 	int rc;
3056 
3057 	MPASS(selfreset);
3058 
3059 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_RESET);
3060 	req.embedded_proc_type = processor;
3061 	req.selfrst_status = *selfreset;
3062 
3063 	BNXT_HWRM_LOCK(softc);
3064 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3065 	if (rc)
3066 		goto exit;
3067 	*selfreset = resp->selfrst_status;
3068 
3069 exit:
3070 	BNXT_HWRM_UNLOCK(softc);
3071 	return rc;
3072 }
3073 
3074 int
3075 bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type, uint8_t *selfreset)
3076 {
3077 	struct hwrm_fw_qstatus_input req = {0};
3078 	struct hwrm_fw_qstatus_output *resp =
3079 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
3080 	int rc;
3081 
3082 	MPASS(selfreset);
3083 
3084 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_QSTATUS);
3085 	req.embedded_proc_type = type;
3086 
3087 	BNXT_HWRM_LOCK(softc);
3088 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3089 	if (rc)
3090 		goto exit;
3091 	*selfreset = resp->selfrst_status;
3092 
3093 exit:
3094 	BNXT_HWRM_UNLOCK(softc);
3095 	return rc;
3096 }
3097 
3098 #endif
3099 
3100 int
3101 bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *softc, uint16_t *mfg_id,
3102     uint16_t *device_id, uint32_t *sector_size, uint32_t *nvram_size,
3103     uint32_t *reserved_size, uint32_t *available_size)
3104 {
3105 	struct hwrm_nvm_get_dev_info_input req = {0};
3106 	struct hwrm_nvm_get_dev_info_output *resp =
3107 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
3108 	int rc;
3109 	uint32_t old_timeo;
3110 
3111 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DEV_INFO);
3112 
3113 	BNXT_HWRM_LOCK(softc);
3114 	old_timeo = softc->sc_cmd_timeo;
3115 	softc->sc_cmd_timeo = BNXT_NVM_TIMEO;
3116 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3117 	softc->sc_cmd_timeo = old_timeo;
3118 	if (rc)
3119 		goto exit;
3120 
3121 	if (mfg_id)
3122 		*mfg_id = le16toh(resp->manufacturer_id);
3123 	if (device_id)
3124 		*device_id = le16toh(resp->device_id);
3125 	if (sector_size)
3126 		*sector_size = le32toh(resp->sector_size);
3127 	if (nvram_size)
3128 		*nvram_size = le32toh(resp->nvram_size);
3129 	if (reserved_size)
3130 		*reserved_size = le32toh(resp->reserved_size);
3131 	if (available_size)
3132 		*available_size = le32toh(resp->available_size);
3133 
3134 exit:
3135 	BNXT_HWRM_UNLOCK(softc);
3136 	return rc;
3137 }
3138 
3139 #if 0
3140 
3141 int
3142 bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year, uint8_t *month,
3143     uint8_t *day, uint8_t *hour, uint8_t *minute, uint8_t *second,
3144     uint16_t *millisecond, uint16_t *zone)
3145 {
3146 	struct hwrm_fw_get_time_input req = {0};
3147 	struct hwrm_fw_get_time_output *resp =
3148 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
3149 	int rc;
3150 
3151 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_GET_TIME);
3152 
3153 	BNXT_HWRM_LOCK(softc);
3154 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3155 	if (rc)
3156 		goto exit;
3157 
3158 	if (year)
3159 		*year = le16toh(resp->year);
3160 	if (month)
3161 		*month = resp->month;
3162 	if (day)
3163 		*day = resp->day;
3164 	if (hour)
3165 		*hour = resp->hour;
3166 	if (minute)
3167 		*minute = resp->minute;
3168 	if (second)
3169 		*second = resp->second;
3170 	if (millisecond)
3171 		*millisecond = le16toh(resp->millisecond);
3172 	if (zone)
3173 		*zone = le16toh(resp->zone);
3174 
3175 exit:
3176 	BNXT_HWRM_UNLOCK(softc);
3177 	return rc;
3178 }
3179 
3180 int
3181 bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year, uint8_t month,
3182     uint8_t day, uint8_t hour, uint8_t minute, uint8_t second,
3183     uint16_t millisecond, uint16_t zone)
3184 {
3185 	struct hwrm_fw_set_time_input req = {0};
3186 
3187 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_SET_TIME);
3188 
3189 	req.year = htole16(year);
3190 	req.month = month;
3191 	req.day = day;
3192 	req.hour = hour;
3193 	req.minute = minute;
3194 	req.second = second;
3195 	req.millisecond = htole16(millisecond);
3196 	req.zone = htole16(zone);
3197 	return hwrm_send_message(softc, &req, sizeof(req));
3198 }
3199 
3200 #endif
3201 
3202 void
3203 _bnxt_hwrm_set_async_event_bit(struct hwrm_func_drv_rgtr_input *req, int bit)
3204 {
3205 	req->async_event_fwd[bit/32] |= (1 << (bit % 32));
3206 }
3207 
3208 int bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *softc)
3209 {
3210 	struct hwrm_func_drv_rgtr_input req = {0};
3211 	int events[] = {
3212 		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
3213 		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
3214 		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
3215 		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
3216 		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE
3217 	};
3218 	int i;
3219 
3220 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);
3221 
3222 	req.enables =
3223 		htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
3224 
3225 	for (i = 0; i < nitems(events); i++)
3226 		_bnxt_hwrm_set_async_event_bit(&req, events[i]);
3227 
3228 	return hwrm_send_message(softc, &req, sizeof(req));
3229 }
3230 
3231 int
3232 bnxt_get_sffpage(struct bnxt_softc *softc, struct if_sffpage *sff)
3233 {
3234 	struct hwrm_port_phy_i2c_read_input req;
3235 	struct hwrm_port_phy_i2c_read_output *out;
3236 	int offset;
3237 
3238 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_I2C_READ);
3239 	req.i2c_slave_addr = sff->sff_addr;
3240 	req.page_number = htole16(sff->sff_page);
3241 
3242 	for (offset = 0; offset < 256; offset += sizeof(out->data)) {
3243 		req.page_offset = htole16(offset);
3244 		req.data_length = sizeof(out->data);
3245 		req.enables = htole32(HWRM_PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET);
3246 
3247 		if (hwrm_send_message(softc, &req, sizeof(req))) {
3248 			printf("%s: failed to read i2c data\n", DEVNAME(softc));
3249 			return 1;
3250 		}
3251 
3252 		out = (struct hwrm_port_phy_i2c_read_output *)
3253 		    BNXT_DMA_KVA(softc->sc_cmd_resp);
3254 		memcpy(sff->sff_data + offset, out->data, sizeof(out->data));
3255 	}
3256 
3257 	return 0;
3258 }
3259