xref: /openbsd-src/sys/dev/pci/if_bnxt.c (revision 99fd087599a8791921855f21bd7e36130f39aadc)
1 /*	$OpenBSD: if_bnxt.c,v 1.21 2019/09/03 09:00:44 sf Exp $	*/
2 /*-
3  * Broadcom NetXtreme-C/E network driver.
4  *
5  * Copyright (c) 2016 Broadcom, All Rights Reserved.
6  * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
21  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27  * THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 /*
31  * Copyright (c) 2018 Jonathan Matthew <jmatthew@openbsd.org>
32  *
33  * Permission to use, copy, modify, and distribute this software for any
34  * purpose with or without fee is hereby granted, provided that the above
35  * copyright notice and this permission notice appear in all copies.
36  *
37  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
38  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
39  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
40  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
41  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
42  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
43  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
44  */
45 
46 
47 #include "bpfilter.h"
48 
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/mbuf.h>
52 #include <sys/kernel.h>
53 #include <sys/malloc.h>
54 #include <sys/device.h>
55 #include <sys/stdint.h>
56 #include <sys/sockio.h>
57 #include <sys/atomic.h>
58 
59 #include <machine/bus.h>
60 
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 #include <dev/pci/pcidevs.h>
64 
65 #include <dev/pci/if_bnxtreg.h>
66 
67 #include <net/if.h>
68 #include <net/if_media.h>
69 
70 #if NBPFILTER > 0
71 #include <net/bpf.h>
72 #endif
73 
74 #include <netinet/in.h>
75 #include <netinet/if_ether.h>
76 
#define BNXT_HWRM_BAR		0x10	/* PCI BAR: HWRM command channel */
#define BNXT_DOORBELL_BAR	0x18	/* PCI BAR: ring doorbells */

/* logical ring ids; a ring's doorbell lives at id * 0x80 in the db BAR */
#define BNXT_RX_RING_ID		0
#define BNXT_AG_RING_ID		1
#define BNXT_TX_RING_ID		3

#define BNXT_MAX_QUEUE		8
#define BNXT_MAX_MTU		9500
#define BNXT_AG_BUFFER_SIZE	8192	/* rx aggregation buffer size */

#define BNXT_CP_PAGES		4	/* pages of completion ring entries */

#define BNXT_MAX_TX_SEGS	32	/* a bit much? */

#define BNXT_HWRM_SHORT_REQ_LEN	sizeof(struct hwrm_short_input)
93 
/*
 * Serialization of HWRM commands; all firmware communication goes
 * through sc_lock.  Previously LOCK_INIT expanded `sc' instead of its
 * `_sc' argument, which only worked because the sole caller happened
 * to pass a variable named `sc'.
 */
#define BNXT_HWRM_LOCK_INIT(_sc, _name)	\
	mtx_init_flags(&(_sc)->sc_lock, IPL_NET, _name, 0)
#define BNXT_HWRM_LOCK(_sc) 		mtx_enter(&(_sc)->sc_lock)
#define BNXT_HWRM_UNLOCK(_sc) 		mtx_leave(&(_sc)->sc_lock)
#define BNXT_HWRM_LOCK_DESTROY(_sc)	/* nothing */
#define BNXT_HWRM_LOCK_ASSERT(_sc)	MUTEX_ASSERT_LOCKED(&(_sc)->sc_lock)
100 
/* bits for bnxt_softc sc_flags */
#define BNXT_FLAG_VF            0x0001
#define BNXT_FLAG_NPAR          0x0002
#define BNXT_FLAG_WOL_CAP       0x0004
#define BNXT_FLAG_SHORT_CMD     0x0008
#define BNXT_FLAG_MSIX          0x0010	/* using an msi-x interrupt */

/* NVRam stuff has a five minute timeout */
#define BNXT_NVM_TIMEO	(5 * 60 * 1000)
109 
/*
 * Advance a completion ring consumer index, flipping the expected
 * valid-bit phase when the index wraps around the end of the ring.
 *
 * Fixed: the macro previously ended with `while (0);' — the stray
 * semicolon defeats the do/while(0) idiom and breaks expansion inside
 * an unbraced if/else.  Arguments are now fully parenthesized.
 */
#define NEXT_CP_CONS_V(_ring, _cons, _v_bit)		\
do {							\
	if (++(_cons) == (_ring)->ring_size) {		\
		(_cons) = 0;				\
		(_v_bit) = !(_v_bit);			\
	}						\
} while (0)
115 
/*
 * Per class-of-service queue info, filled in from
 * bnxt_hwrm_queue_qportcfg().
 */
struct bnxt_cos_queue {
	uint8_t			id;		/* firmware queue id */
	uint8_t			profile;	/* queue service profile */
};
120 
/*
 * Common descriptor ring state, shared by tx, rx, ag, and completion
 * rings.
 */
struct bnxt_ring {
	uint64_t		paddr;		/* DMA address of ring memory */
	uint64_t		doorbell;	/* doorbell offset (id * 0x80) */
	caddr_t			vaddr;		/* kva of ring memory */
	uint32_t		ring_size;	/* number of descriptors */
	uint16_t		id;		/* logical (driver) ring id */
	uint16_t		phys_id;	/* id assigned by hwrm ring alloc */
};
129 
/*
 * Completion ring state, including the hardware stats context
 * associated with it.
 */
struct bnxt_cp_ring {
	struct bnxt_ring	ring;
	void			*irq;
	struct bnxt_softc	*softc;
	uint32_t		cons;		/* current consumer index */
	int			v_bit;		/* expected valid bit; toggles on wrap */
	uint32_t		commit_cons;	/* cons at last commit point */
	int			commit_v_bit;	/* v_bit at last commit point */
	struct ctx_hw_stats	*stats;
	uint32_t		stats_ctx_id;	/* hwrm stats context id */
};
141 
/*
 * A hwrm ring group, tying the rx, aggregation, and completion rings
 * plus the stats context together for use by a vnic.
 */
struct bnxt_grp_info {
	uint32_t		grp_id;		/* assigned by ring grp alloc */
	uint16_t		stats_ctx;
	uint16_t		rx_ring_id;
	uint16_t		cp_ring_id;
	uint16_t		ag_ring_id;
};
149 
/*
 * State of a virtual NIC: the hwrm object ids tied to it and the
 * l2 rx filter installed on it.
 */
struct bnxt_vnic_info {
	uint16_t		id;		/* hwrm vnic id */
	uint16_t		def_ring_grp;	/* default ring group id */
	uint16_t		cos_rule;
	uint16_t		lb_rule;
	uint16_t		mru;		/* max receive unit */

	uint32_t		flags;
#define BNXT_VNIC_FLAG_DEFAULT		0x01
#define BNXT_VNIC_FLAG_BD_STALL		0x02
#define BNXT_VNIC_FLAG_VLAN_STRIP	0x04

	uint64_t		filter_id;	/* l2 rx filter id */
	uint32_t		flow_id;

	uint16_t		rss_id;		/* rss context id */
	/* rss things */
};
168 
/*
 * Per-descriptor software state: the dma map and the mbuf (if any)
 * currently attached to a tx or rx ring slot.
 */
struct bnxt_slot {
	bus_dmamap_t		bs_map;
	struct mbuf		*bs_m;
};
173 
/*
 * A single-segment DMA memory allocation, created by
 * bnxt_dmamem_alloc() and released by bnxt_dmamem_free().
 */
struct bnxt_dmamem {
	bus_dmamap_t		bdm_map;
	bus_dma_segment_t	bdm_seg;
	size_t			bdm_size;
	caddr_t			bdm_kva;
};
#define BNXT_DMA_MAP(_bdm)	((_bdm)->bdm_map)
#define BNXT_DMA_LEN(_bdm)	((_bdm)->bdm_size)
#define BNXT_DMA_DVA(_bdm)	((u_int64_t)(_bdm)->bdm_map->dm_segs[0].ds_addr)
#define BNXT_DMA_KVA(_bdm)	((void *)(_bdm)->bdm_kva)
184 
/* Per-device softc. */
struct bnxt_softc {
	struct device		sc_dev;
	struct arpcom		sc_ac;		/* ethernet common data */
	struct ifmedia		sc_media;

	struct mutex		sc_lock;	/* serializes hwrm commands */

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;
	bus_dma_tag_t		sc_dmat;

	/* BNXT_HWRM_BAR mapping: hwrm command channel */
	bus_space_tag_t		sc_hwrm_t;
	bus_space_handle_t	sc_hwrm_h;
	bus_size_t		sc_hwrm_s;

	struct bnxt_dmamem	*sc_cmd_resp;	/* hwrm response buffer */
	uint16_t		sc_cmd_seq;	/* hwrm command sequence number */
	uint16_t		sc_max_req_len;
	uint32_t		sc_cmd_timeo;
	uint32_t		sc_flags;	/* BNXT_FLAG_* */

	/* BNXT_DOORBELL_BAR mapping: ring doorbells */
	bus_space_tag_t		sc_db_t;
	bus_space_handle_t	sc_db_h;
	bus_size_t		sc_db_s;

	void			*sc_ih;		/* interrupt handle */

	int			sc_hwrm_ver;
	int			sc_max_tc;	/* number of traffic classes */
	struct bnxt_cos_queue	sc_q_info[BNXT_MAX_QUEUE];

	struct bnxt_vnic_info	sc_vnic;
	struct bnxt_dmamem	*sc_stats_ctx_mem;

	struct bnxt_cp_ring	sc_cp_ring;
	struct bnxt_dmamem	*sc_cp_ring_mem;

	/* rx */
	struct bnxt_dmamem	*sc_rx_ring_mem;	/* rx and ag */
	struct bnxt_dmamem	*sc_rx_mcast;	/* multicast address table */
	struct bnxt_ring	sc_rx_ring;
	struct bnxt_ring	sc_rx_ag_ring;
	struct bnxt_grp_info	sc_ring_group;
	struct if_rxring	sc_rxr[2];	/* [0] rx ring, [1] ag ring */
	struct bnxt_slot	*sc_rx_slots;
	struct bnxt_slot	*sc_rx_ag_slots;
	int			sc_rx_prod;
	int			sc_rx_cons;
	int			sc_rx_ag_prod;
	int			sc_rx_ag_cons;
	struct timeout		sc_rx_refill;

	/* tx */
	struct bnxt_dmamem	*sc_tx_ring_mem;
	struct bnxt_ring	sc_tx_ring;
	struct bnxt_slot	*sc_tx_slots;
	int			sc_tx_prod;
	int			sc_tx_cons;
	int			sc_tx_ring_prod;
	int			sc_tx_ring_cons;
};
#define DEVNAME(_sc)	((_sc)->sc_dev.dv_xname)
247 
/* PCI ids of the supported NetXtreme-C/E controllers */
const struct pci_matchid bnxt_devices[] = {
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57301 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57302 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57304 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57311 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57312 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57314 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57402 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57404 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57406 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57407 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57412 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57414 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57416 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57416_SFP },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57417 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57417_SFP }
};
266 
267 int		bnxt_match(struct device *, void *, void *);
268 void		bnxt_attach(struct device *, struct device *, void *);
269 
270 void		bnxt_up(struct bnxt_softc *);
271 void		bnxt_down(struct bnxt_softc *);
272 void		bnxt_iff(struct bnxt_softc *);
273 int		bnxt_ioctl(struct ifnet *, u_long, caddr_t);
274 int		bnxt_rxrinfo(struct bnxt_softc *, struct if_rxrinfo *);
275 void		bnxt_start(struct ifqueue *);
276 int		bnxt_intr(void *);
277 void		bnxt_watchdog(struct ifnet *);
278 void		bnxt_media_status(struct ifnet *, struct ifmediareq *);
279 int		bnxt_media_change(struct ifnet *);
280 int		bnxt_media_autonegotiate(struct bnxt_softc *);
281 
282 struct cmpl_base *bnxt_cpr_next_cmpl(struct bnxt_softc *, struct bnxt_cp_ring *);
283 void		bnxt_cpr_commit(struct bnxt_softc *, struct bnxt_cp_ring *);
284 void		bnxt_cpr_rollback(struct bnxt_softc *, struct bnxt_cp_ring *);
285 
286 void		bnxt_mark_cpr_invalid(struct bnxt_cp_ring *);
287 void		bnxt_write_cp_doorbell(struct bnxt_softc *, struct bnxt_ring *,
288 		    int);
289 void		bnxt_write_cp_doorbell_index(struct bnxt_softc *,
290 		    struct bnxt_ring *, uint32_t, int);
291 void		bnxt_write_rx_doorbell(struct bnxt_softc *, struct bnxt_ring *,
292 		    int);
293 void		bnxt_write_tx_doorbell(struct bnxt_softc *, struct bnxt_ring *,
294 		    int);
295 
296 int		bnxt_rx_fill(struct bnxt_softc *);
297 u_int		bnxt_rx_fill_slots(struct bnxt_softc *, struct bnxt_ring *, void *,
298 		    struct bnxt_slot *, uint *, int, uint16_t, u_int);
299 void		bnxt_refill(void *);
300 int		bnxt_rx(struct bnxt_softc *, struct bnxt_cp_ring *,
301 		    struct mbuf_list *, int *, int *, struct cmpl_base *);
302 
303 void		bnxt_txeof(struct bnxt_softc *, int *, struct cmpl_base *);
304 
305 int		_hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
306 int		hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
307 void		bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *, void *, uint16_t);
308 int 		bnxt_hwrm_err_map(uint16_t err);
309 
310 /* HWRM Function Prototypes */
311 int		bnxt_hwrm_ring_alloc(struct bnxt_softc *, uint8_t,
312 		    struct bnxt_ring *, uint16_t, uint32_t, int);
313 int		bnxt_hwrm_ring_free(struct bnxt_softc *, uint8_t,
314 		    struct bnxt_ring *);
315 int		bnxt_hwrm_ver_get(struct bnxt_softc *);
316 int		bnxt_hwrm_queue_qportcfg(struct bnxt_softc *);
317 int		bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *);
318 int		bnxt_hwrm_func_qcaps(struct bnxt_softc *);
319 int		bnxt_hwrm_func_qcfg(struct bnxt_softc *);
320 int		bnxt_hwrm_func_reset(struct bnxt_softc *);
321 int		bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *, uint16_t *);
322 int		bnxt_hwrm_vnic_ctx_free(struct bnxt_softc *, uint16_t *);
323 int		bnxt_hwrm_vnic_cfg(struct bnxt_softc *,
324 		    struct bnxt_vnic_info *);
325 int		bnxt_hwrm_vnic_cfg_placement(struct bnxt_softc *,
326 		    struct bnxt_vnic_info *vnic);
327 int		bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *,
328 		    struct bnxt_cp_ring *, uint64_t);
329 int		bnxt_hwrm_stat_ctx_free(struct bnxt_softc *,
330 		    struct bnxt_cp_ring *);
331 int		bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *,
332 		    struct bnxt_grp_info *);
333 int		bnxt_hwrm_ring_grp_free(struct bnxt_softc *,
334 		    struct bnxt_grp_info *);
335 int		bnxt_hwrm_vnic_alloc(struct bnxt_softc *,
336 		    struct bnxt_vnic_info *);
337 int		bnxt_hwrm_vnic_free(struct bnxt_softc *,
338 		    struct bnxt_vnic_info *);
339 int		bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *,
340 		    uint32_t, uint32_t, uint64_t, uint32_t);
341 int		bnxt_hwrm_set_filter(struct bnxt_softc *,
342 		    struct bnxt_vnic_info *);
343 int		bnxt_hwrm_free_filter(struct bnxt_softc *,
344 		    struct bnxt_vnic_info *);
345 int		bnxt_cfg_async_cr(struct bnxt_softc *);
346 int		bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *, uint16_t *,
347 		    uint16_t *, uint32_t *, uint32_t *, uint32_t *, uint32_t *);
348 int		bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *,
349 		    struct ifmediareq *);
350 int		bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *);
351 int		bnxt_get_sffpage(struct bnxt_softc *, struct if_sffpage *);
352 
353 /* not used yet: */
354 #if 0
355 int bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool shutdown);
356 
357 int bnxt_hwrm_port_qstats(struct bnxt_softc *softc);
358 
359 int bnxt_hwrm_rss_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic,
360     uint32_t hash_type);
361 
362 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc);
363 void bnxt_validate_hw_lro_settings(struct bnxt_softc *softc);
364 int bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor,
365     uint8_t *selfreset);
366 int bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type,
367     uint8_t *selfreset);
368 int bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year,
369     uint8_t *month, uint8_t *day, uint8_t *hour, uint8_t *minute,
370     uint8_t *second, uint16_t *millisecond, uint16_t *zone);
371 int bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year,
372     uint8_t month, uint8_t day, uint8_t hour, uint8_t minute, uint8_t second,
373     uint16_t millisecond, uint16_t zone);
374 
375 #endif
376 
377 
/* autoconf glue */
struct cfattach bnxt_ca = {
	sizeof(struct bnxt_softc), bnxt_match, bnxt_attach
};

struct cfdriver bnxt_cd = {
	NULL, "bnxt", DV_IFNET
};
385 
386 struct bnxt_dmamem *
387 bnxt_dmamem_alloc(struct bnxt_softc *sc, size_t size)
388 {
389 	struct bnxt_dmamem *m;
390 	int nsegs;
391 
392 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
393 	if (m == NULL)
394 		return (NULL);
395 
396 	m->bdm_size = size;
397 
398 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
399 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->bdm_map) != 0)
400 		goto bdmfree;
401 
402 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->bdm_seg, 1,
403 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
404 		goto destroy;
405 
406 	if (bus_dmamem_map(sc->sc_dmat, &m->bdm_seg, nsegs, size, &m->bdm_kva,
407 	    BUS_DMA_NOWAIT) != 0)
408 		goto free;
409 
410 	if (bus_dmamap_load(sc->sc_dmat, m->bdm_map, m->bdm_kva, size, NULL,
411 	    BUS_DMA_NOWAIT) != 0)
412 		goto unmap;
413 
414 	return (m);
415 
416 unmap:
417 	bus_dmamem_unmap(sc->sc_dmat, m->bdm_kva, m->bdm_size);
418 free:
419 	bus_dmamem_free(sc->sc_dmat, &m->bdm_seg, 1);
420 destroy:
421 	bus_dmamap_destroy(sc->sc_dmat, m->bdm_map);
422 bdmfree:
423 	free(m, M_DEVBUF, sizeof *m);
424 
425 	return (NULL);
426 }
427 
428 void
429 bnxt_dmamem_free(struct bnxt_softc *sc, struct bnxt_dmamem *m)
430 {
431 	bus_dmamem_unmap(sc->sc_dmat, m->bdm_kva, m->bdm_size);
432 	bus_dmamem_free(sc->sc_dmat, &m->bdm_seg, 1);
433 	bus_dmamap_destroy(sc->sc_dmat, m->bdm_map);
434 	free(m, M_DEVBUF, sizeof *m);
435 }
436 
437 int
438 bnxt_match(struct device *parent, void *match, void *aux)
439 {
440 	return (pci_matchbyid(aux, bnxt_devices, nitems(bnxt_devices)));
441 }
442 
443 void
444 bnxt_attach(struct device *parent, struct device *self, void *aux)
445 {
446 	struct bnxt_softc *sc = (struct bnxt_softc *)self;
447 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input aggint;
448 	struct ifnet *ifp = &sc->sc_ac.ac_if;
449 	struct pci_attach_args *pa = aux;
450 	pci_intr_handle_t ih;
451 	const char *intrstr;
452 	u_int memtype;
453 
454 	/* enable busmaster? */
455 
456 	sc->sc_pc = pa->pa_pc;
457 	sc->sc_tag = pa->pa_tag;
458 	sc->sc_dmat = pa->pa_dmat;
459 
460 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNXT_HWRM_BAR);
461 	if (pci_mapreg_map(pa, BNXT_HWRM_BAR, memtype, 0, &sc->sc_hwrm_t,
462 	    &sc->sc_hwrm_h, NULL, &sc->sc_hwrm_s, 0)) {
463 		printf(": failed to map hwrm\n");
464 		return;
465 	}
466 
467 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNXT_DOORBELL_BAR);
468 	if (pci_mapreg_map(pa, BNXT_DOORBELL_BAR, memtype, 0, &sc->sc_db_t,
469 	    &sc->sc_db_h, NULL, &sc->sc_db_s, 0)) {
470 		printf(": failed to map doorbell\n");
471 		goto unmap_1;
472 	}
473 
474 	BNXT_HWRM_LOCK_INIT(sc, DEVNAME(sc));
475 	sc->sc_cmd_resp = bnxt_dmamem_alloc(sc, PAGE_SIZE);
476 	if (sc->sc_cmd_resp == NULL) {
477 		printf(": failed to allocate command response buffer\n");
478 		goto unmap_2;
479 	}
480 
481 	if (bnxt_hwrm_ver_get(sc) != 0) {
482 		printf(": failed to query version info\n");
483 		goto free_resp;
484 	}
485 
486 	if (bnxt_hwrm_nvm_get_dev_info(sc, NULL, NULL, NULL, NULL, NULL, NULL)
487 	    != 0) {
488 		printf(": failed to get nvram info\n");
489 		goto free_resp;
490 	}
491 
492 	if (bnxt_hwrm_func_drv_rgtr(sc) != 0) {
493 		printf(": failed to register driver with firmware\n");
494 		goto free_resp;
495 	}
496 
497 	if (bnxt_hwrm_func_rgtr_async_events(sc) != 0) {
498 		printf(": failed to register async events\n");
499 		goto free_resp;
500 	}
501 
502 	if (bnxt_hwrm_func_qcaps(sc) != 0) {
503 		printf(": failed to get queue capabilities\n");
504 		goto free_resp;
505 	}
506 
507 	/*
508 	 * devices advertise msi support, but there's no way to tell a
509 	 * completion queue to use msi mode, only legacy or msi-x.
510 	 */
511 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
512 		sc->sc_flags |= BNXT_FLAG_MSIX;
513 	} else if (pci_intr_map(pa, &ih) != 0) {
514 		printf(": unable to map interrupt\n");
515 		goto free_resp;
516 	}
517 	intrstr = pci_intr_string(sc->sc_pc, ih);
518 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_NET | IPL_MPSAFE,
519 	    bnxt_intr, sc, DEVNAME(sc));
520 	if (sc->sc_ih == NULL) {
521 		printf(": unable to establish interrupt");
522 		if (intrstr != NULL)
523 			printf(" at %s", intrstr);
524 		printf("\n");
525 		goto deintr;
526 	}
527 	printf("%s, address %s\n", intrstr, ether_sprintf(sc->sc_ac.ac_enaddr));
528 
529 	if (bnxt_hwrm_func_qcfg(sc) != 0) {
530 		printf("%s: failed to query function config\n", DEVNAME(sc));
531 		goto deintr;
532 	}
533 
534 	if (bnxt_hwrm_queue_qportcfg(sc) != 0) {
535 		printf("%s: failed to query port config\n", DEVNAME(sc));
536 		goto deintr;
537 	}
538 
539 	if (bnxt_hwrm_func_reset(sc) != 0) {
540 		printf("%s: reset failed\n", DEVNAME(sc));
541 		goto deintr;
542 	}
543 
544 	sc->sc_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
545 	sc->sc_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
546 	sc->sc_cp_ring.softc = sc;
547 	sc->sc_cp_ring.ring.id = 0;
548 	sc->sc_cp_ring.ring.doorbell = sc->sc_cp_ring.ring.id * 0x80;
549 	sc->sc_cp_ring.ring.ring_size = (PAGE_SIZE * BNXT_CP_PAGES) /
550 	    sizeof(struct cmpl_base);
551 	sc->sc_cp_ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE * BNXT_CP_PAGES);
552 	if (sc->sc_cp_ring_mem == NULL) {
553 		printf("%s: failed to allocate completion queue memory\n",
554 		    DEVNAME(sc));
555 		goto deintr;
556 	}
557 	sc->sc_cp_ring.ring.vaddr = BNXT_DMA_KVA(sc->sc_cp_ring_mem);
558 	sc->sc_cp_ring.ring.paddr = BNXT_DMA_DVA(sc->sc_cp_ring_mem);
559 	sc->sc_cp_ring.cons = UINT32_MAX;
560 	sc->sc_cp_ring.v_bit = 1;
561 	bnxt_mark_cpr_invalid(&sc->sc_cp_ring);
562 	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
563 	    &sc->sc_cp_ring.ring, (uint16_t)HWRM_NA_SIGNATURE,
564 	    HWRM_NA_SIGNATURE, 1) != 0) {
565 		printf("%s: failed to allocate completion queue\n",
566 		    DEVNAME(sc));
567 		goto free_cp_mem;
568 	}
569 	if (bnxt_cfg_async_cr(sc) != 0) {
570 		printf("%s: failed to set async completion ring\n",
571 		    DEVNAME(sc));
572 		goto free_cp_mem;
573 	}
574 	bnxt_write_cp_doorbell(sc, &sc->sc_cp_ring.ring, 1);
575 
576 	/*
577 	 * set interrupt aggregation parameters for around 10k interrupts
578 	 * per second.  the timers are in units of 80usec, and the counters
579 	 * are based on the minimum rx ring size of 32.
580 	 */
581 	memset(&aggint, 0, sizeof(aggint));
582         bnxt_hwrm_cmd_hdr_init(sc, &aggint,
583 	    HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
584 	aggint.ring_id = htole16(sc->sc_cp_ring.ring.phys_id);
585 	aggint.num_cmpl_dma_aggr = htole16(32);
586 	aggint.num_cmpl_dma_aggr_during_int  = aggint.num_cmpl_dma_aggr;
587 	aggint.cmpl_aggr_dma_tmr = htole16((1000000000 / 20000) / 80);
588 	aggint.cmpl_aggr_dma_tmr_during_int = aggint.cmpl_aggr_dma_tmr;
589 	aggint.int_lat_tmr_min = htole16((1000000000 / 20000) / 80);
590 	aggint.int_lat_tmr_max = htole16((1000000000 / 10000) / 80);
591 	aggint.num_cmpl_aggr_int = htole16(16);
592 	if (hwrm_send_message(sc, &aggint, sizeof(aggint)))
593 		goto free_cp_mem;
594 
595 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
596 	ifp->if_softc = sc;
597 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
598 	ifp->if_xflags = IFXF_MPSAFE;
599 	ifp->if_ioctl = bnxt_ioctl;
600 	ifp->if_qstart = bnxt_start;
601 	ifp->if_watchdog = bnxt_watchdog;
602 	ifp->if_hardmtu = BNXT_MAX_MTU;
603 	ifp->if_capabilities = IFCAP_VLAN_MTU;	 /* ? */
604 	/* checksum flags, hwtagging? */
605 	IFQ_SET_MAXLEN(&ifp->if_snd, 1024);	/* ? */
606 
607 	ifmedia_init(&sc->sc_media, IFM_IMASK, bnxt_media_change,
608 	    bnxt_media_status);
609 
610 	if_attach(ifp);
611 	ether_ifattach(ifp);
612 
613 	timeout_set(&sc->sc_rx_refill, bnxt_refill, sc);
614 
615 	bnxt_media_autonegotiate(sc);
616 	bnxt_hwrm_port_phy_qcfg(sc, NULL);
617 	return;
618 
619 free_cp_mem:
620 	bnxt_dmamem_free(sc, sc->sc_cp_ring_mem);
621 deintr:
622 	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
623 	sc->sc_ih = NULL;
624 free_resp:
625 	bnxt_dmamem_free(sc, sc->sc_cmd_resp);
626 unmap_2:
627 	bus_space_unmap(sc->sc_hwrm_t, sc->sc_hwrm_h, sc->sc_hwrm_s);
628 	sc->sc_hwrm_s = 0;
629 unmap_1:
630 	bus_space_unmap(sc->sc_db_t, sc->sc_db_h, sc->sc_db_s);
631 	sc->sc_db_s = 0;
632 }
633 
634 void
635 bnxt_free_slots(struct bnxt_softc *sc, struct bnxt_slot *slots, int allocated,
636     int total)
637 {
638 	struct bnxt_slot *bs;
639 
640 	int i = allocated;
641 	while (i-- > 0) {
642 		bs = &slots[i];
643 		bus_dmamap_destroy(sc->sc_dmat, bs->bs_map);
644 	}
645 	free(slots, M_DEVBUF, total * sizeof(*bs));
646 }
647 
648 void
649 bnxt_up(struct bnxt_softc *sc)
650 {
651 	struct ifnet *ifp = &sc->sc_ac.ac_if;
652 	struct bnxt_slot *bs;
653 	int i;
654 
655 	sc->sc_stats_ctx_mem = bnxt_dmamem_alloc(sc,
656 	    sizeof(struct ctx_hw_stats));
657 	if (sc->sc_stats_ctx_mem == NULL) {
658 		printf("%s: failed to allocate stats contexts\n", DEVNAME(sc));
659 		return;
660 	}
661 
662 	sc->sc_tx_ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE);
663 	if (sc->sc_tx_ring_mem == NULL) {
664 		printf("%s: failed to allocate tx ring\n", DEVNAME(sc));
665 		goto free_stats;
666 	}
667 
668 	sc->sc_rx_ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE * 2);
669 	if (sc->sc_rx_ring_mem == NULL) {
670 		printf("%s: failed to allocate rx ring\n", DEVNAME(sc));
671 		goto free_tx;
672 	}
673 
674 	sc->sc_rx_mcast = bnxt_dmamem_alloc(sc, PAGE_SIZE);
675 	if (sc->sc_rx_mcast == NULL) {
676 		printf("%s: failed to allocate multicast address table\n",
677 		    DEVNAME(sc));
678 		goto free_rx;
679 	}
680 
681 	if (bnxt_hwrm_stat_ctx_alloc(sc, &sc->sc_cp_ring,
682 	    BNXT_DMA_DVA(sc->sc_stats_ctx_mem)) != 0) {
683 		printf("%s: failed to set up stats context\n", DEVNAME(sc));
684 		goto free_mc;
685 	}
686 
687 	sc->sc_tx_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
688 	sc->sc_tx_ring.id = BNXT_TX_RING_ID;
689 	sc->sc_tx_ring.doorbell = sc->sc_tx_ring.id * 0x80;
690 	sc->sc_tx_ring.ring_size = PAGE_SIZE / sizeof(struct tx_bd_short);
691 	sc->sc_tx_ring.vaddr = BNXT_DMA_KVA(sc->sc_tx_ring_mem);
692 	sc->sc_tx_ring.paddr = BNXT_DMA_DVA(sc->sc_tx_ring_mem);
693 	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
694 	    &sc->sc_tx_ring, sc->sc_cp_ring.ring.phys_id,
695 	    HWRM_NA_SIGNATURE, 1) != 0) {
696 		printf("%s: failed to set up tx ring\n",
697 		    DEVNAME(sc));
698 		goto dealloc_stats;
699 	}
700 	bnxt_write_tx_doorbell(sc, &sc->sc_tx_ring, 0);
701 
702 	sc->sc_rx_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
703 	sc->sc_rx_ring.id = BNXT_RX_RING_ID;
704 	sc->sc_rx_ring.doorbell = sc->sc_rx_ring.id * 0x80;
705 	sc->sc_rx_ring.ring_size = PAGE_SIZE / sizeof(struct rx_prod_pkt_bd);
706 	sc->sc_rx_ring.vaddr = BNXT_DMA_KVA(sc->sc_rx_ring_mem);
707 	sc->sc_rx_ring.paddr = BNXT_DMA_DVA(sc->sc_rx_ring_mem);
708 	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
709 	    &sc->sc_rx_ring, sc->sc_cp_ring.ring.phys_id,
710 	    HWRM_NA_SIGNATURE, 1) != 0) {
711 		printf("%s: failed to set up rx ring\n",
712 		    DEVNAME(sc));
713 		goto dealloc_tx;
714 	}
715 	bnxt_write_rx_doorbell(sc, &sc->sc_rx_ring, 0);
716 
717 	sc->sc_rx_ag_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
718 	sc->sc_rx_ag_ring.id = BNXT_AG_RING_ID;
719 	sc->sc_rx_ag_ring.doorbell = sc->sc_rx_ag_ring.id * 0x80;
720 	sc->sc_rx_ag_ring.ring_size = PAGE_SIZE / sizeof(struct rx_prod_pkt_bd);
721 	sc->sc_rx_ag_ring.vaddr = BNXT_DMA_KVA(sc->sc_rx_ring_mem) + PAGE_SIZE;
722 	sc->sc_rx_ag_ring.paddr = BNXT_DMA_DVA(sc->sc_rx_ring_mem) + PAGE_SIZE;
723 	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
724 	    &sc->sc_rx_ag_ring, sc->sc_cp_ring.ring.phys_id,
725 	    HWRM_NA_SIGNATURE, 1) != 0) {
726 		printf("%s: failed to set up rx ag ring\n",
727 		    DEVNAME(sc));
728 		goto dealloc_rx;
729 	}
730 	bnxt_write_rx_doorbell(sc, &sc->sc_rx_ag_ring, 0);
731 
732 	sc->sc_ring_group.grp_id = HWRM_NA_SIGNATURE;
733 	sc->sc_ring_group.stats_ctx = sc->sc_cp_ring.stats_ctx_id;
734 	sc->sc_ring_group.rx_ring_id = sc->sc_rx_ring.phys_id;
735 	sc->sc_ring_group.ag_ring_id = sc->sc_rx_ag_ring.phys_id;
736 	sc->sc_ring_group.cp_ring_id = sc->sc_cp_ring.ring.phys_id;
737 	if (bnxt_hwrm_ring_grp_alloc(sc, &sc->sc_ring_group) != 0) {
738 		printf("%s: failed to allocate ring group\n",
739 		    DEVNAME(sc));
740 		goto dealloc_ag;
741 	}
742 
743 	sc->sc_vnic.rss_id = (uint16_t)HWRM_NA_SIGNATURE;
744 	if (bnxt_hwrm_vnic_ctx_alloc(sc, &sc->sc_vnic.rss_id) != 0) {
745 		printf("%s: failed to allocate vnic rss context\n",
746 		    DEVNAME(sc));
747 		goto dealloc_ring_group;
748 	}
749 
750 	sc->sc_vnic.id = (uint16_t)HWRM_NA_SIGNATURE;
751 	sc->sc_vnic.def_ring_grp = sc->sc_ring_group.grp_id;
752 	sc->sc_vnic.mru = BNXT_MAX_MTU;
753 	sc->sc_vnic.cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
754 	sc->sc_vnic.lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
755 	sc->sc_vnic.flags = BNXT_VNIC_FLAG_DEFAULT;
756 	if (bnxt_hwrm_vnic_alloc(sc, &sc->sc_vnic) != 0) {
757 		printf("%s: failed to allocate vnic\n", DEVNAME(sc));
758 		goto dealloc_vnic_ctx;
759 	}
760 
761 	if (bnxt_hwrm_vnic_cfg(sc, &sc->sc_vnic) != 0) {
762 		printf("%s: failed to configure vnic\n", DEVNAME(sc));
763 		goto dealloc_vnic;
764 	}
765 
766 	if (bnxt_hwrm_vnic_cfg_placement(sc, &sc->sc_vnic) != 0) {
767 		printf("%s: failed to configure vnic placement mode\n",
768 		    DEVNAME(sc));
769 		goto dealloc_vnic;
770 	}
771 
772 	sc->sc_vnic.filter_id = -1;
773 	if (bnxt_hwrm_set_filter(sc, &sc->sc_vnic) != 0) {
774 		printf("%s: failed to set vnic filter\n", DEVNAME(sc));
775 		goto dealloc_vnic;
776 	}
777 
778 	/* don't configure rss or tpa yet */
779 
780 	sc->sc_rx_slots = mallocarray(sizeof(*bs), sc->sc_rx_ring.ring_size,
781 	    M_DEVBUF, M_WAITOK | M_ZERO);
782 	if (sc->sc_rx_slots == NULL) {
783 		printf("%s: failed to allocate rx slots\n", DEVNAME(sc));
784 		goto dealloc_filter;
785 	}
786 
787 	for (i = 0; i < sc->sc_rx_ring.ring_size; i++) {
788 		bs = &sc->sc_rx_slots[i];
789 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
790 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bs->bs_map) != 0) {
791 			printf("%s: failed to allocate rx dma maps\n",
792 			    DEVNAME(sc));
793 			goto destroy_rx_slots;
794 		}
795 	}
796 
797 	sc->sc_rx_ag_slots = mallocarray(sizeof(*bs), sc->sc_rx_ag_ring.ring_size,
798 	    M_DEVBUF, M_WAITOK | M_ZERO);
799 	if (sc->sc_rx_ag_slots == NULL) {
800 		printf("%s: failed to allocate rx ag slots\n", DEVNAME(sc));
801 		goto destroy_rx_slots;
802 	}
803 
804 	for (i = 0; i < sc->sc_rx_ag_ring.ring_size; i++) {
805 		bs = &sc->sc_rx_ag_slots[i];
806 		if (bus_dmamap_create(sc->sc_dmat, BNXT_AG_BUFFER_SIZE, 1,
807 		    BNXT_AG_BUFFER_SIZE, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
808 		    &bs->bs_map) != 0) {
809 			printf("%s: failed to allocate rx ag dma maps\n",
810 			    DEVNAME(sc));
811 			goto destroy_rx_ag_slots;
812 		}
813 	}
814 
815 	sc->sc_tx_slots = mallocarray(sizeof(*bs), sc->sc_tx_ring.ring_size,
816 	    M_DEVBUF, M_WAITOK | M_ZERO);
817 	if (sc->sc_tx_slots == NULL) {
818 		printf("%s: failed to allocate tx slots\n", DEVNAME(sc));
819 		goto destroy_rx_ag_slots;
820 	}
821 
822 	for (i = 0; i < sc->sc_tx_ring.ring_size; i++) {
823 		bs = &sc->sc_tx_slots[i];
824 		if (bus_dmamap_create(sc->sc_dmat, BNXT_MAX_MTU, BNXT_MAX_TX_SEGS,
825 		    BNXT_MAX_MTU, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
826 		    &bs->bs_map) != 0) {
827 			printf("%s: failed to allocate tx dma maps\n",
828 			    DEVNAME(sc));
829 			goto destroy_tx_slots;
830 		}
831 	}
832 
833 	bnxt_iff(sc);
834 
835 	/*
836 	 * initially, the rx ring must be filled at least some distance beyond
837 	 * the current consumer index, as it looks like the firmware assumes the
838 	 * ring is full on creation, but doesn't prefetch the whole thing.
839 	 * once the whole ring has been used once, we should be able to back off
840 	 * to 2 or so slots, but we currently don't have a way of doing that.
841 	 */
842 	if_rxr_init(&sc->sc_rxr[0], 32, sc->sc_rx_ring.ring_size - 1);
843 	if_rxr_init(&sc->sc_rxr[1], 32, sc->sc_rx_ag_ring.ring_size - 1);
844 	sc->sc_rx_prod = 0;
845 	sc->sc_rx_cons = 0;
846 	sc->sc_rx_ag_prod = 0;
847 	sc->sc_rx_ag_cons = 0;
848 	bnxt_rx_fill(sc);
849 
850 	SET(ifp->if_flags, IFF_RUNNING);
851 
852 	sc->sc_tx_cons = 0;
853 	sc->sc_tx_prod = 0;
854 	sc->sc_tx_ring_cons = 0;
855 	sc->sc_tx_ring_prod = 0;
856 	ifq_clr_oactive(&ifp->if_snd);
857 	ifq_restart(&ifp->if_snd);
858 
859 	return;
860 
861 destroy_tx_slots:
862 	bnxt_free_slots(sc, sc->sc_tx_slots, i, sc->sc_tx_ring.ring_size);
863 	sc->sc_tx_slots = NULL;
864 
865 	i = sc->sc_rx_ag_ring.ring_size;
866 destroy_rx_ag_slots:
867 	bnxt_free_slots(sc, sc->sc_rx_ag_slots, i, sc->sc_rx_ag_ring.ring_size);
868 	sc->sc_rx_ag_slots = NULL;
869 
870 	i = sc->sc_rx_ring.ring_size;
871 destroy_rx_slots:
872 	bnxt_free_slots(sc, sc->sc_rx_slots, i, sc->sc_rx_ring.ring_size);
873 	sc->sc_rx_slots = NULL;
874 dealloc_filter:
875 	bnxt_hwrm_free_filter(sc, &sc->sc_vnic);
876 dealloc_vnic:
877 	bnxt_hwrm_vnic_free(sc, &sc->sc_vnic);
878 dealloc_vnic_ctx:
879 	bnxt_hwrm_vnic_ctx_free(sc, &sc->sc_vnic.rss_id);
880 dealloc_ring_group:
881 	bnxt_hwrm_ring_grp_free(sc, &sc->sc_ring_group);
882 dealloc_ag:
883 	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
884 	    &sc->sc_rx_ag_ring);
885 dealloc_tx:
886 	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
887 	    &sc->sc_tx_ring);
888 dealloc_rx:
889 	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
890 	    &sc->sc_rx_ring);
891 dealloc_stats:
892 	bnxt_hwrm_stat_ctx_free(sc, &sc->sc_cp_ring);
893 free_mc:
894 	bnxt_dmamem_free(sc, sc->sc_rx_mcast);
895 	sc->sc_rx_mcast = NULL;
896 free_rx:
897 	bnxt_dmamem_free(sc, sc->sc_rx_ring_mem);
898 	sc->sc_rx_ring_mem = NULL;
899 free_tx:
900 	bnxt_dmamem_free(sc, sc->sc_tx_ring_mem);
901 	sc->sc_tx_ring_mem = NULL;
902 free_stats:
903 	bnxt_dmamem_free(sc, sc->sc_stats_ctx_mem);
904 	sc->sc_stats_ctx_mem = NULL;
905 }
906 
/*
 * Bring the interface down: stop the transmit queue, release the
 * per-slot dma maps, free the hwrm objects, and release the dma
 * memory allocated by bnxt_up(), in reverse order of allocation.
 */
void
bnxt_down(struct bnxt_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	CLR(ifp->if_flags, IFF_RUNNING);

	/* wait for any tx work already running on the ifq to finish */
	ifq_clr_oactive(&ifp->if_snd);
	ifq_barrier(&ifp->if_snd);

	timeout_del(&sc->sc_rx_refill);

	/* empty rx ring first i guess */

	bnxt_free_slots(sc, sc->sc_tx_slots, sc->sc_tx_ring.ring_size,
	    sc->sc_tx_ring.ring_size);
	sc->sc_tx_slots = NULL;

	bnxt_free_slots(sc, sc->sc_rx_ag_slots, sc->sc_rx_ag_ring.ring_size,
	    sc->sc_rx_ag_ring.ring_size);
	sc->sc_rx_ag_slots = NULL;

	bnxt_free_slots(sc, sc->sc_rx_slots, sc->sc_rx_ring.ring_size,
	    sc->sc_rx_ring.ring_size);
	sc->sc_rx_slots = NULL;

	/* tear down hwrm objects in reverse order of creation */
	bnxt_hwrm_free_filter(sc, &sc->sc_vnic);
	bnxt_hwrm_vnic_free(sc, &sc->sc_vnic);
	bnxt_hwrm_vnic_ctx_free(sc, &sc->sc_vnic.rss_id);
	bnxt_hwrm_ring_grp_free(sc, &sc->sc_ring_group);
	bnxt_hwrm_stat_ctx_free(sc, &sc->sc_cp_ring);

	/* may need to wait for 500ms here before we can free the rings */

	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
	    &sc->sc_tx_ring);
	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
	    &sc->sc_rx_ag_ring);
	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
	    &sc->sc_rx_ring);

	bnxt_dmamem_free(sc, sc->sc_rx_mcast);
	sc->sc_rx_mcast = NULL;

	bnxt_dmamem_free(sc, sc->sc_rx_ring_mem);
	sc->sc_rx_ring_mem = NULL;

	bnxt_dmamem_free(sc, sc->sc_tx_ring_mem);
	sc->sc_tx_ring_mem = NULL;

	bnxt_dmamem_free(sc, sc->sc_stats_ctx_mem);
	sc->sc_stats_ctx_mem = NULL;
}
960 
/*
 * bnxt_iff: program the hardware receive filter to match the current
 * interface state.  Chooses between promiscuous mode, all-multicast
 * (when multicast ranges are in use or the list would not fit in the
 * sc_rx_mcast DMA page), or an explicit multicast address list copied
 * into the sc_rx_mcast DMA page.
 */
void
bnxt_iff(struct bnxt_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	char *mc_list;
	uint32_t rx_mask, mc_count;

	/* always accept broadcast, multicast, and any-vlan frames */
	rx_mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST
	    | HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST
	    | HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;

	mc_list = BNXT_DMA_KVA(sc->sc_rx_mcast);
	mc_count = 0;

	if (ifp->if_flags & IFF_PROMISC) {
		SET(ifp->if_flags, IFF_ALLMULTI);
		rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	} else if ((sc->sc_ac.ac_multirangecnt > 0) ||
	    (sc->sc_ac.ac_multicnt > (PAGE_SIZE / ETHER_ADDR_LEN))) {
		SET(ifp->if_flags, IFF_ALLMULTI);
		rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	} else {
		CLR(ifp->if_flags, IFF_ALLMULTI);
		/* copy each multicast address into the DMA page */
		ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
		while (enm != NULL) {
			memcpy(mc_list, enm->enm_addrlo, ETHER_ADDR_LEN);
			mc_list += ETHER_ADDR_LEN;
			mc_count++;

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	bnxt_hwrm_cfa_l2_set_rx_mask(sc, sc->sc_vnic.id, rx_mask,
	    BNXT_DMA_DVA(sc->sc_rx_mcast), mc_count);
}
999 
1000 int
1001 bnxt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1002 {
1003 	struct bnxt_softc 	*sc = (struct bnxt_softc *)ifp->if_softc;
1004 	struct ifreq		*ifr = (struct ifreq *)data;
1005 	int			s, error = 0;
1006 
1007 	s = splnet();
1008 	switch (cmd) {
1009 	case SIOCSIFADDR:
1010 		ifp->if_flags |= IFF_UP;
1011 		/* FALLTHROUGH */
1012 
1013 	case SIOCSIFFLAGS:
1014 		if (ISSET(ifp->if_flags, IFF_UP)) {
1015 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1016 				error = ENETRESET;
1017 			else
1018 				bnxt_up(sc);
1019 		} else {
1020 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1021 				bnxt_down(sc);
1022 		}
1023 		break;
1024 
1025 	case SIOCGIFMEDIA:
1026 	case SIOCSIFMEDIA:
1027 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1028 		break;
1029 
1030 	case SIOCGIFRXR:
1031 		error = bnxt_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
1032 		break;
1033 
1034 	case SIOCGIFSFFPAGE:
1035 		error = bnxt_get_sffpage(sc, (struct if_sffpage *)data);
1036 		break;
1037 
1038 	default:
1039 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
1040 	}
1041 
1042 	if (error == ENETRESET) {
1043 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1044 		    (IFF_UP | IFF_RUNNING))
1045 			bnxt_iff(sc);
1046 		error = 0;
1047 	}
1048 
1049 	splx(s);
1050 
1051 	return (error);
1052 }
1053 
1054 int
1055 bnxt_rxrinfo(struct bnxt_softc *sc, struct if_rxrinfo *ifri)
1056 {
1057 	struct if_rxring_info ifr[2];
1058 
1059 	memset(&ifr, 0, sizeof(ifr));
1060 	ifr[0].ifr_size = MCLBYTES;
1061 	ifr[0].ifr_info = sc->sc_rxr[0];
1062 
1063 	ifr[1].ifr_size = BNXT_AG_BUFFER_SIZE;
1064 	ifr[1].ifr_info = sc->sc_rxr[1];
1065 
1066 	return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
1067 }
1068 
/*
 * bnxt_load_mbuf: load an mbuf chain into the slot's DMA map,
 * defragmenting it once if it has too many segments (EFBIG).
 * Returns 0 on success with the mbuf recorded in the slot, or 1 on
 * failure; the caller is responsible for freeing the mbuf on failure.
 */
int
bnxt_load_mbuf(struct bnxt_softc *sc, struct bnxt_slot *bs, struct mbuf *m)
{
	switch (bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;

	case EFBIG:
		/* too many segments: compact the chain and retry once */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (1);
	}

	bs->bs_m = m;
	return (0);
}
1090 
/*
 * bnxt_start: ifq start routine.  Dequeues packets and posts them to
 * the tx ring as chains of tx_bd_short descriptors, one descriptor per
 * DMA segment.  Sets the queue oactive and stops when fewer than
 * BNXT_MAX_TX_SEGS descriptors remain free.  All but the last packet
 * in a batch are flagged NO_CMPL so the hardware generates a single tx
 * completion for the batch.
 */
void
bnxt_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct tx_bd_short *txring;
	struct bnxt_softc *sc = ifp->if_softc;
	struct bnxt_slot *bs;
	bus_dmamap_t map;
	struct mbuf *m;
	u_int idx, free, used, laststart;
	uint16_t txflags;
	int i;

	txring = (struct tx_bd_short *)BNXT_DMA_KVA(sc->sc_tx_ring_mem);

	/* descriptors available between ring producer and consumer */
	idx = sc->sc_tx_ring_prod;
	free = sc->sc_tx_ring_cons;
	if (free <= idx)
		free += sc->sc_tx_ring.ring_size;
	free -= idx;

	used = 0;

	for (;;) {
		/* worst case: the next packet needs BNXT_MAX_TX_SEGS bds */
		if (used + BNXT_MAX_TX_SEGS > free) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		bs = &sc->sc_tx_slots[sc->sc_tx_prod];
		if (bnxt_load_mbuf(sc, bs, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		map = bs->bs_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		used += map->dm_nsegs;

		/* packet length hint for the hardware */
		if (map->dm_mapsize < 512)
			txflags = TX_BD_SHORT_FLAGS_LHINT_LT512;
		else if (map->dm_mapsize < 1024)
			txflags = TX_BD_SHORT_FLAGS_LHINT_LT1K;
		else if (map->dm_mapsize < 2048)
			txflags = TX_BD_SHORT_FLAGS_LHINT_LT2K;
		else
			txflags = TX_BD_SHORT_FLAGS_LHINT_GTE2K;

		txflags |= TX_BD_SHORT_TYPE_TX_BD_SHORT |
		    TX_BD_SHORT_FLAGS_NO_CMPL |
		    (map->dm_nsegs << TX_BD_SHORT_FLAGS_BD_CNT_SFT);
		laststart = idx;

		/* one descriptor per DMA segment */
		for (i = 0; i < map->dm_nsegs; i++) {
			txring[idx].flags_type = htole16(txflags);
			if (i == map->dm_nsegs - 1)
				txring[idx].flags_type |=
				    TX_BD_SHORT_FLAGS_PACKET_END;
			txflags = TX_BD_SHORT_TYPE_TX_BD_SHORT;

			txring[idx].len =
			    htole16(bs->bs_map->dm_segs[i].ds_len);
			/* opaque names the slot for bnxt_txeof() */
			txring[idx].opaque = sc->sc_tx_prod;
			txring[idx].addr =
			    htole64(bs->bs_map->dm_segs[i].ds_addr);

			idx++;
			if (idx == sc->sc_tx_ring.ring_size)
				idx = 0;
		}

		if (++sc->sc_tx_prod >= sc->sc_tx_ring.ring_size)
			sc->sc_tx_prod = 0;
	}

	/* unset NO_CMPL on the first bd of the last packet */
	if (used != 0) {
		txring[laststart].flags_type &=
		    ~htole16(TX_BD_SHORT_FLAGS_NO_CMPL);
	}

	bnxt_write_tx_doorbell(sc, &sc->sc_tx_ring, idx);
	sc->sc_tx_ring_prod = idx;
}
1185 
1186 void
1187 bnxt_handle_async_event(struct bnxt_softc *sc, struct cmpl_base *cmpl)
1188 {
1189 	struct hwrm_async_event_cmpl *ae = (struct hwrm_async_event_cmpl *)cmpl;
1190 	uint16_t type = le16toh(ae->event_id);
1191 
1192 	switch (type) {
1193 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1194 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
1195 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
1196 		bnxt_hwrm_port_phy_qcfg(sc, NULL);
1197 		break;
1198 
1199 	default:
1200 		printf("%s: unexpected async event %x\n", DEVNAME(sc), type);
1201 		break;
1202 	}
1203 }
1204 
/*
 * bnxt_cpr_next_cmpl: return the next completion ring entry, or NULL
 * if the hardware has not produced one yet.  Entry ownership is
 * tracked by the valid bit in info3_v: cpr->v_bit holds the phase we
 * expect, and the expected phase flips each time the ring wraps.  The
 * ring position (cons/v_bit) is only advanced when a valid entry is
 * found.
 */
struct cmpl_base *
bnxt_cpr_next_cmpl(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
{
	struct cmpl_base *cmpl;
	uint32_t cons;
	int v_bit;

	/* cpr->cons points at the last consumed entry; look one ahead */
	cons = cpr->cons + 1;
	v_bit = cpr->v_bit;
	if (cons == cpr->ring.ring_size) {
		cons = 0;
		v_bit = !v_bit;
	}
	cmpl = &((struct cmpl_base *)cpr->ring.vaddr)[cons];

	/* phase mismatch means the hardware hasn't written this entry yet */
	if ((!!(cmpl->info3_v & htole32(CMPL_BASE_V))) != (!!v_bit))
		return (NULL);

	cpr->cons = cons;
	cpr->v_bit = v_bit;
	return (cmpl);
}
1227 
/*
 * bnxt_cpr_commit: record the current completion ring position so a
 * partially-parsed multi-entry completion can later be undone with
 * bnxt_cpr_rollback().
 */
void
bnxt_cpr_commit(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
{
	cpr->commit_cons = cpr->cons;
	cpr->commit_v_bit = cpr->v_bit;
}
1234 
/*
 * bnxt_cpr_rollback: rewind the completion ring to the last committed
 * position, so an incomplete multi-entry completion is re-read on the
 * next pass.
 */
void
bnxt_cpr_rollback(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
{
	cpr->cons = cpr->commit_cons;
	cpr->v_bit = cpr->commit_v_bit;
}
1241 
1242 
/*
 * bnxt_intr: interrupt handler.  Masks the completion ring doorbell,
 * consumes completions until the ring runs dry (or an rx completion is
 * incomplete and must be rolled back), then re-arms the doorbell and
 * does the rx/tx post-processing: returning rx slots to the rxr
 * accounting, refilling the rings, passing received packets up the
 * stack, and restarting the send queue after tx completions.
 */
int
bnxt_intr(void *xsc)
{
	struct bnxt_softc *sc = (struct bnxt_softc *)xsc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct bnxt_cp_ring *cpr = &sc->sc_cp_ring;
	struct cmpl_base *cmpl;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint16_t type;
	int rxfree, txfree, agfree, rv, rollback;

	/* mask further interrupts while we work through the ring */
	bnxt_write_cp_doorbell(sc, &cpr->ring, 0);
	rxfree = 0;
	txfree = 0;
	agfree = 0;
	rv = -1;
	cmpl = bnxt_cpr_next_cmpl(sc, cpr);
	while (cmpl != NULL) {
		type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
		rollback = 0;
		switch (type) {
		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
			bnxt_handle_async_event(sc, cmpl);
			break;
		case CMPL_BASE_TYPE_RX_L2:
			/* rx may need entries that haven't arrived yet */
			rollback = bnxt_rx(sc, cpr, &ml, &rxfree, &agfree, cmpl);
			break;
		case CMPL_BASE_TYPE_TX_L2:
			bnxt_txeof(sc, &txfree, cmpl);
			break;
		default:
			printf("%s: unexpected completion type %u\n",
			    DEVNAME(sc), type);
		}

		if (rollback) {
			bnxt_cpr_rollback(sc, cpr);
			break;
		}
		rv = 1;
		bnxt_cpr_commit(sc, cpr);
		cmpl = bnxt_cpr_next_cmpl(sc, cpr);
	}

	/*
	 * comments in bnxtreg.h suggest we should be writing cpr->cons here,
	 * but writing cpr->cons + 1 makes it stop interrupting.
	 */
	bnxt_write_cp_doorbell_index(sc, &cpr->ring,
	    (cpr->commit_cons+1) % cpr->ring.ring_size, 1);

	if (rxfree != 0) {
		/* advance both rx consumers, with wrap */
		sc->sc_rx_cons += rxfree;
		if (sc->sc_rx_cons >= sc->sc_rx_ring.ring_size)
			sc->sc_rx_cons -= sc->sc_rx_ring.ring_size;

		sc->sc_rx_ag_cons += agfree;
		if (sc->sc_rx_ag_cons >= sc->sc_rx_ag_ring.ring_size)
			sc->sc_rx_ag_cons -= sc->sc_rx_ag_ring.ring_size;

		if_rxr_put(&sc->sc_rxr[0], rxfree);
		if_rxr_put(&sc->sc_rxr[1], agfree);

		bnxt_rx_fill(sc);
		/* if either ring is empty, retry the refill from a timeout */
		if ((sc->sc_rx_cons == sc->sc_rx_prod) ||
		    (sc->sc_rx_ag_cons == sc->sc_rx_ag_prod))
			timeout_add(&sc->sc_rx_refill, 0);

		if_input(&sc->sc_ac.ac_if, &ml);
	}
	if (txfree != 0) {
		if (ifq_is_oactive(&ifp->if_snd))
			ifq_restart(&ifp->if_snd);
	}
	return (rv);
}
1319 
/*
 * bnxt_watchdog: interface watchdog callback; intentionally empty.
 */
void
bnxt_watchdog(struct ifnet *ifp)
{
}
1324 
/*
 * bnxt_media_status: ifmedia status callback; queries the PHY via
 * firmware and fills in *ifmr.
 */
void
bnxt_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bnxt_softc *sc = (struct bnxt_softc *)ifp->if_softc;
	bnxt_hwrm_port_phy_qcfg(sc, ifmr);
}
1331 
/*
 * bnxt_get_media_type: map a link speed (in bits/sec, as produced by
 * IF_Gbps()/IF_Mbps()) plus an HWRM phy type to the matching ifmedia
 * subtype.  Returns 0 when the combination has no ifmedia equivalent.
 */
uint64_t
bnxt_get_media_type(uint64_t speed, int phy_type)
{
	switch (phy_type) {
	/* copper/direct-attach ("CR") media */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4:
		switch (speed) {
		case IF_Gbps(1):
			return IFM_1000_T;
		case IF_Gbps(10):
			return IFM_10G_SFP_CU;
		case IF_Gbps(25):
			return IFM_25G_CR;
		case IF_Gbps(40):
			return IFM_40G_CR4;
		case IF_Gbps(50):
			return IFM_50G_CR2;
		case IF_Gbps(100):
			return IFM_100G_CR4;
		}
		break;

	/* long-reach ("LR") optics */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4:
		switch (speed) {
		case IF_Gbps(1):
			return IFM_1000_LX;
		case IF_Gbps(10):
			return IFM_10G_LR;
		case IF_Gbps(25):
			return IFM_25G_LR;
		case IF_Gbps(40):
			return IFM_40G_LR4;
		case IF_Gbps(100):
			return IFM_100G_LR4;
		}
		break;

	/* short-reach ("SR") optics */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX:
		switch (speed) {
		case IF_Gbps(1):
			return IFM_1000_SX;
		case IF_Gbps(10):
			return IFM_10G_SR;
		case IF_Gbps(25):
			return IFM_25G_SR;
		case IF_Gbps(40):
			return IFM_40G_SR4;
		case IF_Gbps(100):
			return IFM_100G_SR4;
		}
		break;

	/* extended-reach ("ER") optics */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4:
		switch (speed) {
		case IF_Gbps(10):
			return IFM_10G_ER;
		case IF_Gbps(25):
			return IFM_25G_ER;
		}
		/* missing IFM_40G_ER4, IFM_100G_ER4 */
		break;

	/* backplane ("KR") media */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR:
		switch (speed) {
		case IF_Gbps(10):
			return IFM_10G_KR;
		case IF_Gbps(20):
			return IFM_20G_KR2;
		case IF_Gbps(25):
			return IFM_25G_KR;
		case IF_Gbps(40):
			return IFM_40G_KR4;
		case IF_Gbps(50):
			return IFM_50G_KR2;
		case IF_Gbps(100):
			return IFM_100G_KR4;
		}
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX:
		switch (speed) {
		case IF_Gbps(1):
			return IFM_1000_KX;
		case IF_Mbps(2500):
			return IFM_2500_KX;
		case IF_Gbps(10):
			return IFM_10G_KX4;
		}
		break;

	/* twisted-pair ("T") media */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET:
		switch (speed) {
		case IF_Mbps(10):
			return IFM_10_T;
		case IF_Mbps(100):
			return IFM_100_TX;
		case IF_Gbps(1):
			return IFM_1000_T;
		case IF_Mbps(2500):
			return IFM_2500_T;
		case IF_Gbps(10):
			return IFM_10G_T;
		}
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY:
		switch (speed) {
		case IF_Gbps(1):
			return IFM_1000_SGMII;
		}
		break;

	/* active optical cables */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE:
		switch (speed) {
		case IF_Gbps(10):
			return IFM_10G_AOC;
		case IF_Gbps(25):
			return IFM_25G_AOC;
		case IF_Gbps(40):
			return IFM_40G_AOC;
		case IF_Gbps(100):
			return IFM_100G_AOC;
		}
		break;
	}

	return 0;
}
1476 
1477 void
1478 bnxt_add_media_type(struct bnxt_softc *sc, int supported_speeds, uint64_t speed, uint64_t ifmt)
1479 {
1480 	int speed_bit = 0;
1481 	switch (speed) {
1482 	case IF_Gbps(1):
1483 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB;
1484 		break;
1485 	case IF_Gbps(2):
1486 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB;
1487 		break;
1488 	case IF_Mbps(2500):
1489 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB;
1490 		break;
1491 	case IF_Gbps(10):
1492 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB;
1493 		break;
1494 	case IF_Gbps(20):
1495 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB;
1496 		break;
1497 	case IF_Gbps(25):
1498 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB;
1499 		break;
1500 	case IF_Gbps(40):
1501 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB;
1502 		break;
1503 	case IF_Gbps(50):
1504 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB;
1505 		break;
1506 	case IF_Gbps(100):
1507 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB;
1508 		break;
1509 	}
1510 	if (supported_speeds & speed_bit)
1511 		ifmedia_add(&sc->sc_media, IFM_ETHER | ifmt, 0, NULL);
1512 }
1513 
/*
 * bnxt_hwrm_port_phy_qcfg: query the port PHY configuration from
 * firmware.  Updates the interface baudrate and link state, rebuilds
 * the supported ifmedia list from the firmware-reported speeds, and
 * optionally fills in *ifmr for SIOCGIFMEDIA.  Returns 0 on success or
 * an errno from the HWRM exchange.
 *
 * NOTE(review): the media list is deleted and rebuilt on every call,
 * including from the link-change interrupt path — presumably harmless
 * but worth confirming against ifmedia locking expectations.
 */
int
bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *softc, struct ifmediareq *ifmr)
{
	struct ifnet *ifp = &softc->sc_ac.ac_if;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp =
	    BNXT_DMA_KVA(softc->sc_cmd_resp);
	int link_state = LINK_STATE_DOWN;
	uint64_t speeds[] = {
		IF_Gbps(1), IF_Gbps(2), IF_Mbps(2500), IF_Gbps(10), IF_Gbps(20),
		IF_Gbps(25), IF_Gbps(40), IF_Gbps(50), IF_Gbps(100)
	};
	uint64_t media_type;
	int duplex;
	int rc = 0;
	int i;

	BNXT_HWRM_LOCK(softc);
	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_QCFG);

	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc) {
		printf("%s: failed to query port phy config\n", DEVNAME(softc));
		goto exit;
	}

	/* firmware newer than 1.8.0 reports duplex in duplex_state */
	if (softc->sc_hwrm_ver > 0x10800)
		duplex = resp->duplex_state;
	else
		duplex = resp->duplex_cfg;

	if (resp->link == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) {
		if (duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF)
			link_state = LINK_STATE_HALF_DUPLEX;
		else
			link_state = LINK_STATE_FULL_DUPLEX;

		switch (resp->link_speed) {
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB:
			ifp->if_baudrate = IF_Mbps(10);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
			ifp->if_baudrate = IF_Mbps(100);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
			ifp->if_baudrate = IF_Gbps(1);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
			ifp->if_baudrate = IF_Gbps(2);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
			ifp->if_baudrate = IF_Mbps(2500);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
			ifp->if_baudrate = IF_Gbps(10);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
			ifp->if_baudrate = IF_Gbps(20);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
			ifp->if_baudrate = IF_Gbps(25);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
			ifp->if_baudrate = IF_Gbps(40);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
			ifp->if_baudrate = IF_Gbps(50);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
			ifp->if_baudrate = IF_Gbps(100);
			break;
		}
	}

	/* rebuild the media list from the currently supported speeds */
	ifmedia_delete_instance(&softc->sc_media, IFM_INST_ANY);
	for (i = 0; i < nitems(speeds); i++) {
		media_type = bnxt_get_media_type(speeds[i], resp->phy_type);
		if (media_type != 0)
			bnxt_add_media_type(softc, resp->support_speeds,
			    speeds[i], media_type);
	}
	ifmedia_add(&softc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&softc->sc_media, IFM_ETHER|IFM_AUTO);

	if (ifmr != NULL) {
		ifmr->ifm_status = IFM_AVALID;
		if (LINK_STATE_IS_UP(ifp->if_link_state)) {
			ifmr->ifm_status |= IFM_ACTIVE;
			ifmr->ifm_active = IFM_ETHER | IFM_AUTO;
			if (resp->pause & HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX)
				ifmr->ifm_active |= IFM_ETH_TXPAUSE;
			if (resp->pause & HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX)
				ifmr->ifm_active |= IFM_ETH_RXPAUSE;
			if (duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF)
				ifmr->ifm_active |= IFM_HDX;
			else
				ifmr->ifm_active |= IFM_FDX;

			media_type = bnxt_get_media_type(ifp->if_baudrate, resp->phy_type);
			if (media_type != 0)
				ifmr->ifm_active |= media_type;
		}
	}

exit:
	BNXT_HWRM_UNLOCK(softc);

	/* propagate link state changes outside the HWRM lock */
	if (rc == 0 && (link_state != ifp->if_link_state)) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}

	return rc;
}
1628 
/*
 * bnxt_media_change: ifmedia change callback.  Translates the selected
 * media subtype into a forced HWRM link speed; IFM_AUTO and any
 * unrecognized subtype (link_speed == 0) restart autonegotiation
 * instead.  Not supported on NPAR (partitioned) functions.
 */
int
bnxt_media_change(struct ifnet *ifp)
{
	struct bnxt_softc *sc = (struct bnxt_softc *)ifp->if_softc;
	struct hwrm_port_phy_cfg_input req = {0};
	uint64_t link_speed;

	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return EINVAL;

	if (sc->sc_flags & BNXT_FLAG_NPAR)
		return ENODEV;

	bnxt_hwrm_cmd_hdr_init(sc, &req, HWRM_PORT_PHY_CFG);

	switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
	case IFM_100G_CR4:
	case IFM_100G_SR4:
	case IFM_100G_KR4:
	case IFM_100G_LR4:
	case IFM_100G_AOC:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB;
		break;

	case IFM_50G_CR2:
	case IFM_50G_KR2:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB;
		break;

	case IFM_40G_CR4:
	case IFM_40G_SR4:
	case IFM_40G_LR4:
	case IFM_40G_KR4:
	case IFM_40G_AOC:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB;
		break;

	case IFM_25G_CR:
	case IFM_25G_KR:
	case IFM_25G_SR:
	case IFM_25G_LR:
	case IFM_25G_ER:
	case IFM_25G_AOC:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB;
		break;

	case IFM_10G_LR:
	case IFM_10G_SR:
	case IFM_10G_CX4:
	case IFM_10G_T:
	case IFM_10G_SFP_CU:
	case IFM_10G_LRM:
	case IFM_10G_KX4:
	case IFM_10G_KR:
	case IFM_10G_CR1:
	case IFM_10G_ER:
	case IFM_10G_AOC:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB;
		break;

	case IFM_2500_SX:
	case IFM_2500_KX:
	case IFM_2500_T:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB;
		break;

	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_CX:
	case IFM_1000_KX:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB;
		break;

	case IFM_100_TX:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB;
		break;

	default:
		/* including IFM_AUTO: fall back to autonegotiation below */
		link_speed = 0;
	}

	req.enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX);
	req.auto_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
	if (link_speed == 0) {
		req.auto_mode |=
		    HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		req.flags |=
		    htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
		req.enables |=
		    htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE);
	} else {
		req.force_link_speed = htole16(link_speed);
		req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE);
	}
	req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);

	return hwrm_send_message(sc, &req, sizeof(req));
}
1728 
1729 int
1730 bnxt_media_autonegotiate(struct bnxt_softc *sc)
1731 {
1732 	struct hwrm_port_phy_cfg_input req = {0};
1733 
1734 	if (sc->sc_flags & BNXT_FLAG_NPAR)
1735 		return ENODEV;
1736 
1737 	bnxt_hwrm_cmd_hdr_init(sc, &req, HWRM_PORT_PHY_CFG);
1738 	req.auto_mode |= HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
1739 	req.auto_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1740 	req.enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE |
1741 	    HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX);
1742 	req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
1743 	req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);
1744 
1745 	return hwrm_send_message(sc, &req, sizeof(req));
1746 }
1747 
1748 
1749 void
1750 bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr)
1751 {
1752 	struct cmpl_base *cmp = (void *)cpr->ring.vaddr;
1753 	int i;
1754 
1755 	for (i = 0; i < cpr->ring.ring_size; i++)
1756 		cmp[i].info3_v = !cpr->v_bit;
1757 }
1758 
/*
 * bnxt_write_cp_doorbell: write the completion ring doorbell, either
 * enabling (enable != 0) or masking interrupts for the ring.  The
 * barriers order the doorbell write after any preceding descriptor
 * writes in the doorbell BAR.
 */
void
bnxt_write_cp_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring,
    int enable)
{
	uint32_t val = CMPL_DOORBELL_KEY_CMPL;
	if (enable == 0)
		val |= CMPL_DOORBELL_MASK;

	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, 0, sc->sc_db_s,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
	    htole32(val));
}
1774 
/*
 * bnxt_write_cp_doorbell_index: acknowledge completion ring entries up
 * to the given index, optionally re-enabling interrupts (enable != 0).
 */
void
bnxt_write_cp_doorbell_index(struct bnxt_softc *sc, struct bnxt_ring *ring,
    uint32_t index, int enable)
{
	uint32_t val = CMPL_DOORBELL_KEY_CMPL | CMPL_DOORBELL_IDX_VALID |
	    (index & CMPL_DOORBELL_IDX_MASK);
	if (enable == 0)
		val |= CMPL_DOORBELL_MASK;
	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
	    htole32(val));
	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, 0, sc->sc_db_s,
	    BUS_SPACE_BARRIER_WRITE);
}
1790 
/*
 * bnxt_write_rx_doorbell: post the rx ring producer index to the
 * hardware, telling it new receive buffers are available.
 */
void
bnxt_write_rx_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring, int index)
{
	uint32_t val = RX_DOORBELL_KEY_RX | index;
	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
	    htole32(val));

	/* second write isn't necessary on all hardware */
	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
	    htole32(val));
}
1806 
/*
 * bnxt_write_tx_doorbell: post the tx ring producer index to the
 * hardware, telling it new transmit descriptors are ready.
 */
void
bnxt_write_tx_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring, int index)
{
	uint32_t val = TX_DOORBELL_KEY_TX | index;
	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
	    htole32(val));

	/* second write isn't necessary on all hardware */
	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
	    htole32(val));
}
1822 
/*
 * bnxt_rx_fill_slots: fill up to nslots rx producer descriptors with
 * fresh mbuf clusters of the given bufsize, starting at *prod.  A
 * cluster allocation or dmamap load failure stops the loop early.  The
 * rx doorbell is rung only if at least one descriptor was posted.
 * Returns the number of slots that could NOT be filled, so the caller
 * can return them to its if_rxr accounting.
 */
u_int
bnxt_rx_fill_slots(struct bnxt_softc *sc, struct bnxt_ring *ring, void *ring_mem,
    struct bnxt_slot *slots, uint *prod, int bufsize, uint16_t bdtype,
    u_int nslots)
{
	struct rx_prod_pkt_bd *rxring;
	struct bnxt_slot *bs;
	struct mbuf *m;
	uint p, fills;

	rxring = (struct rx_prod_pkt_bd *)ring_mem;
	p = *prod;
	for (fills = 0; fills < nslots; fills++) {
		bs = &slots[p];
		m = MCLGETI(NULL, M_DONTWAIT, NULL, bufsize);
		if (m == NULL)
			break;

		m->m_len = m->m_pkthdr.len = bufsize;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,
		    BUS_DMA_NOWAIT) != 0) {
			m_freem(m);
			break;
		}
		bs->bs_m = m;

		/* opaque names the slot for the rx completion handler */
		rxring[p].flags_type = htole16(bdtype);
		rxring[p].len = htole16(bufsize);
		rxring[p].opaque = p;
		rxring[p].addr = htole64(bs->bs_map->dm_segs[0].ds_addr);

		if (++p >= ring->ring_size)
			p = 0;
	}

	if (fills != 0)
		bnxt_write_rx_doorbell(sc, ring, p);
	*prod = p;

	return (nslots - fills);
}
1864 
/*
 * bnxt_rx_fill: top up both the main rx ring and the aggregation ring
 * from their if_rxr accounting.  Returns non-zero if either ring had
 * no slots available at all, which the caller can use to decide to
 * schedule a refill timeout.
 */
int
bnxt_rx_fill(struct bnxt_softc *sc)
{
	u_int slots;
	int rv = 0;

	slots = if_rxr_get(&sc->sc_rxr[0], sc->sc_rx_ring.ring_size);
	if (slots > 0) {
		/* return the unfilled remainder to the rxr accounting */
		slots = bnxt_rx_fill_slots(sc, &sc->sc_rx_ring,
		    BNXT_DMA_KVA(sc->sc_rx_ring_mem), sc->sc_rx_slots,
		    &sc->sc_rx_prod, MCLBYTES,
		    RX_PROD_PKT_BD_TYPE_RX_PROD_PKT, slots);
		if_rxr_put(&sc->sc_rxr[0], slots);
	} else
		rv = 1;

	/* ag ring descriptors live one page after the rx ring memory */
	slots = if_rxr_get(&sc->sc_rxr[1],  sc->sc_rx_ag_ring.ring_size);
	if (slots > 0) {
		slots = bnxt_rx_fill_slots(sc, &sc->sc_rx_ag_ring,
		    BNXT_DMA_KVA(sc->sc_rx_ring_mem) + PAGE_SIZE,
		    sc->sc_rx_ag_slots, &sc->sc_rx_ag_prod,
		    BNXT_AG_BUFFER_SIZE,
		    RX_PROD_AGG_BD_TYPE_RX_PROD_AGG, slots);
		if_rxr_put(&sc->sc_rxr[1], slots);
	} else
		rv = 1;

	return (rv);
}
1894 
/*
 * bnxt_refill: timeout handler to retry the rx ring refill when an
 * earlier attempt could not allocate buffers.  Re-arms itself while
 * the main rx ring is still empty.  NOTE(review): only the main ring
 * (rx_cons == rx_prod) is checked here, not the ag ring — presumably
 * the interrupt path re-schedules for that case; confirm.
 */
void
bnxt_refill(void *xsc)
{
	struct bnxt_softc *sc = xsc;

	bnxt_rx_fill(sc);

	if (sc->sc_rx_cons == sc->sc_rx_prod)
		timeout_add(&sc->sc_rx_refill, 1);
}
1905 
/*
 * bnxt_rx: process one received packet.  An rx packet consumes two
 * completion ring entries (rx_pkt_cmpl and rx_pkt_cmpl_hi), plus a
 * third (rx_abuf_cmpl) when the packet spilled into an aggregation
 * buffer.  Returns 1 if the extra entries have not been produced by
 * the hardware yet, in which case the caller must roll the completion
 * ring back and retry on a later interrupt; returns 0 with the packet
 * queued on ml otherwise.  *slots/*agslots count consumed rx/ag ring
 * entries for the caller's bookkeeping.
 */
int
bnxt_rx(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr, struct mbuf_list *ml,
    int *slots, int *agslots, struct cmpl_base *cmpl)
{
	struct mbuf *m, *am;
	struct bnxt_slot *bs;
	struct rx_pkt_cmpl *rx = (struct rx_pkt_cmpl *)cmpl;
	struct rx_pkt_cmpl_hi *rxhi;
	struct rx_abuf_cmpl *ag;

	/* second part of the rx completion */
	rxhi = (struct rx_pkt_cmpl_hi *)bnxt_cpr_next_cmpl(sc, cpr);
	if (rxhi == NULL) {
		return (1);
	}

	/* packets over 2k in size use an aggregation buffer completion too */
	ag = NULL;
	if ((rx->agg_bufs_v1 >> RX_PKT_CMPL_AGG_BUFS_SFT) != 0) {
		ag = (struct rx_abuf_cmpl *)bnxt_cpr_next_cmpl(sc, cpr);
		if (ag == NULL) {
			return (1);
		}
	}

	/* opaque was set to the rx slot index when the buffer was posted */
	bs = &sc->sc_rx_slots[rx->opaque];
	bus_dmamap_sync(sc->sc_dmat, bs->bs_map, 0, bs->bs_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, bs->bs_map);

	m = bs->bs_m;
	bs->bs_m = NULL;
	m->m_pkthdr.len = m->m_len = letoh16(rx->len);
	(*slots)++;

	if (ag != NULL) {
		/* chain the aggregation buffer onto the head mbuf */
		bs = &sc->sc_rx_ag_slots[ag->opaque];
		bus_dmamap_sync(sc->sc_dmat, bs->bs_map, 0,
		    bs->bs_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, bs->bs_map);

		am = bs->bs_m;
		bs->bs_m = NULL;
		am->m_len = letoh16(ag->len);
		m->m_next = am;
		m->m_pkthdr.len += am->m_len;
		(*agslots)++;
	}

	ml_enqueue(ml, m);
	return (0);
}
1958 
/*
 * bnxt_txeof: process a tx completion.  Unloads and frees every
 * transmitted mbuf up to and including the slot named by the
 * completion's opaque field, advancing both the slot consumer
 * (sc_tx_cons, one step per packet) and the descriptor consumer
 * (sc_tx_ring_cons, dm_nsegs steps per packet).  *txfree accumulates
 * the number of freed descriptors for the caller.
 */
void
bnxt_txeof(struct bnxt_softc *sc, int *txfree, struct cmpl_base *cmpl)
{
	struct tx_cmpl *txcmpl = (struct tx_cmpl *)cmpl;
	struct bnxt_slot *bs;
	bus_dmamap_t map;
	u_int idx, segs, last;

	idx = sc->sc_tx_ring_cons;
	last = sc->sc_tx_cons;
	do {
		bs = &sc->sc_tx_slots[sc->sc_tx_cons];
		map = bs->bs_map;

		segs = map->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(bs->bs_m);
		bs->bs_m = NULL;

		/* this packet used dm_nsegs descriptors */
		idx += segs;
		(*txfree) += segs;
		if (idx >= sc->sc_tx_ring.ring_size)
			idx -= sc->sc_tx_ring.ring_size;

		last = sc->sc_tx_cons;
		if (++sc->sc_tx_cons >= sc->sc_tx_ring.ring_size)
			sc->sc_tx_cons = 0;

	} while (last != txcmpl->opaque);
	sc->sc_tx_ring_cons = idx;
}
1992 
1993 /* bnxt_hwrm.c */
1994 
1995 int
1996 bnxt_hwrm_err_map(uint16_t err)
1997 {
1998 	int rc;
1999 
2000 	switch (err) {
2001 	case HWRM_ERR_CODE_SUCCESS:
2002 		return 0;
2003 	case HWRM_ERR_CODE_INVALID_PARAMS:
2004 	case HWRM_ERR_CODE_INVALID_FLAGS:
2005 	case HWRM_ERR_CODE_INVALID_ENABLES:
2006 		return EINVAL;
2007 	case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
2008 		return EACCES;
2009 	case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
2010 		return ENOMEM;
2011 	case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
2012 		return ENOSYS;
2013 	case HWRM_ERR_CODE_FAIL:
2014 		return EIO;
2015 	case HWRM_ERR_CODE_HWRM_ERROR:
2016 	case HWRM_ERR_CODE_UNKNOWN_ERR:
2017 	default:
2018 		return EIO;
2019 	}
2020 
2021 	return rc;
2022 }
2023 
2024 void
2025 bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *softc, void *request,
2026     uint16_t req_type)
2027 {
2028 	struct input *req = request;
2029 
2030 	req->req_type = htole16(req_type);
2031 	req->cmpl_ring = 0xffff;
2032 	req->target_id = 0xffff;
2033 	req->resp_addr = htole64(BNXT_DMA_DVA(softc->sc_cmd_resp));
2034 }
2035 
/*
 * Exchange one HWRM command with the firmware: write the request into
 * the HWRM channel, ring the doorbell at offset 0x100, then poll the
 * response buffer (up to sc_cmd_timeo iterations of 1ms) first for a
 * response length and then for the "valid" key in the response's last
 * byte.  Returns 0, ETIMEDOUT, or the firmware error mapped through
 * bnxt_hwrm_err_map().  Callers must serialize access; use
 * hwrm_send_message() unless the HWRM lock is already held.
 */
int
_hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
{
	struct input *req = msg;
	struct hwrm_err_output *resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
	uint32_t *data = msg;
	int i;
	uint8_t *valid;
	uint16_t err;
	uint16_t max_req_len = HWRM_MAX_REQ_LEN;
	struct hwrm_short_input short_input = {0};

	/* TODO: DMASYNC in here. */
	req->seq_id = htole16(softc->sc_cmd_seq++);
	memset(resp, 0, PAGE_SIZE);

	/*
	 * Short command mode: the firmware fetches the full request via
	 * DMA, so only a small short_input pointing at it is written to
	 * the channel.  Note the request body reuses the response DMA
	 * buffer (sc_cmd_resp).
	 */
	if (softc->sc_flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = BNXT_DMA_KVA(softc->sc_cmd_resp);

		memcpy(short_cmd_req, req, msg_len);
		memset((uint8_t *) short_cmd_req + msg_len, 0,
		    softc->sc_max_req_len - msg_len);

		short_input.req_type = req->req_type;
		short_input.signature =
		    htole16(HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
		short_input.size = htole16(msg_len);
		short_input.req_addr =
		    htole64(BNXT_DMA_DVA(softc->sc_cmd_resp));

		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		membar_sync();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bus_space_write_4(softc->sc_hwrm_t,
				  softc->sc_hwrm_h,
				  i, *data);
		data++;
	}

	/* Clear to the end of the request buffer */
	for (i = msg_len; i < max_req_len; i += 4)
		bus_space_write_4(softc->sc_hwrm_t, softc->sc_hwrm_h,
		    i, 0);

	/* Ring channel doorbell */
	bus_space_write_4(softc->sc_hwrm_t, softc->sc_hwrm_h, 0x100,
	    htole32(1));

	/* Check if response len is updated */
	for (i = 0; i < softc->sc_cmd_timeo; i++) {
		if (resp->resp_len && resp->resp_len <= 4096)
			break;
		DELAY(1000);
	}
	if (i >= softc->sc_cmd_timeo) {
		printf("%s: timeout sending %s: (timeout: %u) seq: %d\n",
		    DEVNAME(softc), GET_HWRM_REQ_TYPE(req->req_type),
		    softc->sc_cmd_timeo,
		    le16toh(req->seq_id));
		return ETIMEDOUT;
	}
	/* Last byte of resp contains the valid key */
	valid = (uint8_t *)resp + resp->resp_len - 1;
	for (i = 0; i < softc->sc_cmd_timeo; i++) {
		if (*valid == HWRM_RESP_VALID_KEY)
			break;
		DELAY(1000);
	}
	if (i >= softc->sc_cmd_timeo) {
		printf("%s: timeout sending %s: "
		    "(timeout: %u) msg {0x%x 0x%x} len:%d v: %d\n",
		    DEVNAME(softc), GET_HWRM_REQ_TYPE(req->req_type),
		    softc->sc_cmd_timeo, le16toh(req->req_type),
		    le16toh(req->seq_id), msg_len,
		    *valid);
		return ETIMEDOUT;
	}

	err = le16toh(resp->error_code);
	if (err) {
		/* HWRM_ERR_CODE_FAIL is a "normal" error, don't log */
		if (err != HWRM_ERR_CODE_FAIL) {
			printf("%s: %s command returned %s error.\n",
			    DEVNAME(softc),
			    GET_HWRM_REQ_TYPE(req->req_type),
			    GET_HWRM_ERROR_CODE(err));
		}
		return bnxt_hwrm_err_map(err);
	}

	return 0;
}
2136 
2137 
2138 int
2139 hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
2140 {
2141 	int rc;
2142 
2143 	BNXT_HWRM_LOCK(softc);
2144 	rc = _hwrm_send_message(softc, msg, msg_len);
2145 	BNXT_HWRM_UNLOCK(softc);
2146 	return rc;
2147 }
2148 
2149 
2150 int
2151 bnxt_hwrm_queue_qportcfg(struct bnxt_softc *softc)
2152 {
2153 	struct hwrm_queue_qportcfg_input req = {0};
2154 	struct hwrm_queue_qportcfg_output *resp =
2155 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
2156 
2157 	int	i, rc = 0;
2158 	uint8_t	*qptr;
2159 
2160 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_QPORTCFG);
2161 
2162 	BNXT_HWRM_LOCK(softc);
2163 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2164 	if (rc)
2165 		goto qportcfg_exit;
2166 
2167 	if (!resp->max_configurable_queues) {
2168 		rc = -EINVAL;
2169 		goto qportcfg_exit;
2170 	}
2171 	softc->sc_max_tc = resp->max_configurable_queues;
2172 	if (softc->sc_max_tc > BNXT_MAX_QUEUE)
2173 		softc->sc_max_tc = BNXT_MAX_QUEUE;
2174 
2175 	qptr = &resp->queue_id0;
2176 	for (i = 0; i < softc->sc_max_tc; i++) {
2177 		softc->sc_q_info[i].id = *qptr++;
2178 		softc->sc_q_info[i].profile = *qptr++;
2179 	}
2180 
2181 qportcfg_exit:
2182 	BNXT_HWRM_UNLOCK(softc);
2183 	return rc;
2184 }
2185 
/*
 * Query firmware and interface versions (HWRM_VER_GET).  Prints the
 * firmware version (continuing the attach line), records the HWRM
 * interface version in sc_hwrm_ver, and adopts the device's preferred
 * max request length and default command timeout when provided.  Also
 * detects whether the short command format is required and sets
 * BNXT_FLAG_SHORT_CMD accordingly.
 */
int
bnxt_hwrm_ver_get(struct bnxt_softc *softc)
{
	struct hwrm_ver_get_input	req = {0};
	struct hwrm_ver_get_output	*resp =
	    BNXT_DMA_KVA(softc->sc_cmd_resp);
	int				rc;
#if 0
	const char nastr[] = "<not installed>";
	const char naver[] = "<N/A>";
#endif
	uint32_t dev_caps_cfg;

	/* conservative defaults until the response tells us otherwise */
	softc->sc_max_req_len = HWRM_MAX_REQ_LEN;
	softc->sc_cmd_timeo = 1000;
	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VER_GET);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	printf(": fw ver %d.%d.%d, ", resp->hwrm_fw_maj, resp->hwrm_fw_min,
	    resp->hwrm_fw_bld);

	/* packed as 0x00MMmmuu: major, minor, update */
	softc->sc_hwrm_ver = (resp->hwrm_intf_maj << 16) |
	    (resp->hwrm_intf_min << 8) | resp->hwrm_intf_upd;
#if 0
	snprintf(softc->ver_info->hwrm_if_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
	    resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
	softc->ver_info->hwrm_if_major = resp->hwrm_intf_maj;
	softc->ver_info->hwrm_if_minor = resp->hwrm_intf_min;
	softc->ver_info->hwrm_if_update = resp->hwrm_intf_upd;
	snprintf(softc->ver_info->hwrm_fw_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
	    resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	strlcpy(softc->ver_info->driver_hwrm_if_ver, HWRM_VERSION_STR,
	    BNXT_VERSTR_SIZE);
	strlcpy(softc->ver_info->hwrm_fw_name, resp->hwrm_fw_name,
	    BNXT_NAME_SIZE);

	if (resp->mgmt_fw_maj == 0 && resp->mgmt_fw_min == 0 &&
	    resp->mgmt_fw_bld == 0) {
		strlcpy(softc->ver_info->mgmt_fw_ver, naver, BNXT_VERSTR_SIZE);
		strlcpy(softc->ver_info->mgmt_fw_name, nastr, BNXT_NAME_SIZE);
	}
	else {
		snprintf(softc->ver_info->mgmt_fw_ver, BNXT_VERSTR_SIZE,
		    "%d.%d.%d", resp->mgmt_fw_maj, resp->mgmt_fw_min,
		    resp->mgmt_fw_bld);
		strlcpy(softc->ver_info->mgmt_fw_name, resp->mgmt_fw_name,
		    BNXT_NAME_SIZE);
	}
	if (resp->netctrl_fw_maj == 0 && resp->netctrl_fw_min == 0 &&
	    resp->netctrl_fw_bld == 0) {
		strlcpy(softc->ver_info->netctrl_fw_ver, naver,
		    BNXT_VERSTR_SIZE);
		strlcpy(softc->ver_info->netctrl_fw_name, nastr,
		    BNXT_NAME_SIZE);
	}
	else {
		snprintf(softc->ver_info->netctrl_fw_ver, BNXT_VERSTR_SIZE,
		    "%d.%d.%d", resp->netctrl_fw_maj, resp->netctrl_fw_min,
		    resp->netctrl_fw_bld);
		strlcpy(softc->ver_info->netctrl_fw_name, resp->netctrl_fw_name,
		    BNXT_NAME_SIZE);
	}
	if (resp->roce_fw_maj == 0 && resp->roce_fw_min == 0 &&
	    resp->roce_fw_bld == 0) {
		strlcpy(softc->ver_info->roce_fw_ver, naver, BNXT_VERSTR_SIZE);
		strlcpy(softc->ver_info->roce_fw_name, nastr, BNXT_NAME_SIZE);
	}
	else {
		snprintf(softc->ver_info->roce_fw_ver, BNXT_VERSTR_SIZE,
		    "%d.%d.%d", resp->roce_fw_maj, resp->roce_fw_min,
		    resp->roce_fw_bld);
		strlcpy(softc->ver_info->roce_fw_name, resp->roce_fw_name,
		    BNXT_NAME_SIZE);
	}
	softc->ver_info->chip_num = le16toh(resp->chip_num);
	softc->ver_info->chip_rev = resp->chip_rev;
	softc->ver_info->chip_metal = resp->chip_metal;
	softc->ver_info->chip_bond_id = resp->chip_bond_id;
	softc->ver_info->chip_type = resp->chip_platform_type;
#endif

	/* let the device override the request-length and timeout defaults */
	if (resp->max_req_win_len)
		softc->sc_max_req_len = le16toh(resp->max_req_win_len);
	if (resp->def_req_timeout)
		softc->sc_cmd_timeo = le16toh(resp->def_req_timeout);

	dev_caps_cfg = le32toh(resp->dev_caps_cfg);
	if ((dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
		softc->sc_flags |= BNXT_FLAG_SHORT_CMD;

fail:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}
2289 
2290 
2291 int
2292 bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *softc)
2293 {
2294 	struct hwrm_func_drv_rgtr_input req = {0};
2295 
2296 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);
2297 
2298 	req.enables = htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
2299 	    HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE);
2300 	req.os_type = htole16(HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD);
2301 
2302 	req.ver_maj = 6;
2303 	req.ver_min = 4;
2304 	req.ver_upd = 0;
2305 
2306 	return hwrm_send_message(softc, &req, sizeof(req));
2307 }
2308 
2309 #if 0
2310 
/*
 * Unregister the driver from the firmware (counterpart of
 * bnxt_hwrm_func_drv_rgtr()).  When shutdown is true, the firmware is
 * told to prepare for a host shutdown.  Currently compiled out.
 */
int
bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool shutdown)
{
	struct hwrm_func_drv_unrgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_UNRGTR);
	if (shutdown == true)
		/*
		 * NOTE(review): flags is not passed through htole32()
		 * here, unlike other multi-byte request fields — confirm
		 * before enabling this code.
		 */
		req.flags |=
		    HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN;
	return hwrm_send_message(softc, &req, sizeof(req));
}
2322 
2323 #endif
2324 
2325 int
2326 bnxt_hwrm_func_qcaps(struct bnxt_softc *softc)
2327 {
2328 	int rc = 0;
2329 	struct hwrm_func_qcaps_input req = {0};
2330 	struct hwrm_func_qcaps_output *resp =
2331 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
2332 	/* struct bnxt_func_info *func = &softc->func; */
2333 
2334 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCAPS);
2335 	req.fid = htole16(0xffff);
2336 
2337 	BNXT_HWRM_LOCK(softc);
2338 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2339 	if (rc)
2340 		goto fail;
2341 
2342 	if (resp->flags &
2343 	    htole32(HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED))
2344 		softc->sc_flags |= BNXT_FLAG_WOL_CAP;
2345 
2346 	memcpy(softc->sc_ac.ac_enaddr, resp->mac_address, 6);
2347 	/*
2348 	func->fw_fid = le16toh(resp->fid);
2349 	memcpy(func->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
2350 	func->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
2351 	func->max_cp_rings = le16toh(resp->max_cmpl_rings);
2352 	func->max_tx_rings = le16toh(resp->max_tx_rings);
2353 	func->max_rx_rings = le16toh(resp->max_rx_rings);
2354 	func->max_hw_ring_grps = le32toh(resp->max_hw_ring_grps);
2355 	if (!func->max_hw_ring_grps)
2356 		func->max_hw_ring_grps = func->max_tx_rings;
2357 	func->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
2358 	func->max_vnics = le16toh(resp->max_vnics);
2359 	func->max_stat_ctxs = le16toh(resp->max_stat_ctx);
2360 	if (BNXT_PF(softc)) {
2361 		struct bnxt_pf_info *pf = &softc->pf;
2362 
2363 		pf->port_id = le16toh(resp->port_id);
2364 		pf->first_vf_id = le16toh(resp->first_vf_id);
2365 		pf->max_vfs = le16toh(resp->max_vfs);
2366 		pf->max_encap_records = le32toh(resp->max_encap_records);
2367 		pf->max_decap_records = le32toh(resp->max_decap_records);
2368 		pf->max_tx_em_flows = le32toh(resp->max_tx_em_flows);
2369 		pf->max_tx_wm_flows = le32toh(resp->max_tx_wm_flows);
2370 		pf->max_rx_em_flows = le32toh(resp->max_rx_em_flows);
2371 		pf->max_rx_wm_flows = le32toh(resp->max_rx_wm_flows);
2372 	}
2373 	if (!_is_valid_ether_addr(func->mac_addr)) {
2374 		device_printf(softc->dev, "Invalid ethernet address, generating random locally administered address\n");
2375 		get_random_ether_addr(func->mac_addr);
2376 	}
2377 	*/
2378 
2379 fail:
2380 	BNXT_HWRM_UNLOCK(softc);
2381 	return rc;
2382 }
2383 
2384 
2385 int
2386 bnxt_hwrm_func_qcfg(struct bnxt_softc *softc)
2387 {
2388         struct hwrm_func_qcfg_input req = {0};
2389         /* struct hwrm_func_qcfg_output *resp =
2390 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
2391 	struct bnxt_func_qcfg *fn_qcfg = &softc->fn_qcfg; */
2392         int rc;
2393 
2394 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCFG);
2395         req.fid = htole16(0xffff);
2396 	BNXT_HWRM_LOCK(softc);
2397 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2398         if (rc)
2399 		goto fail;
2400 
2401 	/*
2402 	fn_qcfg->alloc_completion_rings = le16toh(resp->alloc_cmpl_rings);
2403 	fn_qcfg->alloc_tx_rings = le16toh(resp->alloc_tx_rings);
2404 	fn_qcfg->alloc_rx_rings = le16toh(resp->alloc_rx_rings);
2405 	fn_qcfg->alloc_vnics = le16toh(resp->alloc_vnics);
2406 	*/
2407 fail:
2408 	BNXT_HWRM_UNLOCK(softc);
2409         return rc;
2410 }
2411 
2412 
2413 int
2414 bnxt_hwrm_func_reset(struct bnxt_softc *softc)
2415 {
2416 	struct hwrm_func_reset_input req = {0};
2417 
2418 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESET);
2419 	req.enables = 0;
2420 
2421 	return hwrm_send_message(softc, &req, sizeof(req));
2422 }
2423 
2424 int
2425 bnxt_hwrm_vnic_cfg_placement(struct bnxt_softc *softc,
2426     struct bnxt_vnic_info *vnic)
2427 {
2428 	struct hwrm_vnic_plcmodes_cfg_input req = {0};
2429 
2430 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_PLCMODES_CFG);
2431 
2432 	req.flags = htole32(
2433 	    HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
2434 	req.enables = htole32(
2435 	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
2436 	req.vnic_id = htole16(vnic->id);
2437 	req.jumbo_thresh = htole16(MCLBYTES);
2438 
2439 	return hwrm_send_message(softc, &req, sizeof(req));
2440 }
2441 
2442 int
2443 bnxt_hwrm_vnic_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
2444 {
2445 	struct hwrm_vnic_cfg_input req = {0};
2446 
2447 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_CFG);
2448 
2449 	if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
2450 		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
2451 	if (vnic->flags & BNXT_VNIC_FLAG_BD_STALL)
2452 		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
2453 	if (vnic->flags & BNXT_VNIC_FLAG_VLAN_STRIP)
2454 		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
2455 	req.enables = htole32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
2456 	    HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
2457 	    HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
2458 	req.vnic_id = htole16(vnic->id);
2459 	req.dflt_ring_grp = htole16(vnic->def_ring_grp);
2460 	req.rss_rule = htole16(vnic->rss_id);
2461 	req.cos_rule = htole16(vnic->cos_rule);
2462 	req.lb_rule = htole16(vnic->lb_rule);
2463 	req.mru = htole16(vnic->mru);
2464 
2465 	return hwrm_send_message(softc, &req, sizeof(req));
2466 }
2467 
2468 int
2469 bnxt_hwrm_vnic_alloc(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
2470 {
2471 	struct hwrm_vnic_alloc_input req = {0};
2472 	struct hwrm_vnic_alloc_output *resp =
2473 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
2474 	int rc;
2475 
2476 	if (vnic->id != (uint16_t)HWRM_NA_SIGNATURE) {
2477 		printf("%s: attempt to re-allocate vnic %04x\n",
2478 		    DEVNAME(softc), vnic->id);
2479 		return EINVAL;
2480 	}
2481 
2482 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_ALLOC);
2483 
2484 	if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
2485 		req.flags = htole32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
2486 
2487 	BNXT_HWRM_LOCK(softc);
2488 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2489 	if (rc)
2490 		goto fail;
2491 
2492 	vnic->id = le32toh(resp->vnic_id);
2493 
2494 fail:
2495 	BNXT_HWRM_UNLOCK(softc);
2496 	return rc;
2497 }
2498 
2499 int
2500 bnxt_hwrm_vnic_free(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
2501 {
2502 	struct hwrm_vnic_free_input req = {0};
2503 	int rc;
2504 
2505 	if (vnic->id == (uint16_t)HWRM_NA_SIGNATURE) {
2506 		printf("%s: attempt to deallocate vnic %04x\n",
2507 		    DEVNAME(softc), vnic->id);
2508 		return (EINVAL);
2509 	}
2510 
2511 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_FREE);
2512 	req.vnic_id = htole16(vnic->id);
2513 
2514 	BNXT_HWRM_LOCK(softc);
2515 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2516 	if (rc == 0)
2517 		vnic->id = (uint16_t)HWRM_NA_SIGNATURE;
2518 	BNXT_HWRM_UNLOCK(softc);
2519 
2520 	return (rc);
2521 }
2522 
2523 int
2524 bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *softc, uint16_t *ctx_id)
2525 {
2526 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
2527 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
2528 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
2529 	int rc;
2530 
2531 	if (*ctx_id != (uint16_t)HWRM_NA_SIGNATURE) {
2532 		printf("%s: attempt to re-allocate vnic ctx %04x\n",
2533 		    DEVNAME(softc), *ctx_id);
2534 		return EINVAL;
2535 	}
2536 
2537 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
2538 
2539 	BNXT_HWRM_LOCK(softc);
2540 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2541 	if (rc)
2542 		goto fail;
2543 
2544 	*ctx_id = letoh16(resp->rss_cos_lb_ctx_id);
2545 
2546 fail:
2547 	BNXT_HWRM_UNLOCK(softc);
2548 	return (rc);
2549 }
2550 
2551 int
2552 bnxt_hwrm_vnic_ctx_free(struct bnxt_softc *softc, uint16_t *ctx_id)
2553 {
2554 	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
2555 	int rc;
2556 
2557 	if (*ctx_id == (uint16_t)HWRM_NA_SIGNATURE) {
2558 		printf("%s: attempt to deallocate vnic ctx %04x\n",
2559 		    DEVNAME(softc), *ctx_id);
2560 		return (EINVAL);
2561 	}
2562 
2563 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE);
2564 	req.rss_cos_lb_ctx_id = htole32(*ctx_id);
2565 
2566 	BNXT_HWRM_LOCK(softc);
2567 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2568 	if (rc == 0)
2569 		*ctx_id = (uint16_t)HWRM_NA_SIGNATURE;
2570 	BNXT_HWRM_UNLOCK(softc);
2571 	return (rc);
2572 }
2573 
2574 int
2575 bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
2576 {
2577 	struct hwrm_ring_grp_alloc_input req = {0};
2578 	struct hwrm_ring_grp_alloc_output *resp;
2579 	int rc = 0;
2580 
2581 	if (grp->grp_id != HWRM_NA_SIGNATURE) {
2582 		printf("%s: attempt to re-allocate ring group %04x\n",
2583 		    DEVNAME(softc), grp->grp_id);
2584 		return EINVAL;
2585 	}
2586 
2587 	resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
2588 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_ALLOC);
2589 	req.cr = htole16(grp->cp_ring_id);
2590 	req.rr = htole16(grp->rx_ring_id);
2591 	req.ar = htole16(grp->ag_ring_id);
2592 	req.sc = htole16(grp->stats_ctx);
2593 
2594 	BNXT_HWRM_LOCK(softc);
2595 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2596 	if (rc)
2597 		goto fail;
2598 
2599 	grp->grp_id = letoh32(resp->ring_group_id);
2600 
2601 fail:
2602 	BNXT_HWRM_UNLOCK(softc);
2603 	return rc;
2604 }
2605 
2606 int
2607 bnxt_hwrm_ring_grp_free(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
2608 {
2609 	struct hwrm_ring_grp_free_input req = {0};
2610 	int rc = 0;
2611 
2612 	if (grp->grp_id == HWRM_NA_SIGNATURE) {
2613 		printf("%s: attempt to free ring group %04x\n",
2614 		    DEVNAME(softc), grp->grp_id);
2615 		return EINVAL;
2616 	}
2617 
2618 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_FREE);
2619 	req.ring_group_id = htole32(grp->grp_id);
2620 
2621 	BNXT_HWRM_LOCK(softc);
2622 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2623 	if (rc == 0)
2624 		grp->grp_id = HWRM_NA_SIGNATURE;
2625 
2626 	BNXT_HWRM_UNLOCK(softc);
2627 	return (rc);
2628 }
2629 
2630 /*
2631  * Ring allocation message to the firmware
2632  */
2633 int
2634 bnxt_hwrm_ring_alloc(struct bnxt_softc *softc, uint8_t type,
2635     struct bnxt_ring *ring, uint16_t cmpl_ring_id, uint32_t stat_ctx_id,
2636     int irq)
2637 {
2638 	struct hwrm_ring_alloc_input req = {0};
2639 	struct hwrm_ring_alloc_output *resp;
2640 	int rc;
2641 
2642 	if (ring->phys_id != (uint16_t)HWRM_NA_SIGNATURE) {
2643 		printf("%s: attempt to re-allocate ring %04x\n",
2644 		    DEVNAME(softc), ring->phys_id);
2645 		return EINVAL;
2646 	}
2647 
2648 	resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
2649 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_ALLOC);
2650 	req.enables = htole32(0);
2651 	req.fbo = htole32(0);
2652 
2653 	if (stat_ctx_id != HWRM_NA_SIGNATURE) {
2654 		req.enables |= htole32(
2655 		    HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
2656 		req.stat_ctx_id = htole32(stat_ctx_id);
2657 	}
2658 	req.ring_type = type;
2659 	req.page_tbl_addr = htole64(ring->paddr);
2660 	req.length = htole32(ring->ring_size);
2661 	req.logical_id = htole16(ring->id);
2662 	req.cmpl_ring_id = htole16(cmpl_ring_id);
2663 	req.queue_id = htole16(softc->sc_q_info[0].id);
2664 	req.int_mode = (softc->sc_flags & BNXT_FLAG_MSIX) ?
2665 	    HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX :
2666 	    HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY;
2667 	BNXT_HWRM_LOCK(softc);
2668 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2669 	if (rc)
2670 		goto fail;
2671 
2672 	ring->phys_id = le16toh(resp->ring_id);
2673 
2674 fail:
2675 	BNXT_HWRM_UNLOCK(softc);
2676 	return rc;
2677 }
2678 
2679 int
2680 bnxt_hwrm_ring_free(struct bnxt_softc *softc, uint8_t type, struct bnxt_ring *ring)
2681 {
2682 	struct hwrm_ring_free_input req = {0};
2683 	int rc;
2684 
2685 	if (ring->phys_id == (uint16_t)HWRM_NA_SIGNATURE) {
2686 		printf("%s: attempt to deallocate ring %04x\n",
2687 		    DEVNAME(softc), ring->phys_id);
2688 		return (EINVAL);
2689 	}
2690 
2691 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_FREE);
2692 	req.ring_type = type;
2693 	req.ring_id = htole16(ring->phys_id);
2694 	BNXT_HWRM_LOCK(softc);
2695 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2696 	if (rc)
2697 		goto fail;
2698 
2699 	ring->phys_id = (uint16_t)HWRM_NA_SIGNATURE;
2700 fail:
2701 	BNXT_HWRM_UNLOCK(softc);
2702 	return (rc);
2703 }
2704 
2705 
2706 int
2707 bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr,
2708     uint64_t paddr)
2709 {
2710 	struct hwrm_stat_ctx_alloc_input req = {0};
2711 	struct hwrm_stat_ctx_alloc_output *resp;
2712 	int rc = 0;
2713 
2714 	if (cpr->stats_ctx_id != HWRM_NA_SIGNATURE) {
2715 		printf("%s: attempt to re-allocate stats ctx %08x\n",
2716 		    DEVNAME(softc), cpr->stats_ctx_id);
2717 		return EINVAL;
2718 	}
2719 
2720 	resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
2721 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_ALLOC);
2722 
2723 	req.update_period_ms = htole32(1000);
2724 	req.stats_dma_addr = htole64(paddr);
2725 
2726 	BNXT_HWRM_LOCK(softc);
2727 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2728 	if (rc)
2729 		goto fail;
2730 
2731 	cpr->stats_ctx_id = le32toh(resp->stat_ctx_id);
2732 
2733 fail:
2734 	BNXT_HWRM_UNLOCK(softc);
2735 
2736 	return rc;
2737 }
2738 
2739 int
2740 bnxt_hwrm_stat_ctx_free(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr)
2741 {
2742 	struct hwrm_stat_ctx_free_input req = {0};
2743 	int rc = 0;
2744 
2745 	if (cpr->stats_ctx_id == HWRM_NA_SIGNATURE) {
2746 		printf("%s: attempt to free stats ctx %08x\n",
2747 		    DEVNAME(softc), cpr->stats_ctx_id);
2748 		return EINVAL;
2749 	}
2750 
2751 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_FREE);
2752 	req.stat_ctx_id = htole32(cpr->stats_ctx_id);
2753 
2754 	BNXT_HWRM_LOCK(softc);
2755 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2756 	BNXT_HWRM_UNLOCK(softc);
2757 
2758 	if (rc == 0)
2759 		cpr->stats_ctx_id = HWRM_NA_SIGNATURE;
2760 
2761 	return (rc);
2762 }
2763 
2764 #if 0
2765 
/*
 * Ask the firmware to DMA the port rx/tx statistics into host
 * buffers.  Currently compiled out.
 * NOTE(review): references softc fields (pf.port_id,
 * hw_rx_port_stats.idi_paddr) that look like they come from the
 * FreeBSD driver this was ported from — verify they exist in this
 * softc before enabling.
 */
int
bnxt_hwrm_port_qstats(struct bnxt_softc *softc)
{
	struct hwrm_port_qstats_input req = {0};
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_QSTATS);

	req.port_id = htole16(softc->pf.port_id);
	req.rx_stat_host_addr = htole64(softc->hw_rx_port_stats.idi_paddr);
	req.tx_stat_host_addr = htole64(softc->hw_tx_port_stats.idi_paddr);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	BNXT_HWRM_UNLOCK(softc);

	return rc;
}
2784 
2785 #endif
2786 
2787 int
2788 bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *softc,
2789     uint32_t vnic_id, uint32_t rx_mask, uint64_t mc_addr, uint32_t mc_count)
2790 {
2791 	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
2792 
2793 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_SET_RX_MASK);
2794 
2795 	req.vnic_id = htole32(vnic_id);
2796 	req.mask = htole32(rx_mask);
2797 	req.mc_tbl_addr = htole64(mc_addr);
2798 	req.num_mc_entries = htole32(mc_count);
2799 	return hwrm_send_message(softc, &req, sizeof(req));
2800 }
2801 
2802 int
2803 bnxt_hwrm_set_filter(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
2804 {
2805 	struct hwrm_cfa_l2_filter_alloc_input	req = {0};
2806 	struct hwrm_cfa_l2_filter_alloc_output	*resp;
2807 	uint32_t enables = 0;
2808 	int rc = 0;
2809 
2810 	if (vnic->filter_id != -1) {
2811 		printf("%s: attempt to re-allocate l2 ctx filter\n",
2812 		    DEVNAME(softc));
2813 		return EINVAL;
2814 	}
2815 
2816 	resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
2817 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_ALLOC);
2818 
2819 	req.flags = htole32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX);
2820 	enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR
2821 	    | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK
2822 	    | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
2823 	req.enables = htole32(enables);
2824 	req.dst_id = htole16(vnic->id);
2825 	memcpy(req.l2_addr, softc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
2826 	memset(&req.l2_addr_mask, 0xff, sizeof(req.l2_addr_mask));
2827 
2828 	BNXT_HWRM_LOCK(softc);
2829 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2830 	if (rc)
2831 		goto fail;
2832 
2833 	vnic->filter_id = le64toh(resp->l2_filter_id);
2834 	vnic->flow_id = le64toh(resp->flow_id);
2835 
2836 fail:
2837 	BNXT_HWRM_UNLOCK(softc);
2838 	return (rc);
2839 }
2840 
2841 int
2842 bnxt_hwrm_free_filter(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
2843 {
2844 	struct hwrm_cfa_l2_filter_free_input req = {0};
2845 	int rc = 0;
2846 
2847 	if (vnic->filter_id == -1) {
2848 		printf("%s: attempt to deallocate filter %llx\n",
2849 		     DEVNAME(softc), vnic->filter_id);
2850 		return (EINVAL);
2851 	}
2852 
2853 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_FREE);
2854 	req.l2_filter_id = htole64(vnic->filter_id);
2855 
2856 	BNXT_HWRM_LOCK(softc);
2857 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2858 	if (rc == 0)
2859 		vnic->filter_id = -1;
2860 	BNXT_HWRM_UNLOCK(softc);
2861 
2862 	return (rc);
2863 }
2864 
2865 
2866 #if 0
2867 
/*
 * Configure RSS for a vnic: hash type, ring-group indirection table
 * and hash key.  Currently compiled out.
 * NOTE(review): the idi_paddr fields referenced here are from the
 * FreeBSD driver's DMA structures — adapt to this driver's DMA
 * helpers before enabling.
 */
int
bnxt_hwrm_rss_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic,
    uint32_t hash_type)
{
	struct hwrm_vnic_rss_cfg_input	req = {0};

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_CFG);

	req.hash_type = htole32(hash_type);
	req.ring_grp_tbl_addr = htole64(vnic->rss_grp_tbl.idi_paddr);
	req.hash_key_tbl_addr = htole64(vnic->rss_hash_key_tbl.idi_paddr);
	req.rss_ctx_idx = htole16(vnic->rss_id);

	return hwrm_send_message(softc, &req, sizeof(req));
}
2883 
2884 #endif
2885 
/*
 * Tell the firmware which completion ring should receive async event
 * notifications.  The condition is hard-coded to take the PF path
 * (see the BNXT_PF comment); the VF branch is currently dead code,
 * presumably kept for future VF support.
 */
int
bnxt_cfg_async_cr(struct bnxt_softc *softc)
{
	int rc = 0;

	if (1 /* BNXT_PF(softc) */) {
		struct hwrm_func_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_CFG);

		req.fid = htole16(0xffff);
		req.enables = htole32(HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = htole16(softc->sc_cp_ring.ring.phys_id);

		rc = hwrm_send_message(softc, &req, sizeof(req));
	} else {
		struct hwrm_func_vf_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_VF_CFG);

		req.enables = htole32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = htole16(softc->sc_cp_ring.ring.phys_id);

		rc = hwrm_send_message(softc, &req, sizeof(req));
	}
	return rc;
}
2913 
2914 #if 0
2915 
2916 void
2917 bnxt_validate_hw_lro_settings(struct bnxt_softc *softc)
2918 {
2919 	softc->hw_lro.enable = min(softc->hw_lro.enable, 1);
2920 
2921         softc->hw_lro.is_mode_gro = min(softc->hw_lro.is_mode_gro, 1);
2922 
2923 	softc->hw_lro.max_agg_segs = min(softc->hw_lro.max_agg_segs,
2924 		HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX);
2925 
2926 	softc->hw_lro.max_aggs = min(softc->hw_lro.max_aggs,
2927 		HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
2928 
2929 	softc->hw_lro.min_agg_len = min(softc->hw_lro.min_agg_len, BNXT_MAX_MTU);
2930 }
2931 
2932 int
2933 bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc)
2934 {
2935 	struct hwrm_vnic_tpa_cfg_input req = {0};
2936 	uint32_t flags;
2937 
2938 	if (softc->vnic_info.id == (uint16_t) HWRM_NA_SIGNATURE) {
2939 		return 0;
2940 	}
2941 
2942 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_TPA_CFG);
2943 
2944 	if (softc->hw_lro.enable) {
2945 		flags = HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
2946 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
2947 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
2948 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ;
2949 
2950         	if (softc->hw_lro.is_mode_gro)
2951 			flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO;
2952 		else
2953 			flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE;
2954 
2955 		req.flags = htole32(flags);
2956 
2957 		req.enables = htole32(HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
2958 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
2959 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
2960 
2961 		req.max_agg_segs = htole16(softc->hw_lro.max_agg_segs);
2962 		req.max_aggs = htole16(softc->hw_lro.max_aggs);
2963 		req.min_agg_len = htole32(softc->hw_lro.min_agg_len);
2964 	}
2965 
2966 	req.vnic_id = htole16(softc->vnic_info.id);
2967 
2968 	return hwrm_send_message(softc, &req, sizeof(req));
2969 }
2970 
2971 
/*
 * Reset one of the device's embedded processors; *selfreset is both
 * the requested and (on success) the reported self-reset status.
 * Currently compiled out.
 * NOTE(review): uses the FreeBSD-style hwrm_cmd_resp.idi_vaddr
 * response buffer and MPASS(), neither of which exists in this
 * driver — adapt before enabling.
 */
int
bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor,
    uint8_t *selfreset)
{
	struct hwrm_fw_reset_input req = {0};
	struct hwrm_fw_reset_output *resp =
	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
	int rc;

	MPASS(selfreset);

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_RESET);
	req.embedded_proc_type = processor;
	req.selfrst_status = *selfreset;

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto exit;
	*selfreset = resp->selfrst_status;

exit:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}
2997 
/*
 * Query the self-reset status of one of the device's embedded
 * processors into *selfreset.  Currently compiled out.
 * NOTE(review): same FreeBSD leftovers as bnxt_hwrm_fw_reset()
 * (hwrm_cmd_resp.idi_vaddr, MPASS) — adapt before enabling.
 */
int
bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type, uint8_t *selfreset)
{
	struct hwrm_fw_qstatus_input req = {0};
	struct hwrm_fw_qstatus_output *resp =
	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
	int rc;

	MPASS(selfreset);

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_QSTATUS);
	req.embedded_proc_type = type;

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto exit;
	*selfreset = resp->selfrst_status;

exit:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}
3021 
3022 #endif
3023 
3024 int
3025 bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *softc, uint16_t *mfg_id,
3026     uint16_t *device_id, uint32_t *sector_size, uint32_t *nvram_size,
3027     uint32_t *reserved_size, uint32_t *available_size)
3028 {
3029 	struct hwrm_nvm_get_dev_info_input req = {0};
3030 	struct hwrm_nvm_get_dev_info_output *resp =
3031 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
3032 	int rc;
3033 	uint32_t old_timeo;
3034 
3035 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DEV_INFO);
3036 
3037 	BNXT_HWRM_LOCK(softc);
3038 	old_timeo = softc->sc_cmd_timeo;
3039 	softc->sc_cmd_timeo = BNXT_NVM_TIMEO;
3040 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3041 	softc->sc_cmd_timeo = old_timeo;
3042 	if (rc)
3043 		goto exit;
3044 
3045 	if (mfg_id)
3046 		*mfg_id = le16toh(resp->manufacturer_id);
3047 	if (device_id)
3048 		*device_id = le16toh(resp->device_id);
3049 	if (sector_size)
3050 		*sector_size = le32toh(resp->sector_size);
3051 	if (nvram_size)
3052 		*nvram_size = le32toh(resp->nvram_size);
3053 	if (reserved_size)
3054 		*reserved_size = le32toh(resp->reserved_size);
3055 	if (available_size)
3056 		*available_size = le32toh(resp->available_size);
3057 
3058 exit:
3059 	BNXT_HWRM_UNLOCK(softc);
3060 	return rc;
3061 }
3062 
3063 #if 0
3064 
3065 int
3066 bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year, uint8_t *month,
3067     uint8_t *day, uint8_t *hour, uint8_t *minute, uint8_t *second,
3068     uint16_t *millisecond, uint16_t *zone)
3069 {
3070 	struct hwrm_fw_get_time_input req = {0};
3071 	struct hwrm_fw_get_time_output *resp =
3072 	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
3073 	int rc;
3074 
3075 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_GET_TIME);
3076 
3077 	BNXT_HWRM_LOCK(softc);
3078 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3079 	if (rc)
3080 		goto exit;
3081 
3082 	if (year)
3083 		*year = le16toh(resp->year);
3084 	if (month)
3085 		*month = resp->month;
3086 	if (day)
3087 		*day = resp->day;
3088 	if (hour)
3089 		*hour = resp->hour;
3090 	if (minute)
3091 		*minute = resp->minute;
3092 	if (second)
3093 		*second = resp->second;
3094 	if (millisecond)
3095 		*millisecond = le16toh(resp->millisecond);
3096 	if (zone)
3097 		*zone = le16toh(resp->zone);
3098 
3099 exit:
3100 	BNXT_HWRM_UNLOCK(softc);
3101 	return rc;
3102 }
3103 
3104 int
3105 bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year, uint8_t month,
3106     uint8_t day, uint8_t hour, uint8_t minute, uint8_t second,
3107     uint16_t millisecond, uint16_t zone)
3108 {
3109 	struct hwrm_fw_set_time_input req = {0};
3110 
3111 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_SET_TIME);
3112 
3113 	req.year = htole16(year);
3114 	req.month = month;
3115 	req.day = day;
3116 	req.hour = hour;
3117 	req.minute = minute;
3118 	req.second = second;
3119 	req.millisecond = htole16(millisecond);
3120 	req.zone = htole16(zone);
3121 	return hwrm_send_message(softc, &req, sizeof(req));
3122 }
3123 
3124 #endif
3125 
3126 void
3127 _bnxt_hwrm_set_async_event_bit(struct hwrm_func_drv_rgtr_input *req, int bit)
3128 {
3129 	req->async_event_fwd[bit/32] |= (1 << (bit % 32));
3130 }
3131 
3132 int bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *softc)
3133 {
3134 	struct hwrm_func_drv_rgtr_input req = {0};
3135 	int events[] = {
3136 		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
3137 		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
3138 		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
3139 		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
3140 		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE
3141 	};
3142 	int i;
3143 
3144 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);
3145 
3146 	req.enables =
3147 		htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
3148 
3149 	for (i = 0; i < nitems(events); i++)
3150 		_bnxt_hwrm_set_async_event_bit(&req, events[i]);
3151 
3152 	return hwrm_send_message(softc, &req, sizeof(req));
3153 }
3154 
3155 int
3156 bnxt_get_sffpage(struct bnxt_softc *softc, struct if_sffpage *sff)
3157 {
3158 	struct hwrm_port_phy_i2c_read_input req;
3159 	struct hwrm_port_phy_i2c_read_output *out;
3160 	int offset;
3161 
3162 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_I2C_READ);
3163 	req.i2c_slave_addr = sff->sff_addr;
3164 	req.page_number = htole16(sff->sff_page);
3165 
3166 	for (offset = 0; offset < 256; offset += sizeof(out->data)) {
3167 		req.page_offset = htole16(offset);
3168 		req.data_length = sizeof(out->data);
3169 		req.enables = htole32(HWRM_PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET);
3170 
3171 		if (hwrm_send_message(softc, &req, sizeof(req))) {
3172 			printf("%s: failed to read i2c data\n", DEVNAME(softc));
3173 			return 1;
3174 		}
3175 
3176 		out = (struct hwrm_port_phy_i2c_read_output *)
3177 		    BNXT_DMA_KVA(softc->sc_cmd_resp);
3178 		memcpy(sff->sff_data + offset, out->data, sizeof(out->data));
3179 	}
3180 
3181 	return 0;
3182 }
3183