xref: /openbsd-src/sys/dev/pci/if_bnxt.c (revision f1dd7b858388b4a23f4f67a4957ec5ff656ebbe8)
1 /*	$OpenBSD: if_bnxt.c,v 1.32 2021/04/24 09:37:46 jmatthew Exp $	*/
2 /*-
3  * Broadcom NetXtreme-C/E network driver.
4  *
5  * Copyright (c) 2016 Broadcom, All Rights Reserved.
6  * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
21  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27  * THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 /*
31  * Copyright (c) 2018 Jonathan Matthew <jmatthew@openbsd.org>
32  *
33  * Permission to use, copy, modify, and distribute this software for any
34  * purpose with or without fee is hereby granted, provided that the above
35  * copyright notice and this permission notice appear in all copies.
36  *
37  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
38  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
39  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
40  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
41  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
42  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
43  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
44  */
45 
46 
47 #include "bpfilter.h"
48 #include "vlan.h"
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/mbuf.h>
53 #include <sys/kernel.h>
54 #include <sys/malloc.h>
55 #include <sys/device.h>
56 #include <sys/stdint.h>
57 #include <sys/sockio.h>
58 #include <sys/atomic.h>
59 #include <sys/intrmap.h>
60 
61 #include <machine/bus.h>
62 
63 #include <dev/pci/pcireg.h>
64 #include <dev/pci/pcivar.h>
65 #include <dev/pci/pcidevs.h>
66 
67 #include <dev/pci/if_bnxtreg.h>
68 
69 #include <net/if.h>
70 #include <net/if_media.h>
71 #include <net/toeplitz.h>
72 
73 #if NBPFILTER > 0
74 #include <net/bpf.h>
75 #endif
76 
77 #include <netinet/in.h>
78 #include <netinet/if_ether.h>
79 
#define BNXT_HWRM_BAR		0x10
#define BNXT_DOORBELL_BAR	0x18

#define BNXT_MAX_QUEUES		8

/* logical ring ids are carved out of a single id space, one range per type */
#define BNXT_CP_RING_ID_BASE	0
#define BNXT_RX_RING_ID_BASE	(BNXT_MAX_QUEUES + 1)
#define BNXT_AG_RING_ID_BASE	((BNXT_MAX_QUEUES * 2) + 1)
#define BNXT_TX_RING_ID_BASE	((BNXT_MAX_QUEUES * 3) + 1)

#define BNXT_MAX_MTU		9500
#define BNXT_AG_BUFFER_SIZE	8192

/* completion rings span multiple pages */
#define BNXT_CP_PAGES		4

#define BNXT_MAX_TX_SEGS	32	/* a bit much? */
#define BNXT_TX_SLOTS(bs)	(bs->bs_map->dm_nsegs + 1)

#define BNXT_HWRM_SHORT_REQ_LEN	sizeof(struct hwrm_short_input)

/*
 * Serialize access to the hwrm command channel.  LOCK_INIT now uses the
 * _sc macro argument rather than relying on a variable literally named
 * "sc" being in scope at the call site.
 */
#define BNXT_HWRM_LOCK_INIT(_sc, _name)	\
	mtx_init_flags(&(_sc)->sc_lock, IPL_NET, _name, 0)
#define BNXT_HWRM_LOCK(_sc) 		mtx_enter(&_sc->sc_lock)
#define BNXT_HWRM_UNLOCK(_sc) 		mtx_leave(&_sc->sc_lock)
#define BNXT_HWRM_LOCK_DESTROY(_sc)	/* nothing */
#define BNXT_HWRM_LOCK_ASSERT(_sc)	MUTEX_ASSERT_LOCKED(&_sc->sc_lock)

#define BNXT_FLAG_VF            0x0001
#define BNXT_FLAG_NPAR          0x0002
#define BNXT_FLAG_WOL_CAP       0x0004
#define BNXT_FLAG_SHORT_CMD     0x0008
#define BNXT_FLAG_MSIX          0x0010

/* NVRam stuff has a five minute timeout */
#define BNXT_NVM_TIMEO	(5 * 60 * 1000)
115 
/*
 * Advance a completion ring consumer index, toggling the expected valid
 * bit when the index wraps.  The do/while(0) body deliberately has no
 * trailing semicolon so the macro expands to a single statement and is
 * safe inside unbraced if/else; the _v_bit argument is parenthesized to
 * survive expression arguments.
 */
#define NEXT_CP_CONS_V(_ring, _cons, _v_bit)		\
do {	 						\
	if (++(_cons) == (_ring)->ring_size)		\
		((_cons) = 0, (_v_bit) = !(_v_bit));	\
} while (0)
121 
/*
 * Generic descriptor ring state shared by the completion, rx, ag and tx
 * rings: addresses of the backing memory plus the ids used to address it.
 */
struct bnxt_ring {
	uint64_t		paddr;		/* bus address of ring memory */
	uint64_t		doorbell;	/* doorbell offset (id * 0x80) */
	caddr_t			vaddr;		/* kva of ring memory */
	uint32_t		ring_size;	/* number of descriptors */
	uint16_t		id;		/* host-assigned logical id */
	uint16_t		phys_id;	/* firmware-assigned ring id */
};
130 
/*
 * Completion ring state.  The driver tracks the consumer index together
 * with the valid bit it expects next (the bit flips each time the index
 * wraps, see NEXT_CP_CONS_V), plus a committed copy of both so partially
 * processed work can be rolled back (bnxt_cpr_commit/bnxt_cpr_rollback).
 */
struct bnxt_cp_ring {
	struct bnxt_ring	ring;
	void			*irq;
	struct bnxt_softc	*softc;
	uint32_t		cons;		/* current consumer index */
	int			v_bit;		/* expected valid bit */
	uint32_t		commit_cons;	/* committed consumer index */
	int			commit_v_bit;	/* valid bit at commit point */
	struct ctx_hw_stats	*stats;
	uint32_t		stats_ctx_id;	/* firmware stats context id */
	struct bnxt_dmamem	*ring_mem;	/* backing dma allocation */
};
143 
/*
 * Ring group: ties the rings and stats context of one queue together for
 * the firmware (see bnxt_hwrm_ring_grp_alloc/free).
 */
struct bnxt_grp_info {
	uint32_t		grp_id;		/* firmware ring group id */
	uint16_t		stats_ctx;
	uint16_t		rx_ring_id;
	uint16_t		cp_ring_id;
	uint16_t		ag_ring_id;
};
151 
/* Firmware virtual NIC state: ids, filter handles and config flags. */
struct bnxt_vnic_info {
	uint16_t		id;		/* firmware vnic id */
	uint16_t		def_ring_grp;	/* default ring group */
	uint16_t		cos_rule;
	uint16_t		lb_rule;
	uint16_t		mru;		/* max receive unit */

	uint32_t		flags;
#define BNXT_VNIC_FLAG_DEFAULT		0x01
#define BNXT_VNIC_FLAG_BD_STALL		0x02
#define BNXT_VNIC_FLAG_VLAN_STRIP	0x04

	uint64_t		filter_id;	/* rx filter handle */
	uint32_t		flow_id;

	uint16_t		rss_id;		/* rss context id */
};
169 
/* One rx/tx descriptor slot: its dma map and the mbuf currently loaded. */
struct bnxt_slot {
	bus_dmamap_t		bs_map;
	struct mbuf		*bs_m;
};
174 
/*
 * A single contiguous dma allocation (see bnxt_dmamem_alloc): the map,
 * the underlying segment, its size and the mapped kva.
 */
struct bnxt_dmamem {
	bus_dmamap_t		bdm_map;
	bus_dma_segment_t	bdm_seg;
	size_t			bdm_size;
	caddr_t			bdm_kva;
};
181 #define BNXT_DMA_MAP(_bdm)	((_bdm)->bdm_map)
182 #define BNXT_DMA_LEN(_bdm)	((_bdm)->bdm_size)
183 #define BNXT_DMA_DVA(_bdm)	((u_int64_t)(_bdm)->bdm_map->dm_segs[0].ds_addr)
184 #define BNXT_DMA_KVA(_bdm)	((void *)(_bdm)->bdm_kva)
185 
/* Per-queue receive state: the rx and aggregation rings and their slots. */
struct bnxt_rx_queue {
	struct bnxt_softc	*rx_softc;
	struct ifiqueue		*rx_ifiq;
	struct bnxt_dmamem	*rx_ring_mem;	/* rx and ag */
	struct bnxt_ring	rx_ring;
	struct bnxt_ring	rx_ag_ring;	/* aggregation ring */
	struct if_rxring	rxr[2];		/* [0] rx, [1] ag accounting */
	struct bnxt_slot	*rx_slots;
	struct bnxt_slot	*rx_ag_slots;
	int			rx_prod;
	int			rx_cons;
	int			rx_ag_prod;
	int			rx_ag_cons;
	struct timeout		rx_refill;	/* deferred bnxt_refill() */
};
201 
/*
 * Per-queue transmit state.  Packet and descriptor-slot positions are
 * counted separately since one packet can occupy several ring slots
 * (see BNXT_TX_SLOTS).
 */
struct bnxt_tx_queue {
	struct bnxt_softc	*tx_softc;
	struct ifqueue		*tx_ifq;
	struct bnxt_dmamem	*tx_ring_mem;
	struct bnxt_ring	tx_ring;
	struct bnxt_slot	*tx_slots;
	int			tx_prod;
	int			tx_cons;
	int			tx_ring_prod;
	int			tx_ring_cons;
};
213 
/* One interrupt's worth of rings: completion, rx, tx and the ring group. */
struct bnxt_queue {
	char			q_name[8];	/* interrupt name, "bnxtN:i" */
	int			q_index;
	void			*q_ihc;		/* established interrupt */
	struct bnxt_softc	*q_sc;
	struct bnxt_cp_ring	q_cp;
	struct bnxt_rx_queue	q_rx;
	struct bnxt_tx_queue	q_tx;
	struct bnxt_grp_info	q_rg;
};
224 
/*
 * Per-device softc.  sc_lock serializes submission on the hwrm command
 * channel; BNXT_HWRM_BAR maps the command channel, BNXT_DOORBELL_BAR the
 * ring doorbells.
 */
struct bnxt_softc {
	struct device		sc_dev;
	struct arpcom		sc_ac;
	struct ifmedia		sc_media;

	struct mutex		sc_lock;	/* hwrm channel lock */

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;
	bus_dma_tag_t		sc_dmat;

	/* hwrm command channel (BNXT_HWRM_BAR) */
	bus_space_tag_t		sc_hwrm_t;
	bus_space_handle_t	sc_hwrm_h;
	bus_size_t		sc_hwrm_s;

	struct bnxt_dmamem	*sc_cmd_resp;	/* hwrm response buffer */
	uint16_t		sc_cmd_seq;	/* hwrm command sequence no. */
	uint16_t		sc_max_req_len;
	uint32_t		sc_cmd_timeo;
	uint32_t		sc_flags;	/* BNXT_FLAG_* */

	/* doorbell space (BNXT_DOORBELL_BAR) */
	bus_space_tag_t		sc_db_t;
	bus_space_handle_t	sc_db_h;
	bus_size_t		sc_db_s;

	void			*sc_ih;		/* admin/legacy interrupt */

	int			sc_hwrm_ver;
	int			sc_tx_queue_id;

	struct bnxt_vnic_info	sc_vnic;
	struct bnxt_dmamem	*sc_stats_ctx_mem;
	struct bnxt_dmamem	*sc_rx_cfg;

	/* async-event completion ring, used when multiple queues exist */
	struct bnxt_cp_ring	sc_cp_ring;

	int			sc_nqueues;
	struct intrmap		*sc_intrmap;
	struct bnxt_queue	sc_queues[BNXT_MAX_QUEUES];
};
265 #define DEVNAME(_sc)	((_sc)->sc_dev.dv_xname)
266 
/* PCI ids of the supported NetXtreme-C/E controllers. */
const struct pci_matchid bnxt_devices[] = {
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57301 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57302 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57304 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57311 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57312 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57314 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57402 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57404 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57406 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57407 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57412 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57414 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57416 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57416_SFP },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57417 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57417_SFP }
};
285 
286 int		bnxt_match(struct device *, void *, void *);
287 void		bnxt_attach(struct device *, struct device *, void *);
288 
289 void		bnxt_up(struct bnxt_softc *);
290 void		bnxt_down(struct bnxt_softc *);
291 void		bnxt_iff(struct bnxt_softc *);
292 int		bnxt_ioctl(struct ifnet *, u_long, caddr_t);
293 int		bnxt_rxrinfo(struct bnxt_softc *, struct if_rxrinfo *);
294 void		bnxt_start(struct ifqueue *);
295 int		bnxt_admin_intr(void *);
296 int		bnxt_intr(void *);
297 void		bnxt_watchdog(struct ifnet *);
298 void		bnxt_media_status(struct ifnet *, struct ifmediareq *);
299 int		bnxt_media_change(struct ifnet *);
300 int		bnxt_media_autonegotiate(struct bnxt_softc *);
301 
302 struct cmpl_base *bnxt_cpr_next_cmpl(struct bnxt_softc *, struct bnxt_cp_ring *);
303 void		bnxt_cpr_commit(struct bnxt_softc *, struct bnxt_cp_ring *);
304 void		bnxt_cpr_rollback(struct bnxt_softc *, struct bnxt_cp_ring *);
305 
306 void		bnxt_mark_cpr_invalid(struct bnxt_cp_ring *);
307 void		bnxt_write_cp_doorbell(struct bnxt_softc *, struct bnxt_ring *,
308 		    int);
309 void		bnxt_write_cp_doorbell_index(struct bnxt_softc *,
310 		    struct bnxt_ring *, uint32_t, int);
311 void		bnxt_write_rx_doorbell(struct bnxt_softc *, struct bnxt_ring *,
312 		    int);
313 void		bnxt_write_tx_doorbell(struct bnxt_softc *, struct bnxt_ring *,
314 		    int);
315 
316 int		bnxt_rx_fill(struct bnxt_queue *);
317 u_int		bnxt_rx_fill_slots(struct bnxt_softc *, struct bnxt_ring *, void *,
318 		    struct bnxt_slot *, uint *, int, uint16_t, u_int);
319 void		bnxt_refill(void *);
320 int		bnxt_rx(struct bnxt_softc *, struct bnxt_rx_queue *,
321 		    struct bnxt_cp_ring *, struct mbuf_list *, int *, int *,
322 		    struct cmpl_base *);
323 
324 void		bnxt_txeof(struct bnxt_softc *, struct bnxt_tx_queue *, int *,
325 		    struct cmpl_base *);
326 
327 int		bnxt_set_cp_ring_aggint(struct bnxt_softc *, struct bnxt_cp_ring *);
328 
329 int		_hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
330 int		hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
331 void		bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *, void *, uint16_t);
332 int 		bnxt_hwrm_err_map(uint16_t err);
333 
334 /* HWRM Function Prototypes */
335 int		bnxt_hwrm_ring_alloc(struct bnxt_softc *, uint8_t,
336 		    struct bnxt_ring *, uint16_t, uint32_t, int);
337 int		bnxt_hwrm_ring_free(struct bnxt_softc *, uint8_t,
338 		    struct bnxt_ring *);
339 int		bnxt_hwrm_ver_get(struct bnxt_softc *);
340 int		bnxt_hwrm_queue_qportcfg(struct bnxt_softc *);
341 int		bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *);
342 int		bnxt_hwrm_func_qcaps(struct bnxt_softc *);
343 int		bnxt_hwrm_func_qcfg(struct bnxt_softc *);
344 int		bnxt_hwrm_func_reset(struct bnxt_softc *);
345 int		bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *, uint16_t *);
346 int		bnxt_hwrm_vnic_ctx_free(struct bnxt_softc *, uint16_t *);
347 int		bnxt_hwrm_vnic_cfg(struct bnxt_softc *,
348 		    struct bnxt_vnic_info *);
349 int		bnxt_hwrm_vnic_cfg_placement(struct bnxt_softc *,
350 		    struct bnxt_vnic_info *vnic);
351 int		bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *,
352 		    struct bnxt_cp_ring *, uint64_t);
353 int		bnxt_hwrm_stat_ctx_free(struct bnxt_softc *,
354 		    struct bnxt_cp_ring *);
355 int		bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *,
356 		    struct bnxt_grp_info *);
357 int		bnxt_hwrm_ring_grp_free(struct bnxt_softc *,
358 		    struct bnxt_grp_info *);
359 int		bnxt_hwrm_vnic_alloc(struct bnxt_softc *,
360 		    struct bnxt_vnic_info *);
361 int		bnxt_hwrm_vnic_free(struct bnxt_softc *,
362 		    struct bnxt_vnic_info *);
363 int		bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *,
364 		    uint32_t, uint32_t, uint64_t, uint32_t);
365 int		bnxt_hwrm_set_filter(struct bnxt_softc *,
366 		    struct bnxt_vnic_info *);
367 int		bnxt_hwrm_free_filter(struct bnxt_softc *,
368 		    struct bnxt_vnic_info *);
369 int		bnxt_hwrm_vnic_rss_cfg(struct bnxt_softc *,
370 		    struct bnxt_vnic_info *, uint32_t, daddr_t, daddr_t);
371 int		bnxt_cfg_async_cr(struct bnxt_softc *, struct bnxt_cp_ring *);
372 int		bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *, uint16_t *,
373 		    uint16_t *, uint32_t *, uint32_t *, uint32_t *, uint32_t *);
374 int		bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *,
375 		    struct ifmediareq *);
376 int		bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *);
377 int		bnxt_get_sffpage(struct bnxt_softc *, struct if_sffpage *);
378 
379 /* not used yet: */
380 #if 0
381 int bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool shutdown);
382 
383 int bnxt_hwrm_port_qstats(struct bnxt_softc *softc);
384 
385 
386 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc);
387 void bnxt_validate_hw_lro_settings(struct bnxt_softc *softc);
388 int bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor,
389     uint8_t *selfreset);
390 int bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type,
391     uint8_t *selfreset);
392 int bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year,
393     uint8_t *month, uint8_t *day, uint8_t *hour, uint8_t *minute,
394     uint8_t *second, uint16_t *millisecond, uint16_t *zone);
395 int bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year,
396     uint8_t month, uint8_t day, uint8_t hour, uint8_t minute, uint8_t second,
397     uint16_t millisecond, uint16_t zone);
398 
399 #endif
400 
401 
/* autoconf(9) glue */
struct cfattach bnxt_ca = {
	sizeof(struct bnxt_softc), bnxt_match, bnxt_attach
};

struct cfdriver bnxt_cd = {
	NULL, "bnxt", DV_IFNET
};
409 
410 struct bnxt_dmamem *
411 bnxt_dmamem_alloc(struct bnxt_softc *sc, size_t size)
412 {
413 	struct bnxt_dmamem *m;
414 	int nsegs;
415 
416 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
417 	if (m == NULL)
418 		return (NULL);
419 
420 	m->bdm_size = size;
421 
422 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
423 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->bdm_map) != 0)
424 		goto bdmfree;
425 
426 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->bdm_seg, 1,
427 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
428 		goto destroy;
429 
430 	if (bus_dmamem_map(sc->sc_dmat, &m->bdm_seg, nsegs, size, &m->bdm_kva,
431 	    BUS_DMA_NOWAIT) != 0)
432 		goto free;
433 
434 	if (bus_dmamap_load(sc->sc_dmat, m->bdm_map, m->bdm_kva, size, NULL,
435 	    BUS_DMA_NOWAIT) != 0)
436 		goto unmap;
437 
438 	return (m);
439 
440 unmap:
441 	bus_dmamem_unmap(sc->sc_dmat, m->bdm_kva, m->bdm_size);
442 free:
443 	bus_dmamem_free(sc->sc_dmat, &m->bdm_seg, 1);
444 destroy:
445 	bus_dmamap_destroy(sc->sc_dmat, m->bdm_map);
446 bdmfree:
447 	free(m, M_DEVBUF, sizeof *m);
448 
449 	return (NULL);
450 }
451 
/*
 * Release a dma allocation made by bnxt_dmamem_alloc.  Teardown is the
 * exact reverse of setup: unmap the kva, free the segment, destroy the
 * map, then free the descriptor itself.
 */
void
bnxt_dmamem_free(struct bnxt_softc *sc, struct bnxt_dmamem *m)
{
	bus_dmamem_unmap(sc->sc_dmat, m->bdm_kva, m->bdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->bdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->bdm_map);
	free(m, M_DEVBUF, sizeof *m);
}
460 
461 int
462 bnxt_match(struct device *parent, void *match, void *aux)
463 {
464 	return (pci_matchbyid(aux, bnxt_devices, nitems(bnxt_devices)));
465 }
466 
/*
 * Attach: map the hwrm and doorbell BARs, perform the initial firmware
 * handshake, set up interrupts (msi-x with one vector per queue when
 * available, otherwise a single legacy/msi-x vector), allocate the
 * default/async completion ring and attach the network interface.
 * Errors unwind through the goto labels at the bottom in reverse order
 * of setup.
 */
void
bnxt_attach(struct device *parent, struct device *self, void *aux)
{
	struct bnxt_softc *sc = (struct bnxt_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct pci_attach_args *pa = aux;
	struct bnxt_cp_ring *cpr;
	pci_intr_handle_t ih;
	const char *intrstr;
	u_int memtype;
	int i;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	/* map the hwrm command channel and the doorbell space */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNXT_HWRM_BAR);
	if (pci_mapreg_map(pa, BNXT_HWRM_BAR, memtype, 0, &sc->sc_hwrm_t,
	    &sc->sc_hwrm_h, NULL, &sc->sc_hwrm_s, 0)) {
		printf(": failed to map hwrm\n");
		return;
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNXT_DOORBELL_BAR);
	if (pci_mapreg_map(pa, BNXT_DOORBELL_BAR, memtype, 0, &sc->sc_db_t,
	    &sc->sc_db_h, NULL, &sc->sc_db_s, 0)) {
		printf(": failed to map doorbell\n");
		goto unmap_1;
	}

	/* hwrm responses are dma'd into sc_cmd_resp by the firmware */
	BNXT_HWRM_LOCK_INIT(sc, DEVNAME(sc));
	sc->sc_cmd_resp = bnxt_dmamem_alloc(sc, PAGE_SIZE);
	if (sc->sc_cmd_resp == NULL) {
		printf(": failed to allocate command response buffer\n");
		goto unmap_2;
	}

	/* initial firmware handshake: version, nvram, driver registration */
	if (bnxt_hwrm_ver_get(sc) != 0) {
		printf(": failed to query version info\n");
		goto free_resp;
	}

	if (bnxt_hwrm_nvm_get_dev_info(sc, NULL, NULL, NULL, NULL, NULL, NULL)
	    != 0) {
		printf(": failed to get nvram info\n");
		goto free_resp;
	}

	if (bnxt_hwrm_func_drv_rgtr(sc) != 0) {
		printf(": failed to register driver with firmware\n");
		goto free_resp;
	}

	if (bnxt_hwrm_func_rgtr_async_events(sc) != 0) {
		printf(": failed to register async events\n");
		goto free_resp;
	}

	if (bnxt_hwrm_func_qcaps(sc) != 0) {
		printf(": failed to get queue capabilities\n");
		goto free_resp;
	}

	/*
	 * devices advertise msi support, but there's no way to tell a
	 * completion queue to use msi mode, only legacy or msi-x.
	 */
	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
		int nmsix;

		sc->sc_flags |= BNXT_FLAG_MSIX;
		intrstr = pci_intr_string(sc->sc_pc, ih);

		nmsix = pci_intr_msix_count(pa->pa_pc, pa->pa_tag);
		if (nmsix > 1) {
			/* vector 0 handles async events only */
			sc->sc_ih = pci_intr_establish(sc->sc_pc, ih,
			    IPL_NET | IPL_MPSAFE, bnxt_admin_intr, sc, DEVNAME(sc));
			sc->sc_intrmap = intrmap_create(&sc->sc_dev,
			    nmsix - 1, BNXT_MAX_QUEUES, INTRMAP_POWEROF2);
			sc->sc_nqueues = intrmap_count(sc->sc_intrmap);
			KASSERT(sc->sc_nqueues > 0);
			KASSERT(powerof2(sc->sc_nqueues));
		} else {
			sc->sc_ih = pci_intr_establish(sc->sc_pc, ih,
			    IPL_NET | IPL_MPSAFE, bnxt_intr, &sc->sc_queues[0],
			    DEVNAME(sc));
			sc->sc_nqueues = 1;
		}
	} else if (pci_intr_map(pa, &ih) == 0) {
		intrstr = pci_intr_string(sc->sc_pc, ih);
		sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_NET | IPL_MPSAFE,
		    bnxt_intr, &sc->sc_queues[0], DEVNAME(sc));
		sc->sc_nqueues = 1;
	} else {
		printf(": unable to map interrupt\n");
		goto free_resp;
	}
	if (sc->sc_ih == NULL) {
		printf(": unable to establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto deintr;
	}
	/* NOTE: ac_enaddr is presumably filled in by func_qcaps - verify */
	printf("%s, %d queues, address %s\n", intrstr, sc->sc_nqueues,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	if (bnxt_hwrm_func_qcfg(sc) != 0) {
		printf("%s: failed to query function config\n", DEVNAME(sc));
		goto deintr;
	}

	if (bnxt_hwrm_queue_qportcfg(sc) != 0) {
		printf("%s: failed to query port config\n", DEVNAME(sc));
		goto deintr;
	}

	if (bnxt_hwrm_func_reset(sc) != 0) {
		printf("%s: reset failed\n", DEVNAME(sc));
		goto deintr;
	}

	/*
	 * with a single queue, its completion ring doubles as the async
	 * event ring; with multiple queues, a dedicated ring is used.
	 */
	if (sc->sc_nqueues == 1)
		cpr = &sc->sc_queues[0].q_cp;
	else
		cpr = &sc->sc_cp_ring;

	cpr->stats_ctx_id = HWRM_NA_SIGNATURE;
	cpr->ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
	cpr->softc = sc;
	cpr->ring.id = 0;
	cpr->ring.doorbell = cpr->ring.id * 0x80;
	cpr->ring.ring_size = (PAGE_SIZE * BNXT_CP_PAGES) /
	    sizeof(struct cmpl_base);
	cpr->ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE *
	    BNXT_CP_PAGES);
	if (cpr->ring_mem == NULL) {
		printf("%s: failed to allocate completion queue memory\n",
		    DEVNAME(sc));
		goto deintr;
	}
	cpr->ring.vaddr = BNXT_DMA_KVA(cpr->ring_mem);
	cpr->ring.paddr = BNXT_DMA_DVA(cpr->ring_mem);
	cpr->cons = UINT32_MAX;
	cpr->v_bit = 1;
	bnxt_mark_cpr_invalid(cpr);
	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
	    &cpr->ring, (uint16_t)HWRM_NA_SIGNATURE,
	    HWRM_NA_SIGNATURE, 1) != 0) {
		printf("%s: failed to allocate completion queue\n",
		    DEVNAME(sc));
		goto free_cp_mem;
	}
	if (bnxt_cfg_async_cr(sc, cpr) != 0) {
		printf("%s: failed to set async completion ring\n",
		    DEVNAME(sc));
		goto free_cp_mem;
	}
	bnxt_write_cp_doorbell(sc, &cpr->ring, 1);

	if (bnxt_set_cp_ring_aggint(sc, cpr) != 0) {
		printf("%s: failed to set interrupt aggregation\n",
		    DEVNAME(sc));
		goto free_cp_mem;
	}

	/* network interface setup */
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = bnxt_ioctl;
	ifp->if_qstart = bnxt_start;
	ifp->if_watchdog = bnxt_watchdog;
	ifp->if_hardmtu = BNXT_MAX_MTU;
	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_UDPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv6 |
	    IFCAP_CSUM_TCPv6;
#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif
	ifq_set_maxlen(&ifp->if_snd, 1024);	/* ? */

	ifmedia_init(&sc->sc_media, IFM_IMASK, bnxt_media_change,
	    bnxt_media_status);

	if_attach(ifp);
	ether_ifattach(ifp);

	/* per-queue state; with msi-x, one interrupt per queue as well */
	if_attach_iqueues(ifp, sc->sc_nqueues);
	if_attach_queues(ifp, sc->sc_nqueues);
	for (i = 0; i < sc->sc_nqueues; i++) {
		struct ifiqueue *ifiq = ifp->if_iqs[i];
		struct ifqueue *ifq = ifp->if_ifqs[i];
		struct bnxt_queue *bq = &sc->sc_queues[i];
		struct bnxt_cp_ring *cp = &bq->q_cp;
		struct bnxt_rx_queue *rx = &bq->q_rx;
		struct bnxt_tx_queue *tx = &bq->q_tx;

		bq->q_index = i;
		bq->q_sc = sc;

		rx->rx_softc = sc;
		rx->rx_ifiq = ifiq;
		timeout_set(&rx->rx_refill, bnxt_refill, bq);
		ifiq->ifiq_softc = rx;

		tx->tx_softc = sc;
		tx->tx_ifq = ifq;
		ifq->ifq_softc = tx;

		if (sc->sc_nqueues > 1) {
			cp->stats_ctx_id = HWRM_NA_SIGNATURE;
			cp->ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
			cp->ring.id = i + 1;	/* first cp ring is async only */
			cp->softc = sc;
			cp->ring.doorbell = bq->q_cp.ring.id * 0x80;
			cp->ring.ring_size = (PAGE_SIZE * BNXT_CP_PAGES) /
			    sizeof(struct cmpl_base);
			if (pci_intr_map_msix(pa, i + 1, &ih) != 0) {
				printf("%s: unable to map queue interrupt %d\n",
				    DEVNAME(sc), i);
				goto intrdisestablish;
			}
			snprintf(bq->q_name, sizeof(bq->q_name), "%s:%d",
			    DEVNAME(sc), i);
			bq->q_ihc = pci_intr_establish_cpu(sc->sc_pc, ih,
			    IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
			    bnxt_intr, bq, bq->q_name);
			if (bq->q_ihc == NULL) {
				printf("%s: unable to establish interrupt %d\n",
				    DEVNAME(sc), i);
				goto intrdisestablish;
			}
		}
	}

	bnxt_media_autonegotiate(sc);
	bnxt_hwrm_port_phy_qcfg(sc, NULL);
	return;

	/* error unwinding, in reverse order of the setup above */
intrdisestablish:
	for (i = 0; i < sc->sc_nqueues; i++) {
		struct bnxt_queue *bq = &sc->sc_queues[i];
		if (bq->q_ihc == NULL)
			continue;
		pci_intr_disestablish(sc->sc_pc, bq->q_ihc);
		bq->q_ihc = NULL;
	}
free_cp_mem:
	bnxt_dmamem_free(sc, cpr->ring_mem);
deintr:
	if (sc->sc_intrmap != NULL) {
		intrmap_destroy(sc->sc_intrmap);
		sc->sc_intrmap = NULL;
	}
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	sc->sc_ih = NULL;
free_resp:
	bnxt_dmamem_free(sc, sc->sc_cmd_resp);
unmap_2:
	bus_space_unmap(sc->sc_hwrm_t, sc->sc_hwrm_h, sc->sc_hwrm_s);
	sc->sc_hwrm_s = 0;
unmap_1:
	bus_space_unmap(sc->sc_db_t, sc->sc_db_h, sc->sc_db_s);
	sc->sc_db_s = 0;
}
733 
734 void
735 bnxt_free_slots(struct bnxt_softc *sc, struct bnxt_slot *slots, int allocated,
736     int total)
737 {
738 	struct bnxt_slot *bs;
739 
740 	int i = allocated;
741 	while (i-- > 0) {
742 		bs = &slots[i];
743 		bus_dmamap_destroy(sc->sc_dmat, bs->bs_map);
744 	}
745 	free(slots, M_DEVBUF, total * sizeof(*bs));
746 }
747 
748 int
749 bnxt_set_cp_ring_aggint(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
750 {
751 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input aggint;
752 
753 	/*
754 	 * set interrupt aggregation parameters for around 10k interrupts
755 	 * per second.  the timers are in units of 80usec, and the counters
756 	 * are based on the minimum rx ring size of 32.
757 	 */
758 	memset(&aggint, 0, sizeof(aggint));
759         bnxt_hwrm_cmd_hdr_init(sc, &aggint,
760 	    HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
761 	aggint.ring_id = htole16(cpr->ring.phys_id);
762 	aggint.num_cmpl_dma_aggr = htole16(32);
763 	aggint.num_cmpl_dma_aggr_during_int  = aggint.num_cmpl_dma_aggr;
764 	aggint.cmpl_aggr_dma_tmr = htole16((1000000000 / 20000) / 80);
765 	aggint.cmpl_aggr_dma_tmr_during_int = aggint.cmpl_aggr_dma_tmr;
766 	aggint.int_lat_tmr_min = htole16((1000000000 / 20000) / 80);
767 	aggint.int_lat_tmr_max = htole16((1000000000 / 10000) / 80);
768 	aggint.num_cmpl_aggr_int = htole16(16);
769 	return (hwrm_send_message(sc, &aggint, sizeof(aggint)));
770 }
771 
772 int
773 bnxt_queue_up(struct bnxt_softc *sc, struct bnxt_queue *bq)
774 {
775 	struct ifnet *ifp = &sc->sc_ac.ac_if;
776 	struct bnxt_cp_ring *cp = &bq->q_cp;
777 	struct bnxt_rx_queue *rx = &bq->q_rx;
778 	struct bnxt_tx_queue *tx = &bq->q_tx;
779 	struct bnxt_grp_info *rg = &bq->q_rg;
780 	struct bnxt_slot *bs;
781 	int i;
782 
783 	tx->tx_ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE);
784 	if (tx->tx_ring_mem == NULL) {
785 		printf("%s: failed to allocate tx ring %d\n", DEVNAME(sc), bq->q_index);
786 		return ENOMEM;
787 	}
788 
789 	rx->rx_ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE * 2);
790 	if (rx->rx_ring_mem == NULL) {
791 		printf("%s: failed to allocate rx ring %d\n", DEVNAME(sc), bq->q_index);
792 		goto free_tx;
793 	}
794 
795 	/* completion ring is already allocated if we only have one queue */
796 	if (sc->sc_nqueues > 1) {
797 		cp->ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE * BNXT_CP_PAGES);
798 		if (cp->ring_mem == NULL) {
799 			printf("%s: failed to allocate completion ring %d mem\n",
800 			    DEVNAME(sc), bq->q_index);
801 			goto free_rx;
802 		}
803 		cp->ring.vaddr = BNXT_DMA_KVA(cp->ring_mem);
804 		cp->ring.paddr = BNXT_DMA_DVA(cp->ring_mem);
805 		cp->cons = UINT32_MAX;
806 		cp->v_bit = 1;
807 		bnxt_mark_cpr_invalid(cp);
808 
809 		if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
810 		    &cp->ring, (uint16_t)HWRM_NA_SIGNATURE,
811 		    HWRM_NA_SIGNATURE, 1) != 0) {
812 			printf("%s: failed to allocate completion queue %d\n",
813 			    DEVNAME(sc), bq->q_index);
814 			goto free_rx;
815 		}
816 
817 		if (bnxt_set_cp_ring_aggint(sc, cp) != 0) {
818 			printf("%s: failed to set interrupt %d aggregation\n",
819 			    DEVNAME(sc), bq->q_index);
820 			goto free_rx;
821 		}
822 		bnxt_write_cp_doorbell(sc, &cp->ring, 1);
823 	}
824 
825 	if (bnxt_hwrm_stat_ctx_alloc(sc, &bq->q_cp,
826 	    BNXT_DMA_DVA(sc->sc_stats_ctx_mem) +
827 	    (bq->q_index * sizeof(struct ctx_hw_stats))) != 0) {
828 		printf("%s: failed to set up stats context\n", DEVNAME(sc));
829 		goto free_rx;
830 	}
831 
832 	tx->tx_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
833 	tx->tx_ring.id = BNXT_TX_RING_ID_BASE + bq->q_index;
834 	tx->tx_ring.doorbell = tx->tx_ring.id * 0x80;
835 	tx->tx_ring.ring_size = PAGE_SIZE / sizeof(struct tx_bd_short);
836 	tx->tx_ring.vaddr = BNXT_DMA_KVA(tx->tx_ring_mem);
837 	tx->tx_ring.paddr = BNXT_DMA_DVA(tx->tx_ring_mem);
838 	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
839 	    &tx->tx_ring, cp->ring.phys_id, HWRM_NA_SIGNATURE, 1) != 0) {
840 		printf("%s: failed to set up tx ring\n",
841 		    DEVNAME(sc));
842 		goto dealloc_stats;
843 	}
844 	bnxt_write_tx_doorbell(sc, &tx->tx_ring, 0);
845 
846 	rx->rx_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
847 	rx->rx_ring.id = BNXT_RX_RING_ID_BASE + bq->q_index;
848 	rx->rx_ring.doorbell = rx->rx_ring.id * 0x80;
849 	rx->rx_ring.ring_size = PAGE_SIZE / sizeof(struct rx_prod_pkt_bd);
850 	rx->rx_ring.vaddr = BNXT_DMA_KVA(rx->rx_ring_mem);
851 	rx->rx_ring.paddr = BNXT_DMA_DVA(rx->rx_ring_mem);
852 	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
853 	    &rx->rx_ring, cp->ring.phys_id, HWRM_NA_SIGNATURE, 1) != 0) {
854 		printf("%s: failed to set up rx ring\n",
855 		    DEVNAME(sc));
856 		goto dealloc_tx;
857 	}
858 	bnxt_write_rx_doorbell(sc, &rx->rx_ring, 0);
859 
860 	rx->rx_ag_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
861 	rx->rx_ag_ring.id = BNXT_AG_RING_ID_BASE + bq->q_index;
862 	rx->rx_ag_ring.doorbell = rx->rx_ag_ring.id * 0x80;
863 	rx->rx_ag_ring.ring_size = PAGE_SIZE / sizeof(struct rx_prod_pkt_bd);
864 	rx->rx_ag_ring.vaddr = BNXT_DMA_KVA(rx->rx_ring_mem) + PAGE_SIZE;
865 	rx->rx_ag_ring.paddr = BNXT_DMA_DVA(rx->rx_ring_mem) + PAGE_SIZE;
866 	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
867 	    &rx->rx_ag_ring, cp->ring.phys_id, HWRM_NA_SIGNATURE, 1) != 0) {
868 		printf("%s: failed to set up rx ag ring\n",
869 		    DEVNAME(sc));
870 		goto dealloc_rx;
871 	}
872 	bnxt_write_rx_doorbell(sc, &rx->rx_ag_ring, 0);
873 
874 	rg->grp_id = HWRM_NA_SIGNATURE;
875 	rg->stats_ctx = cp->stats_ctx_id;
876 	rg->rx_ring_id = rx->rx_ring.phys_id;
877 	rg->ag_ring_id = rx->rx_ag_ring.phys_id;
878 	rg->cp_ring_id = cp->ring.phys_id;
879 	if (bnxt_hwrm_ring_grp_alloc(sc, rg) != 0) {
880 		printf("%s: failed to allocate ring group\n",
881 		    DEVNAME(sc));
882 		goto dealloc_ag;
883 	}
884 
885 	rx->rx_slots = mallocarray(sizeof(*bs), rx->rx_ring.ring_size,
886 	    M_DEVBUF, M_WAITOK | M_ZERO);
887 	if (rx->rx_slots == NULL) {
888 		printf("%s: failed to allocate rx slots\n", DEVNAME(sc));
889 		goto dealloc_ring_group;
890 	}
891 
892 	for (i = 0; i < rx->rx_ring.ring_size; i++) {
893 		bs = &rx->rx_slots[i];
894 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
895 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bs->bs_map) != 0) {
896 			printf("%s: failed to allocate rx dma maps\n",
897 			    DEVNAME(sc));
898 			goto destroy_rx_slots;
899 		}
900 	}
901 
902 	rx->rx_ag_slots = mallocarray(sizeof(*bs), rx->rx_ag_ring.ring_size,
903 	    M_DEVBUF, M_WAITOK | M_ZERO);
904 	if (rx->rx_ag_slots == NULL) {
905 		printf("%s: failed to allocate rx ag slots\n", DEVNAME(sc));
906 		goto destroy_rx_slots;
907 	}
908 
909 	for (i = 0; i < rx->rx_ag_ring.ring_size; i++) {
910 		bs = &rx->rx_ag_slots[i];
911 		if (bus_dmamap_create(sc->sc_dmat, BNXT_AG_BUFFER_SIZE, 1,
912 		    BNXT_AG_BUFFER_SIZE, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
913 		    &bs->bs_map) != 0) {
914 			printf("%s: failed to allocate rx ag dma maps\n",
915 			    DEVNAME(sc));
916 			goto destroy_rx_ag_slots;
917 		}
918 	}
919 
920 	tx->tx_slots = mallocarray(sizeof(*bs), tx->tx_ring.ring_size,
921 	    M_DEVBUF, M_WAITOK | M_ZERO);
922 	if (tx->tx_slots == NULL) {
923 		printf("%s: failed to allocate tx slots\n", DEVNAME(sc));
924 		goto destroy_rx_ag_slots;
925 	}
926 
927 	for (i = 0; i < tx->tx_ring.ring_size; i++) {
928 		bs = &tx->tx_slots[i];
929 		if (bus_dmamap_create(sc->sc_dmat, BNXT_MAX_MTU, BNXT_MAX_TX_SEGS,
930 		    BNXT_MAX_MTU, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
931 		    &bs->bs_map) != 0) {
932 			printf("%s: failed to allocate tx dma maps\n",
933 			    DEVNAME(sc));
934 			goto destroy_tx_slots;
935 		}
936 	}
937 
938 	/*
939 	 * initially, the rx ring must be filled at least some distance beyond
940 	 * the current consumer index, as it looks like the firmware assumes the
941 	 * ring is full on creation, but doesn't prefetch the whole thing.
942 	 * once the whole ring has been used once, we should be able to back off
943 	 * to 2 or so slots, but we currently don't have a way of doing that.
944 	 */
945 	if_rxr_init(&rx->rxr[0], 32, rx->rx_ring.ring_size - 1);
946 	if_rxr_init(&rx->rxr[1], 32, rx->rx_ag_ring.ring_size - 1);
947 	rx->rx_prod = 0;
948 	rx->rx_cons = 0;
949 	rx->rx_ag_prod = 0;
950 	rx->rx_ag_cons = 0;
951 	bnxt_rx_fill(bq);
952 
953 	tx->tx_cons = 0;
954 	tx->tx_prod = 0;
955 	tx->tx_ring_cons = 0;
956 	tx->tx_ring_prod = 0;
957 	ifq_clr_oactive(ifp->if_ifqs[bq->q_index]);
958 	ifq_restart(ifp->if_ifqs[bq->q_index]);
959 	return 0;
960 
961 destroy_tx_slots:
962 	bnxt_free_slots(sc, tx->tx_slots, i, tx->tx_ring.ring_size);
963 	tx->tx_slots = NULL;
964 
965 	i = rx->rx_ag_ring.ring_size;
966 destroy_rx_ag_slots:
967 	bnxt_free_slots(sc, rx->rx_ag_slots, i, rx->rx_ag_ring.ring_size);
968 	rx->rx_ag_slots = NULL;
969 
970 	i = rx->rx_ring.ring_size;
971 destroy_rx_slots:
972 	bnxt_free_slots(sc, rx->rx_slots, i, rx->rx_ring.ring_size);
973 	rx->rx_slots = NULL;
974 dealloc_ring_group:
975 	bnxt_hwrm_ring_grp_free(sc, &bq->q_rg);
976 dealloc_ag:
977 	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
978 	    &rx->rx_ag_ring);
979 dealloc_tx:
980 	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
981 	    &tx->tx_ring);
982 dealloc_rx:
983 	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
984 	    &rx->rx_ring);
985 dealloc_stats:
986 	bnxt_hwrm_stat_ctx_free(sc, cp);
987 free_rx:
988 	bnxt_dmamem_free(sc, rx->rx_ring_mem);
989 	rx->rx_ring_mem = NULL;
990 free_tx:
991 	bnxt_dmamem_free(sc, tx->tx_ring_mem);
992 	tx->tx_ring_mem = NULL;
993 	return ENOMEM;
994 }
995 
/*
 * Tear down one queue's hardware and software state: release the
 * tx/rx slot arrays (and their dma maps/mbufs), free the ring group
 * and stats context, then the hardware rings, and finally the backing
 * ring memory.  The teardown order is the reverse of setup.
 */
void
bnxt_queue_down(struct bnxt_softc *sc, struct bnxt_queue *bq)
{
	struct bnxt_cp_ring *cp = &bq->q_cp;
	struct bnxt_rx_queue *rx = &bq->q_rx;
	struct bnxt_tx_queue *tx = &bq->q_tx;

	/* empty rx ring first i guess */

	/* all slots are in use here, so free the full ring's worth */
	bnxt_free_slots(sc, tx->tx_slots, tx->tx_ring.ring_size,
	    tx->tx_ring.ring_size);
	tx->tx_slots = NULL;

	bnxt_free_slots(sc, rx->rx_ag_slots, rx->rx_ag_ring.ring_size,
	    rx->rx_ag_ring.ring_size);
	rx->rx_ag_slots = NULL;

	bnxt_free_slots(sc, rx->rx_slots, rx->rx_ring.ring_size,
	    rx->rx_ring.ring_size);
	rx->rx_slots = NULL;

	bnxt_hwrm_ring_grp_free(sc, &bq->q_rg);
	bnxt_hwrm_stat_ctx_free(sc, &bq->q_cp);

	/* may need to wait for 500ms here before we can free the rings */

	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
	    &tx->tx_ring);
	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
	    &rx->rx_ag_ring);
	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
	    &rx->rx_ring);

	/* if only one queue, leave cp ring in place for async events */
	if (sc->sc_nqueues > 1) {
		bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
		    &cp->ring);

		bnxt_dmamem_free(sc, cp->ring_mem);
		cp->ring_mem = NULL;
	}

	bnxt_dmamem_free(sc, rx->rx_ring_mem);
	rx->rx_ring_mem = NULL;

	bnxt_dmamem_free(sc, tx->tx_ring_mem);
	tx->tx_ring_mem = NULL;
}
1044 
/*
 * Bring the interface up: allocate the shared stats context memory and
 * the two-page rx config buffer (page 0: multicast list, page 1: RSS
 * indirection table + hash key), bring up each queue, then create and
 * configure the vnic (ring group, placement, mac filter, optional RSS)
 * before marking the interface running.  On any failure, everything
 * allocated so far is unwound through the labels at the bottom.
 */
void
bnxt_up(struct bnxt_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	sc->sc_stats_ctx_mem = bnxt_dmamem_alloc(sc,
	    sizeof(struct ctx_hw_stats) * sc->sc_nqueues);
	if (sc->sc_stats_ctx_mem == NULL) {
		printf("%s: failed to allocate stats contexts\n", DEVNAME(sc));
		return;
	}

	sc->sc_rx_cfg = bnxt_dmamem_alloc(sc, PAGE_SIZE * 2);
	if (sc->sc_rx_cfg == NULL) {
		printf("%s: failed to allocate rx config buffer\n",
		    DEVNAME(sc));
		goto free_stats;
	}

	for (i = 0; i < sc->sc_nqueues; i++) {
		if (bnxt_queue_up(sc, &sc->sc_queues[i]) != 0) {
			goto down_queues;
		}
	}

	sc->sc_vnic.rss_id = (uint16_t)HWRM_NA_SIGNATURE;
	if (bnxt_hwrm_vnic_ctx_alloc(sc, &sc->sc_vnic.rss_id) != 0) {
		printf("%s: failed to allocate vnic rss context\n",
		    DEVNAME(sc));
		goto down_queues;
	}

	/* unused ids must be NA, not zero, or firmware misinterprets them */
	sc->sc_vnic.id = (uint16_t)HWRM_NA_SIGNATURE;
	sc->sc_vnic.def_ring_grp = sc->sc_queues[0].q_rg.grp_id;
	sc->sc_vnic.mru = BNXT_MAX_MTU;
	sc->sc_vnic.cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	sc->sc_vnic.lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
	sc->sc_vnic.flags = BNXT_VNIC_FLAG_DEFAULT |
	    BNXT_VNIC_FLAG_VLAN_STRIP;
	if (bnxt_hwrm_vnic_alloc(sc, &sc->sc_vnic) != 0) {
		printf("%s: failed to allocate vnic\n", DEVNAME(sc));
		goto dealloc_vnic_ctx;
	}

	if (bnxt_hwrm_vnic_cfg(sc, &sc->sc_vnic) != 0) {
		printf("%s: failed to configure vnic\n", DEVNAME(sc));
		goto dealloc_vnic;
	}

	if (bnxt_hwrm_vnic_cfg_placement(sc, &sc->sc_vnic) != 0) {
		printf("%s: failed to configure vnic placement mode\n",
		    DEVNAME(sc));
		goto dealloc_vnic;
	}

	sc->sc_vnic.filter_id = -1;
	if (bnxt_hwrm_set_filter(sc, &sc->sc_vnic) != 0) {
		printf("%s: failed to set vnic filter\n", DEVNAME(sc));
		goto dealloc_vnic;
	}

	if (sc->sc_nqueues > 1) {
		/* second page of sc_rx_cfg: RSS table, then the hash key */
		uint16_t *rss_table = (BNXT_DMA_KVA(sc->sc_rx_cfg) + PAGE_SIZE);
		uint8_t *hash_key = (uint8_t *)(rss_table + HW_HASH_INDEX_SIZE);

		/* spread the table slots over the ring groups, round robin */
		for (i = 0; i < HW_HASH_INDEX_SIZE; i++) {
			struct bnxt_queue *bq;

			bq = &sc->sc_queues[i % sc->sc_nqueues];
			rss_table[i] = htole16(bq->q_rg.grp_id);
		}
		stoeplitz_to_key(hash_key, HW_HASH_KEY_SIZE);

		if (bnxt_hwrm_vnic_rss_cfg(sc, &sc->sc_vnic,
		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6,
		    BNXT_DMA_DVA(sc->sc_rx_cfg) + PAGE_SIZE,
		    BNXT_DMA_DVA(sc->sc_rx_cfg) + PAGE_SIZE +
		    (HW_HASH_INDEX_SIZE * sizeof(uint16_t))) != 0) {
			printf("%s: failed to set RSS config\n", DEVNAME(sc));
			goto dealloc_vnic;
		}
	}

	bnxt_iff(sc);
	SET(ifp->if_flags, IFF_RUNNING);

	return;

dealloc_vnic:
	bnxt_hwrm_vnic_free(sc, &sc->sc_vnic);
dealloc_vnic_ctx:
	bnxt_hwrm_vnic_ctx_free(sc, &sc->sc_vnic.rss_id);
down_queues:
	for (i = 0; i < sc->sc_nqueues; i++)
		bnxt_queue_down(sc, &sc->sc_queues[i]);

	bnxt_dmamem_free(sc, sc->sc_rx_cfg);
	sc->sc_rx_cfg = NULL;
free_stats:
	bnxt_dmamem_free(sc, sc->sc_stats_ctx_mem);
	sc->sc_stats_ctx_mem = NULL;
}
1151 
/*
 * Stop the interface: clear RUNNING, quiesce each transmit queue and
 * cancel the rx refill timeouts, release the vnic (filter, vnic, RSS
 * context), then tear down each queue and free the shared buffers.
 */
void
bnxt_down(struct bnxt_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	CLR(ifp->if_flags, IFF_RUNNING);

	for (i = 0; i < sc->sc_nqueues; i++) {
		ifq_clr_oactive(ifp->if_ifqs[i]);
		/* wait for any transmit in progress to finish */
		ifq_barrier(ifp->if_ifqs[i]);
		/* intr barrier? */

		timeout_del(&sc->sc_queues[i].q_rx.rx_refill);
	}

	bnxt_hwrm_free_filter(sc, &sc->sc_vnic);
	bnxt_hwrm_vnic_free(sc, &sc->sc_vnic);
	bnxt_hwrm_vnic_ctx_free(sc, &sc->sc_vnic.rss_id);

	for (i = 0; i < sc->sc_nqueues; i++)
		bnxt_queue_down(sc, &sc->sc_queues[i]);

	bnxt_dmamem_free(sc, sc->sc_rx_cfg);
	sc->sc_rx_cfg = NULL;

	bnxt_dmamem_free(sc, sc->sc_stats_ctx_mem);
	sc->sc_stats_ctx_mem = NULL;
}
1181 
/*
 * Program the hardware rx filter from the interface state: promiscuous,
 * all-multicast (when ranges are present or the list won't fit in one
 * page), or an explicit multicast list copied into the first page of
 * the rx config DMA buffer and handed to firmware.
 */
void
bnxt_iff(struct bnxt_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	char *mc_list;
	uint32_t rx_mask, mc_count;

	rx_mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST
	    | HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST
	    | HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;

	/* multicast addresses are packed at the start of the rx config page */
	mc_list = BNXT_DMA_KVA(sc->sc_rx_cfg);
	mc_count = 0;

	if (ifp->if_flags & IFF_PROMISC) {
		SET(ifp->if_flags, IFF_ALLMULTI);
		rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	} else if ((sc->sc_ac.ac_multirangecnt > 0) ||
	    (sc->sc_ac.ac_multicnt > (PAGE_SIZE / ETHER_ADDR_LEN))) {
		/* can't express ranges, and the list must fit in one page */
		SET(ifp->if_flags, IFF_ALLMULTI);
		rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	} else {
		CLR(ifp->if_flags, IFF_ALLMULTI);
		ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
		while (enm != NULL) {
			memcpy(mc_list, enm->enm_addrlo, ETHER_ADDR_LEN);
			mc_list += ETHER_ADDR_LEN;
			mc_count++;

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	bnxt_hwrm_cfa_l2_set_rx_mask(sc, sc->sc_vnic.id, rx_mask,
	    BNXT_DMA_DVA(sc->sc_rx_cfg), mc_count);
}
1220 
1221 int
1222 bnxt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1223 {
1224 	struct bnxt_softc 	*sc = (struct bnxt_softc *)ifp->if_softc;
1225 	struct ifreq		*ifr = (struct ifreq *)data;
1226 	int			s, error = 0;
1227 
1228 	s = splnet();
1229 	switch (cmd) {
1230 	case SIOCSIFADDR:
1231 		ifp->if_flags |= IFF_UP;
1232 		/* FALLTHROUGH */
1233 
1234 	case SIOCSIFFLAGS:
1235 		if (ISSET(ifp->if_flags, IFF_UP)) {
1236 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1237 				error = ENETRESET;
1238 			else
1239 				bnxt_up(sc);
1240 		} else {
1241 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1242 				bnxt_down(sc);
1243 		}
1244 		break;
1245 
1246 	case SIOCGIFMEDIA:
1247 	case SIOCSIFMEDIA:
1248 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1249 		break;
1250 
1251 	case SIOCGIFRXR:
1252 		error = bnxt_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
1253 		break;
1254 
1255 	case SIOCGIFSFFPAGE:
1256 		error = bnxt_get_sffpage(sc, (struct if_sffpage *)data);
1257 		break;
1258 
1259 	default:
1260 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
1261 	}
1262 
1263 	if (error == ENETRESET) {
1264 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1265 		    (IFF_UP | IFF_RUNNING))
1266 			bnxt_iff(sc);
1267 		error = 0;
1268 	}
1269 
1270 	splx(s);
1271 
1272 	return (error);
1273 }
1274 
1275 int
1276 bnxt_rxrinfo(struct bnxt_softc *sc, struct if_rxrinfo *ifri)
1277 {
1278 	struct if_rxring_info *ifr;
1279 	int i;
1280 	int error;
1281 
1282 	ifr = mallocarray(sc->sc_nqueues * 2, sizeof(*ifr), M_TEMP,
1283 	    M_WAITOK | M_ZERO | M_CANFAIL);
1284 	if (ifr == NULL)
1285 		return (ENOMEM);
1286 
1287 	for (i = 0; i < sc->sc_nqueues; i++) {
1288 		ifr[(i * 2)].ifr_size = MCLBYTES;
1289 		ifr[(i * 2)].ifr_info = sc->sc_queues[i].q_rx.rxr[0];
1290 
1291 		ifr[(i * 2) + 1].ifr_size = BNXT_AG_BUFFER_SIZE;
1292 		ifr[(i * 2) + 1].ifr_info = sc->sc_queues[i].q_rx.rxr[1];
1293 	}
1294 
1295 	error = if_rxr_info_ioctl(ifri, sc->sc_nqueues * 2, ifr);
1296 	free(ifr, M_TEMP, sc->sc_nqueues * 2 * sizeof(*ifr));
1297 
1298 	return (error);
1299 }
1300 
1301 int
1302 bnxt_load_mbuf(struct bnxt_softc *sc, struct bnxt_slot *bs, struct mbuf *m)
1303 {
1304 	switch (bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,
1305 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
1306 	case 0:
1307 		break;
1308 
1309 	case EFBIG:
1310 		if (m_defrag(m, M_DONTWAIT) == 0 &&
1311 		    bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,
1312 		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
1313 			break;
1314 
1315 	default:
1316 		return (1);
1317 	}
1318 
1319 	bs->bs_m = m;
1320 	return (0);
1321 }
1322 
/*
 * Transmit start routine.  Dequeues packets and writes them into the
 * tx descriptor ring: each packet uses a long bd (first segment), a
 * tx_bd_long_hi (checksum/vlan offload), and a short bd per remaining
 * segment.  NO_CMPL is set on every packet except the last one queued,
 * so the hardware generates one completion per doorbell batch.
 */
void
bnxt_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct tx_bd_short *txring;
	struct tx_bd_long_hi *txhi;
	struct bnxt_tx_queue *tx = ifq->ifq_softc;
	struct bnxt_softc *sc = tx->tx_softc;
	struct bnxt_slot *bs;
	bus_dmamap_t map;
	struct mbuf *m;
	u_int idx, free, used, laststart;
	uint16_t txflags;
	int i;

	txring = (struct tx_bd_short *)BNXT_DMA_KVA(tx->tx_ring_mem);

	/* descriptors free = distance from producer to consumer, mod size */
	idx = tx->tx_ring_prod;
	free = tx->tx_ring_cons;
	if (free <= idx)
		free += tx->tx_ring.ring_size;
	free -= idx;

	used = 0;

	for (;;) {
		/* +1 for tx_bd_long_hi */
		if (used + BNXT_MAX_TX_SEGS + 1 > free) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		bs = &tx->tx_slots[tx->tx_prod];
		if (bnxt_load_mbuf(sc, bs, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		map = bs->bs_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		used += BNXT_TX_SLOTS(bs);

		/* first segment */
		laststart = idx;
		txring[idx].len = htole16(map->dm_segs[0].ds_len);
		/* opaque is echoed back in the tx completion */
		txring[idx].opaque = tx->tx_prod;
		txring[idx].addr = htole64(map->dm_segs[0].ds_addr);

		/* length hint lets the hardware size its dma fetches */
		if (map->dm_mapsize < 512)
			txflags = TX_BD_LONG_FLAGS_LHINT_LT512;
		else if (map->dm_mapsize < 1024)
			txflags = TX_BD_LONG_FLAGS_LHINT_LT1K;
		else if (map->dm_mapsize < 2048)
			txflags = TX_BD_LONG_FLAGS_LHINT_LT2K;
		else
			txflags = TX_BD_LONG_FLAGS_LHINT_GTE2K;
		txflags |= TX_BD_LONG_TYPE_TX_BD_LONG |
		    TX_BD_LONG_FLAGS_NO_CMPL |
		    (BNXT_TX_SLOTS(bs) << TX_BD_LONG_FLAGS_BD_CNT_SFT);
		if (map->dm_nsegs == 1)
			txflags |= TX_BD_SHORT_FLAGS_PACKET_END;
		txring[idx].flags_type = htole16(txflags);

		idx++;
		if (idx == tx->tx_ring.ring_size)
			idx = 0;

		/* long tx descriptor */
		txhi = (struct tx_bd_long_hi *)&txring[idx];
		memset(txhi, 0, sizeof(*txhi));
		txflags = 0;
		if (m->m_pkthdr.csum_flags & (M_UDP_CSUM_OUT | M_TCP_CSUM_OUT))
			txflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			txflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
		txhi->lflags = htole16(txflags);

#if NVLAN > 0
		if (m->m_flags & M_VLANTAG) {
			txhi->cfa_meta = htole32(m->m_pkthdr.ether_vtag |
			    TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 |
			    TX_BD_LONG_CFA_META_KEY_VLAN_TAG);
		}
#endif

		idx++;
		if (idx == tx->tx_ring.ring_size)
			idx = 0;

		/* remaining segments */
		txflags = TX_BD_SHORT_TYPE_TX_BD_SHORT;
		for (i = 1; i < map->dm_nsegs; i++) {
			if (i == map->dm_nsegs - 1)
				txflags |= TX_BD_SHORT_FLAGS_PACKET_END;
			txring[idx].flags_type = htole16(txflags);

			txring[idx].len =
			    htole16(bs->bs_map->dm_segs[i].ds_len);
			txring[idx].opaque = tx->tx_prod;
			txring[idx].addr =
			    htole64(bs->bs_map->dm_segs[i].ds_addr);

			idx++;
			if (idx == tx->tx_ring.ring_size)
				idx = 0;
		}

		if (++tx->tx_prod >= tx->tx_ring.ring_size)
			tx->tx_prod = 0;
	}

	/*
	 * unset NO_CMPL on the first bd of the last packet
	 * NOTE(review): set above as TX_BD_LONG_FLAGS_NO_CMPL, cleared here
	 * as TX_BD_SHORT_FLAGS_NO_CMPL — presumably the same bit in both
	 * descriptor layouts; confirm against the HSI definitions.
	 */
	if (used != 0) {
		txring[laststart].flags_type &=
		    ~htole16(TX_BD_SHORT_FLAGS_NO_CMPL);
	}

	bnxt_write_tx_doorbell(sc, &tx->tx_ring, idx);
	tx->tx_ring_prod = idx;
}
1453 
1454 void
1455 bnxt_handle_async_event(struct bnxt_softc *sc, struct cmpl_base *cmpl)
1456 {
1457 	struct hwrm_async_event_cmpl *ae = (struct hwrm_async_event_cmpl *)cmpl;
1458 	uint16_t type = le16toh(ae->event_id);
1459 
1460 	switch (type) {
1461 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1462 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
1463 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
1464 		bnxt_hwrm_port_phy_qcfg(sc, NULL);
1465 		break;
1466 
1467 	default:
1468 		printf("%s: unexpected async event %x\n", DEVNAME(sc), type);
1469 		break;
1470 	}
1471 }
1472 
1473 struct cmpl_base *
1474 bnxt_cpr_next_cmpl(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
1475 {
1476 	struct cmpl_base *cmpl;
1477 	uint32_t cons;
1478 	int v_bit;
1479 
1480 	cons = cpr->cons + 1;
1481 	v_bit = cpr->v_bit;
1482 	if (cons == cpr->ring.ring_size) {
1483 		cons = 0;
1484 		v_bit = !v_bit;
1485 	}
1486 	cmpl = &((struct cmpl_base *)cpr->ring.vaddr)[cons];
1487 
1488 	if ((!!(cmpl->info3_v & htole32(CMPL_BASE_V))) != (!!v_bit))
1489 		return (NULL);
1490 
1491 	cpr->cons = cons;
1492 	cpr->v_bit = v_bit;
1493 	return (cmpl);
1494 }
1495 
1496 void
1497 bnxt_cpr_commit(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
1498 {
1499 	cpr->commit_cons = cpr->cons;
1500 	cpr->commit_v_bit = cpr->v_bit;
1501 }
1502 
1503 void
1504 bnxt_cpr_rollback(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
1505 {
1506 	cpr->cons = cpr->commit_cons;
1507 	cpr->v_bit = cpr->commit_v_bit;
1508 }
1509 
/*
 * Interrupt handler for the default completion ring; only firmware
 * async events are expected here.  Drains the ring and re-arms it at
 * the committed consumer position.
 */
int
bnxt_admin_intr(void *xsc)
{
	struct bnxt_softc *sc = (struct bnxt_softc *)xsc;
	struct bnxt_cp_ring *cpr = &sc->sc_cp_ring;
	struct cmpl_base *cmpl;
	uint16_t type;

	bnxt_write_cp_doorbell(sc, &cpr->ring, 0);
	cmpl = bnxt_cpr_next_cmpl(sc, cpr);
	while (cmpl != NULL) {
		type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
		switch (type) {
		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
			bnxt_handle_async_event(sc, cmpl);
			break;
		default:
			printf("%s: unexpected completion type %u\n",
			    DEVNAME(sc), type);
		}

		bnxt_cpr_commit(sc, cpr);
		cmpl = bnxt_cpr_next_cmpl(sc, cpr);
	}

	/* ack consumed entries and re-enable interrupts on this ring */
	bnxt_write_cp_doorbell_index(sc, &cpr->ring,
	    (cpr->commit_cons+1) % cpr->ring.ring_size, 1);
	return (1);
}
1539 
/*
 * Per-queue interrupt handler.  Drains the queue's completion ring,
 * dispatching rx, tx and async event completions, then acks the ring,
 * returns consumed rx slots to the rxr accounting, pushes received
 * packets up the stack and refills the rx rings.  An rx completion
 * that can't be fully processed yet (bnxt_rx() signals this via its
 * return value) is rolled back and retried on the next interrupt.
 */
int
bnxt_intr(void *xq)
{
	struct bnxt_queue *q = (struct bnxt_queue *)xq;
	struct bnxt_softc *sc = q->q_sc;
	struct bnxt_cp_ring *cpr = &q->q_cp;
	struct bnxt_rx_queue *rx = &q->q_rx;
	struct bnxt_tx_queue *tx = &q->q_tx;
	struct cmpl_base *cmpl;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint16_t type;
	int rxfree, txfree, agfree, rv, rollback;

	bnxt_write_cp_doorbell(sc, &cpr->ring, 0);
	rxfree = 0;
	txfree = 0;
	agfree = 0;
	rv = -1;
	cmpl = bnxt_cpr_next_cmpl(sc, cpr);
	while (cmpl != NULL) {
		type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
		rollback = 0;
		switch (type) {
		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
			bnxt_handle_async_event(sc, cmpl);
			break;
		case CMPL_BASE_TYPE_RX_L2:
			rollback = bnxt_rx(sc, rx, cpr, &ml, &rxfree, &agfree, cmpl);
			break;
		case CMPL_BASE_TYPE_TX_L2:
			bnxt_txeof(sc, tx, &txfree, cmpl);
			break;
		default:
			printf("%s: unexpected completion type %u\n",
			    DEVNAME(sc), type);
		}

		if (rollback) {
			bnxt_cpr_rollback(sc, cpr);
			break;
		}
		rv = 1;
		bnxt_cpr_commit(sc, cpr);
		cmpl = bnxt_cpr_next_cmpl(sc, cpr);
	}

	/*
	 * comments in bnxtreg.h suggest we should be writing cpr->cons here,
	 * but writing cpr->cons + 1 makes it stop interrupting.
	 */
	bnxt_write_cp_doorbell_index(sc, &cpr->ring,
	    (cpr->commit_cons+1) % cpr->ring.ring_size, 1);

	if (rxfree != 0) {
		/* advance both rx consumer indexes, wrapping at ring size */
		rx->rx_cons += rxfree;
		if (rx->rx_cons >= rx->rx_ring.ring_size)
			rx->rx_cons -= rx->rx_ring.ring_size;

		rx->rx_ag_cons += agfree;
		if (rx->rx_ag_cons >= rx->rx_ag_ring.ring_size)
			rx->rx_ag_cons -= rx->rx_ag_ring.ring_size;

		if_rxr_put(&rx->rxr[0], rxfree);
		if_rxr_put(&rx->rxr[1], agfree);

		if (ifiq_input(rx->rx_ifiq, &ml)) {
			if_rxr_livelocked(&rx->rxr[0]);
			if_rxr_livelocked(&rx->rxr[1]);
		}

		bnxt_rx_fill(q);
		/* if a ring ran completely dry, schedule a deferred refill */
		if ((rx->rx_cons == rx->rx_prod) ||
		    (rx->rx_ag_cons == rx->rx_ag_prod))
			timeout_add(&rx->rx_refill, 0);
	}
	if (txfree != 0) {
		if (ifq_is_oactive(tx->tx_ifq))
			ifq_restart(tx->tx_ifq);
	}
	return (rv);
}
1621 
/* Watchdog handler; intentionally a no-op. */
void
bnxt_watchdog(struct ifnet *ifp)
{
}
1626 
/*
 * ifmedia status callback: defer to the firmware phy query, which
 * fills in the link state and active media in *ifmr.
 */
void
bnxt_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bnxt_softc *sc = (struct bnxt_softc *)ifp->if_softc;
	bnxt_hwrm_port_phy_qcfg(sc, ifmr);
}
1633 
1634 uint64_t
1635 bnxt_get_media_type(uint64_t speed, int phy_type)
1636 {
1637 	switch (phy_type) {
1638 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN:
1639 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR:
1640 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L:
1641 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S:
1642 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N:
1643 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4:
1644 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4:
1645 		switch (speed) {
1646 		case IF_Gbps(1):
1647 			return IFM_1000_T;
1648 		case IF_Gbps(10):
1649 			return IFM_10G_SFP_CU;
1650 		case IF_Gbps(25):
1651 			return IFM_25G_CR;
1652 		case IF_Gbps(40):
1653 			return IFM_40G_CR4;
1654 		case IF_Gbps(50):
1655 			return IFM_50G_CR2;
1656 		case IF_Gbps(100):
1657 			return IFM_100G_CR4;
1658 		}
1659 		break;
1660 
1661 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR:
1662 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4:
1663 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4:
1664 		switch (speed) {
1665 		case IF_Gbps(1):
1666 			return IFM_1000_LX;
1667 		case IF_Gbps(10):
1668 			return IFM_10G_LR;
1669 		case IF_Gbps(25):
1670 			return IFM_25G_LR;
1671 		case IF_Gbps(40):
1672 			return IFM_40G_LR4;
1673 		case IF_Gbps(100):
1674 			return IFM_100G_LR4;
1675 		}
1676 		break;
1677 
1678 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR:
1679 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR:
1680 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4:
1681 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10:
1682 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX:
1683 		switch (speed) {
1684 		case IF_Gbps(1):
1685 			return IFM_1000_SX;
1686 		case IF_Gbps(10):
1687 			return IFM_10G_SR;
1688 		case IF_Gbps(25):
1689 			return IFM_25G_SR;
1690 		case IF_Gbps(40):
1691 			return IFM_40G_SR4;
1692 		case IF_Gbps(100):
1693 			return IFM_100G_SR4;
1694 		}
1695 		break;
1696 
1697 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4:
1698 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4:
1699 		switch (speed) {
1700 		case IF_Gbps(10):
1701 			return IFM_10G_ER;
1702 		case IF_Gbps(25):
1703 			return IFM_25G_ER;
1704 		}
1705 		/* missing IFM_40G_ER4, IFM_100G_ER4 */
1706 		break;
1707 
1708 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4:
1709 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2:
1710 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR:
1711 		switch (speed) {
1712 		case IF_Gbps(10):
1713 			return IFM_10G_KR;
1714 		case IF_Gbps(20):
1715 			return IFM_20G_KR2;
1716 		case IF_Gbps(25):
1717 			return IFM_25G_KR;
1718 		case IF_Gbps(40):
1719 			return IFM_40G_KR4;
1720 		case IF_Gbps(50):
1721 			return IFM_50G_KR2;
1722 		case IF_Gbps(100):
1723 			return IFM_100G_KR4;
1724 		}
1725 		break;
1726 
1727 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX:
1728 		switch (speed) {
1729 		case IF_Gbps(1):
1730 			return IFM_1000_KX;
1731 		case IF_Mbps(2500):
1732 			return IFM_2500_KX;
1733 		case IF_Gbps(10):
1734 			return IFM_10G_KX4;
1735 		}
1736 		break;
1737 
1738 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET:
1739 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE:
1740 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET:
1741 		switch (speed) {
1742 		case IF_Mbps(10):
1743 			return IFM_10_T;
1744 		case IF_Mbps(100):
1745 			return IFM_100_TX;
1746 		case IF_Gbps(1):
1747 			return IFM_1000_T;
1748 		case IF_Mbps(2500):
1749 			return IFM_2500_T;
1750 		case IF_Gbps(10):
1751 			return IFM_10G_T;
1752 		}
1753 		break;
1754 
1755 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY:
1756 		switch (speed) {
1757 		case IF_Gbps(1):
1758 			return IFM_1000_SGMII;
1759 		}
1760 		break;
1761 
1762 	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE:
1763 		switch (speed) {
1764 		case IF_Gbps(10):
1765 			return IFM_10G_AOC;
1766 		case IF_Gbps(25):
1767 			return IFM_25G_AOC;
1768 		case IF_Gbps(40):
1769 			return IFM_40G_AOC;
1770 		case IF_Gbps(100):
1771 			return IFM_100G_AOC;
1772 		}
1773 		break;
1774 	}
1775 
1776 	return 0;
1777 }
1778 
1779 void
1780 bnxt_add_media_type(struct bnxt_softc *sc, int supported_speeds, uint64_t speed, uint64_t ifmt)
1781 {
1782 	int speed_bit = 0;
1783 	switch (speed) {
1784 	case IF_Gbps(1):
1785 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB;
1786 		break;
1787 	case IF_Gbps(2):
1788 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB;
1789 		break;
1790 	case IF_Mbps(2500):
1791 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB;
1792 		break;
1793 	case IF_Gbps(10):
1794 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB;
1795 		break;
1796 	case IF_Gbps(20):
1797 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB;
1798 		break;
1799 	case IF_Gbps(25):
1800 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB;
1801 		break;
1802 	case IF_Gbps(40):
1803 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB;
1804 		break;
1805 	case IF_Gbps(50):
1806 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB;
1807 		break;
1808 	case IF_Gbps(100):
1809 		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB;
1810 		break;
1811 	}
1812 	if (supported_speeds & speed_bit)
1813 		ifmedia_add(&sc->sc_media, IFM_ETHER | ifmt, 0, NULL);
1814 }
1815 
/*
 * Query the phy configuration and link state from firmware.  Updates
 * the interface baudrate and link state, rebuilds the ifmedia list to
 * match the speeds the phy reports as supported, and fills in *ifmr
 * (when non-NULL) for SIOCGIFMEDIA.  Returns 0 on success or the hwrm
 * error.  Called both from ioctl paths and from link async events.
 */
int
bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *softc, struct ifmediareq *ifmr)
{
	struct ifnet *ifp = &softc->sc_ac.ac_if;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp =
	    BNXT_DMA_KVA(softc->sc_cmd_resp);
	int link_state = LINK_STATE_DOWN;
	/* candidate speeds offered to bnxt_add_media_type() below */
	uint64_t speeds[] = {
		IF_Gbps(1), IF_Gbps(2), IF_Mbps(2500), IF_Gbps(10), IF_Gbps(20),
		IF_Gbps(25), IF_Gbps(40), IF_Gbps(50), IF_Gbps(100)
	};
	uint64_t media_type;
	int duplex;
	int rc = 0;
	int i;

	BNXT_HWRM_LOCK(softc);
	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_QCFG);

	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc) {
		printf("%s: failed to query port phy config\n", DEVNAME(softc));
		goto exit;
	}

	/* the duplex field moved between firmware interface versions */
	if (softc->sc_hwrm_ver > 0x10800)
		duplex = resp->duplex_state;
	else
		duplex = resp->duplex_cfg;

	if (resp->link == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) {
		if (duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF)
			link_state = LINK_STATE_HALF_DUPLEX;
		else
			link_state = LINK_STATE_FULL_DUPLEX;

		switch (resp->link_speed) {
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB:
			ifp->if_baudrate = IF_Mbps(10);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
			ifp->if_baudrate = IF_Mbps(100);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
			ifp->if_baudrate = IF_Gbps(1);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
			ifp->if_baudrate = IF_Gbps(2);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
			ifp->if_baudrate = IF_Mbps(2500);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
			ifp->if_baudrate = IF_Gbps(10);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
			ifp->if_baudrate = IF_Gbps(20);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
			ifp->if_baudrate = IF_Gbps(25);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
			ifp->if_baudrate = IF_Gbps(40);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
			ifp->if_baudrate = IF_Gbps(50);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
			ifp->if_baudrate = IF_Gbps(100);
			break;
		}
	}

	/* rebuild the media list from scratch on every query */
	ifmedia_delete_instance(&softc->sc_media, IFM_INST_ANY);
	for (i = 0; i < nitems(speeds); i++) {
		media_type = bnxt_get_media_type(speeds[i], resp->phy_type);
		if (media_type != 0)
			bnxt_add_media_type(softc, resp->support_speeds,
			    speeds[i], media_type);
	}
	ifmedia_add(&softc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&softc->sc_media, IFM_ETHER|IFM_AUTO);

	if (ifmr != NULL) {
		ifmr->ifm_status = IFM_AVALID;
		if (LINK_STATE_IS_UP(ifp->if_link_state)) {
			ifmr->ifm_status |= IFM_ACTIVE;
			ifmr->ifm_active = IFM_ETHER | IFM_AUTO;
			if (resp->pause & HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX)
				ifmr->ifm_active |= IFM_ETH_TXPAUSE;
			if (resp->pause & HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX)
				ifmr->ifm_active |= IFM_ETH_RXPAUSE;
			if (duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF)
				ifmr->ifm_active |= IFM_HDX;
			else
				ifmr->ifm_active |= IFM_FDX;

			media_type = bnxt_get_media_type(ifp->if_baudrate, resp->phy_type);
			if (media_type != 0)
				ifmr->ifm_active |= media_type;
		}
	}

exit:
	BNXT_HWRM_UNLOCK(softc);

	/* report link changes outside the hwrm lock */
	if (rc == 0 && (link_state != ifp->if_link_state)) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}

	return rc;
}
1930 
/*
 * ifmedia change callback: translate the selected media subtype into a
 * forced link speed, or restart autonegotiation when no specific speed
 * maps (e.g. IFM_AUTO), then push the new phy configuration to the
 * firmware.  Not permitted on NPAR (shared-port) functions.
 */
int
bnxt_media_change(struct ifnet *ifp)
{
	struct bnxt_softc *sc = (struct bnxt_softc *)ifp->if_softc;
	struct hwrm_port_phy_cfg_input req = {0};
	uint64_t link_speed;

	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return EINVAL;

	if (sc->sc_flags & BNXT_FLAG_NPAR)
		return ENODEV;

	bnxt_hwrm_cmd_hdr_init(sc, &req, HWRM_PORT_PHY_CFG);

	switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
	case IFM_100G_CR4:
	case IFM_100G_SR4:
	case IFM_100G_KR4:
	case IFM_100G_LR4:
	case IFM_100G_AOC:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB;
		break;

	case IFM_50G_CR2:
	case IFM_50G_KR2:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB;
		break;

	case IFM_40G_CR4:
	case IFM_40G_SR4:
	case IFM_40G_LR4:
	case IFM_40G_KR4:
	case IFM_40G_AOC:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB;
		break;

	case IFM_25G_CR:
	case IFM_25G_KR:
	case IFM_25G_SR:
	case IFM_25G_LR:
	case IFM_25G_ER:
	case IFM_25G_AOC:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB;
		break;

	case IFM_10G_LR:
	case IFM_10G_SR:
	case IFM_10G_CX4:
	case IFM_10G_T:
	case IFM_10G_SFP_CU:
	case IFM_10G_LRM:
	case IFM_10G_KX4:
	case IFM_10G_KR:
	case IFM_10G_CR1:
	case IFM_10G_ER:
	case IFM_10G_AOC:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB;
		break;

	case IFM_2500_SX:
	case IFM_2500_KX:
	case IFM_2500_T:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB;
		break;

	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_CX:
	case IFM_1000_KX:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB;
		break;

	case IFM_100_TX:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB;
		break;

	default:
		/* no forced speed: fall back to autonegotiation below */
		link_speed = 0;
	}

	req.enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX);
	req.auto_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
	if (link_speed == 0) {
		req.auto_mode |=
		    HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		req.flags |=
		    htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
		req.enables |=
		    htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE);
	} else {
		req.force_link_speed = htole16(link_speed);
		req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE);
	}
	req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);

	return hwrm_send_message(sc, &req, sizeof(req));
}
2030 
2031 int
2032 bnxt_media_autonegotiate(struct bnxt_softc *sc)
2033 {
2034 	struct hwrm_port_phy_cfg_input req = {0};
2035 
2036 	if (sc->sc_flags & BNXT_FLAG_NPAR)
2037 		return ENODEV;
2038 
2039 	bnxt_hwrm_cmd_hdr_init(sc, &req, HWRM_PORT_PHY_CFG);
2040 	req.auto_mode |= HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
2041 	req.auto_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2042 	req.enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE |
2043 	    HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX);
2044 	req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
2045 	req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);
2046 
2047 	return hwrm_send_message(sc, &req, sizeof(req));
2048 }
2049 
2050 
2051 void
2052 bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr)
2053 {
2054 	struct cmpl_base *cmp = (void *)cpr->ring.vaddr;
2055 	int i;
2056 
2057 	for (i = 0; i < cpr->ring.ring_size; i++)
2058 		cmp[i].info3_v = !cpr->v_bit;
2059 }
2060 
2061 void
2062 bnxt_write_cp_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring,
2063     int enable)
2064 {
2065 	uint32_t val = CMPL_DOORBELL_KEY_CMPL;
2066 	if (enable == 0)
2067 		val |= CMPL_DOORBELL_MASK;
2068 
2069 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
2070 	    BUS_SPACE_BARRIER_WRITE);
2071 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, 0, sc->sc_db_s,
2072 	    BUS_SPACE_BARRIER_WRITE);
2073 	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
2074 	    htole32(val));
2075 }
2076 
2077 void
2078 bnxt_write_cp_doorbell_index(struct bnxt_softc *sc, struct bnxt_ring *ring,
2079     uint32_t index, int enable)
2080 {
2081 	uint32_t val = CMPL_DOORBELL_KEY_CMPL | CMPL_DOORBELL_IDX_VALID |
2082 	    (index & CMPL_DOORBELL_IDX_MASK);
2083 	if (enable == 0)
2084 		val |= CMPL_DOORBELL_MASK;
2085 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
2086 	    BUS_SPACE_BARRIER_WRITE);
2087 	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
2088 	    htole32(val));
2089 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, 0, sc->sc_db_s,
2090 	    BUS_SPACE_BARRIER_WRITE);
2091 }
2092 
2093 void
2094 bnxt_write_rx_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring, int index)
2095 {
2096 	uint32_t val = RX_DOORBELL_KEY_RX | index;
2097 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
2098 	    BUS_SPACE_BARRIER_WRITE);
2099 	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
2100 	    htole32(val));
2101 
2102 	/* second write isn't necessary on all hardware */
2103 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
2104 	    BUS_SPACE_BARRIER_WRITE);
2105 	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
2106 	    htole32(val));
2107 }
2108 
2109 void
2110 bnxt_write_tx_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring, int index)
2111 {
2112 	uint32_t val = TX_DOORBELL_KEY_TX | index;
2113 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
2114 	    BUS_SPACE_BARRIER_WRITE);
2115 	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
2116 	    htole32(val));
2117 
2118 	/* second write isn't necessary on all hardware */
2119 	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
2120 	    BUS_SPACE_BARRIER_WRITE);
2121 	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
2122 	    htole32(val));
2123 }
2124 
/*
 * Attach up to nslots fresh mbuf clusters of size bufsize to
 * consecutive descriptors of an rx producer ring, starting at *prod.
 * Rings the rx doorbell if anything was posted, advances *prod past the
 * last filled slot, and returns the number of slots that could NOT be
 * filled (0 on complete success).
 */
u_int
bnxt_rx_fill_slots(struct bnxt_softc *sc, struct bnxt_ring *ring, void *ring_mem,
    struct bnxt_slot *slots, uint *prod, int bufsize, uint16_t bdtype,
    u_int nslots)
{
	struct rx_prod_pkt_bd *rxring;
	struct bnxt_slot *bs;
	struct mbuf *m;
	uint p, fills;

	rxring = (struct rx_prod_pkt_bd *)ring_mem;
	p = *prod;
	for (fills = 0; fills < nslots; fills++) {
		bs = &slots[p];
		/* allocate a cluster; stop early if memory is tight */
		m = MCLGETL(NULL, M_DONTWAIT, bufsize);
		if (m == NULL)
			break;

		m->m_len = m->m_pkthdr.len = bufsize;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,
		    BUS_DMA_NOWAIT) != 0) {
			m_freem(m);
			break;
		}
		bs->bs_m = m;

		/* fill the descriptor; opaque carries the slot index back
		 * to us in the rx completion */
		rxring[p].flags_type = htole16(bdtype);
		rxring[p].len = htole16(bufsize);
		rxring[p].opaque = p;
		rxring[p].addr = htole64(bs->bs_map->dm_segs[0].ds_addr);

		if (++p >= ring->ring_size)
			p = 0;
	}

	if (fills != 0)
		bnxt_write_rx_doorbell(sc, ring, p);
	*prod = p;

	return (nslots - fills);
}
2166 
2167 int
2168 bnxt_rx_fill(struct bnxt_queue *q)
2169 {
2170 	struct bnxt_rx_queue *rx = &q->q_rx;
2171 	struct bnxt_softc *sc = q->q_sc;
2172 	u_int slots;
2173 	int rv = 0;
2174 
2175 	slots = if_rxr_get(&rx->rxr[0], rx->rx_ring.ring_size);
2176 	if (slots > 0) {
2177 		slots = bnxt_rx_fill_slots(sc, &rx->rx_ring,
2178 		    BNXT_DMA_KVA(rx->rx_ring_mem), rx->rx_slots,
2179 		    &rx->rx_prod, MCLBYTES,
2180 		    RX_PROD_PKT_BD_TYPE_RX_PROD_PKT, slots);
2181 		if_rxr_put(&rx->rxr[0], slots);
2182 	} else
2183 		rv = 1;
2184 
2185 	slots = if_rxr_get(&rx->rxr[1],  rx->rx_ag_ring.ring_size);
2186 	if (slots > 0) {
2187 		slots = bnxt_rx_fill_slots(sc, &rx->rx_ag_ring,
2188 		    BNXT_DMA_KVA(rx->rx_ring_mem) + PAGE_SIZE,
2189 		    rx->rx_ag_slots, &rx->rx_ag_prod,
2190 		    BNXT_AG_BUFFER_SIZE,
2191 		    RX_PROD_AGG_BD_TYPE_RX_PROD_AGG, slots);
2192 		if_rxr_put(&rx->rxr[1], slots);
2193 	} else
2194 		rv = 1;
2195 
2196 	return (rv);
2197 }
2198 
2199 void
2200 bnxt_refill(void *xq)
2201 {
2202 	struct bnxt_queue *q = xq;
2203 	struct bnxt_rx_queue *rx = &q->q_rx;
2204 
2205 	bnxt_rx_fill(q);
2206 
2207 	if (rx->rx_cons == rx->rx_prod)
2208 		timeout_add(&rx->rx_refill, 1);
2209 }
2210 
/*
 * Process one rx packet completion.  cmpl is the first (low) half of
 * the completion; the second half, and for larger packets an
 * aggregation buffer completion, are consumed from the same completion
 * ring.  The received mbuf (with checksum/VLAN/RSS metadata filled in)
 * is appended to ml.  *slots / *agslots count the rx and aggregation
 * ring entries released, for the caller to hand back to if_rxr.
 * Returns 1 if the remaining completion parts are not yet available
 * (caller must retry later), 0 on success.
 */
int
bnxt_rx(struct bnxt_softc *sc, struct bnxt_rx_queue *rx,
    struct bnxt_cp_ring *cpr, struct mbuf_list *ml, int *slots, int *agslots,
    struct cmpl_base *cmpl)
{
	struct mbuf *m, *am;
	struct bnxt_slot *bs;
	struct rx_pkt_cmpl *rxlo = (struct rx_pkt_cmpl *)cmpl;
	struct rx_pkt_cmpl_hi *rxhi;
	struct rx_abuf_cmpl *ag;
	uint32_t flags;
	uint16_t errors;

	/* second part of the rx completion */
	rxhi = (struct rx_pkt_cmpl_hi *)bnxt_cpr_next_cmpl(sc, cpr);
	if (rxhi == NULL) {
		return (1);
	}

	/* packets over 2k in size use an aggregation buffer completion too */
	ag = NULL;
	if ((rxlo->agg_bufs_v1 >> RX_PKT_CMPL_AGG_BUFS_SFT) != 0) {
		ag = (struct rx_abuf_cmpl *)bnxt_cpr_next_cmpl(sc, cpr);
		if (ag == NULL) {
			return (1);
		}
	}

	/* opaque is the rx ring slot index we stored when posting the buffer */
	bs = &rx->rx_slots[rxlo->opaque];
	bus_dmamap_sync(sc->sc_dmat, bs->bs_map, 0, bs->bs_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, bs->bs_map);

	m = bs->bs_m;
	bs->bs_m = NULL;
	m->m_pkthdr.len = m->m_len = letoh16(rxlo->len);
	(*slots)++;

	/* checksum flags */
	flags = lemtoh32(&rxhi->flags2);
	errors = lemtoh16(&rxhi->errors_v2);
	if ((flags & RX_PKT_CMPL_FLAGS2_IP_CS_CALC) != 0 &&
	    (errors & RX_PKT_CMPL_ERRORS_IP_CS_ERROR) == 0)
		m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

	if ((flags & RX_PKT_CMPL_FLAGS2_L4_CS_CALC) != 0 &&
	    (errors & RX_PKT_CMPL_ERRORS_L4_CS_ERROR) == 0)
		m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
		    M_UDP_CSUM_IN_OK;

#if NVLAN > 0
	/* hardware-stripped VLAN tag, if present */
	if ((flags & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) ==
	    RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
		m->m_pkthdr.ether_vtag = lemtoh16(&rxhi->metadata);
		m->m_flags |= M_VLANTAG;
	}
#endif

	if (lemtoh16(&rxlo->flags_type) & RX_PKT_CMPL_FLAGS_RSS_VALID) {
		m->m_pkthdr.ph_flowid = lemtoh32(&rxlo->rss_hash);
		m->m_pkthdr.csum_flags |= M_FLOWID;
	}

	/* chain the aggregation buffer onto the packet, if one was used */
	if (ag != NULL) {
		bs = &rx->rx_ag_slots[ag->opaque];
		bus_dmamap_sync(sc->sc_dmat, bs->bs_map, 0,
		    bs->bs_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, bs->bs_map);

		am = bs->bs_m;
		bs->bs_m = NULL;
		am->m_len = letoh16(ag->len);
		m->m_next = am;
		m->m_pkthdr.len += am->m_len;
		(*agslots)++;
	}

	ml_enqueue(ml, m);
	return (0);
}
2291 
/*
 * Process one tx completion: free all transmitted packets up to and
 * including the one identified by the completion's opaque cookie
 * (the tx slot index stored when the packet was queued).  *txfree
 * accumulates the number of tx descriptors released.
 */
void
bnxt_txeof(struct bnxt_softc *sc, struct bnxt_tx_queue *tx, int *txfree,
    struct cmpl_base *cmpl)
{
	struct tx_cmpl *txcmpl = (struct tx_cmpl *)cmpl;
	struct bnxt_slot *bs;
	bus_dmamap_t map;
	u_int idx, segs, last;

	idx = tx->tx_ring_cons;
	last = tx->tx_cons;
	do {
		bs = &tx->tx_slots[tx->tx_cons];
		map = bs->bs_map;

		/* each packet consumes one bd per DMA segment (plus overhead
		 * accounted for by BNXT_TX_SLOTS) */
		segs = BNXT_TX_SLOTS(bs);
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(bs->bs_m);
		bs->bs_m = NULL;

		idx += segs;
		(*txfree) += segs;
		if (idx >= tx->tx_ring.ring_size)
			idx -= tx->tx_ring.ring_size;

		/* remember the slot just consumed so the loop stops once the
		 * completion's opaque slot has been processed */
		last = tx->tx_cons;
		if (++tx->tx_cons >= tx->tx_ring.ring_size)
			tx->tx_cons = 0;

	} while (last != txcmpl->opaque);
	tx->tx_ring_cons = idx;
}
2326 
2327 /* bnxt_hwrm.c */
2328 
2329 int
2330 bnxt_hwrm_err_map(uint16_t err)
2331 {
2332 	int rc;
2333 
2334 	switch (err) {
2335 	case HWRM_ERR_CODE_SUCCESS:
2336 		return 0;
2337 	case HWRM_ERR_CODE_INVALID_PARAMS:
2338 	case HWRM_ERR_CODE_INVALID_FLAGS:
2339 	case HWRM_ERR_CODE_INVALID_ENABLES:
2340 		return EINVAL;
2341 	case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
2342 		return EACCES;
2343 	case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
2344 		return ENOMEM;
2345 	case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
2346 		return ENOSYS;
2347 	case HWRM_ERR_CODE_FAIL:
2348 		return EIO;
2349 	case HWRM_ERR_CODE_HWRM_ERROR:
2350 	case HWRM_ERR_CODE_UNKNOWN_ERR:
2351 	default:
2352 		return EIO;
2353 	}
2354 
2355 	return rc;
2356 }
2357 
2358 void
2359 bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *softc, void *request,
2360     uint16_t req_type)
2361 {
2362 	struct input *req = request;
2363 
2364 	req->req_type = htole16(req_type);
2365 	req->cmpl_ring = 0xffff;
2366 	req->target_id = 0xffff;
2367 	req->resp_addr = htole64(BNXT_DMA_DVA(softc->sc_cmd_resp));
2368 }
2369 
/*
 * Send one HWRM request to the firmware and busy-wait for its response.
 * Caller must hold the HWRM lock (all callers in this file take
 * BNXT_HWRM_LOCK first).  The request is copied into the hwrm register
 * window (or, in short-command mode, into the shared DMA buffer with a
 * small descriptor written to the window), the channel doorbell is rung,
 * and the response is polled in the shared response buffer.  Returns 0,
 * ETIMEDOUT, or an errno mapped from the firmware error code.
 */
int
_hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
{
	struct input *req = msg;
	struct hwrm_err_output *resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
	uint32_t *data = msg;
	int i;
	uint8_t *valid;
	uint16_t err;
	uint16_t max_req_len = HWRM_MAX_REQ_LEN;
	struct hwrm_short_input short_input = {0};

	/* TODO: DMASYNC in here. */
	req->seq_id = htole16(softc->sc_cmd_seq++);
	/* clear the response area so the poll loops below see fresh data */
	memset(resp, 0, PAGE_SIZE);

	if (softc->sc_flags & BNXT_FLAG_SHORT_CMD) {
		/* NOTE(review): the full request shares the response page;
		 * the firmware reads it before writing the response there. */
		void *short_cmd_req = BNXT_DMA_KVA(softc->sc_cmd_resp);

		memcpy(short_cmd_req, req, msg_len);
		memset((uint8_t *) short_cmd_req + msg_len, 0,
		    softc->sc_max_req_len - msg_len);

		short_input.req_type = req->req_type;
		short_input.signature =
		    htole16(HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
		short_input.size = htole16(msg_len);
		short_input.req_addr =
		    htole64(BNXT_DMA_DVA(softc->sc_cmd_resp));

		/* from here on we transmit the short descriptor instead */
		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		membar_sync();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bus_space_write_4(softc->sc_hwrm_t,
				  softc->sc_hwrm_h,
				  i, *data);
		data++;
	}

	/* Clear to the end of the request buffer */
	for (i = msg_len; i < max_req_len; i += 4)
		bus_space_write_4(softc->sc_hwrm_t, softc->sc_hwrm_h,
		    i, 0);

	/* Ring channel doorbell */
	bus_space_write_4(softc->sc_hwrm_t, softc->sc_hwrm_h, 0x100,
	    htole32(1));

	/* Check if response len is updated; poll up to sc_cmd_timeo ms */
	for (i = 0; i < softc->sc_cmd_timeo; i++) {
		if (resp->resp_len && resp->resp_len <= 4096)
			break;
		DELAY(1000);
	}
	if (i >= softc->sc_cmd_timeo) {
		printf("%s: timeout sending %s: (timeout: %u) seq: %d\n",
		    DEVNAME(softc), GET_HWRM_REQ_TYPE(req->req_type),
		    softc->sc_cmd_timeo,
		    le16toh(req->seq_id));
		return ETIMEDOUT;
	}
	/* Last byte of resp contains the valid key */
	valid = (uint8_t *)resp + resp->resp_len - 1;
	for (i = 0; i < softc->sc_cmd_timeo; i++) {
		if (*valid == HWRM_RESP_VALID_KEY)
			break;
		DELAY(1000);
	}
	if (i >= softc->sc_cmd_timeo) {
		printf("%s: timeout sending %s: "
		    "(timeout: %u) msg {0x%x 0x%x} len:%d v: %d\n",
		    DEVNAME(softc), GET_HWRM_REQ_TYPE(req->req_type),
		    softc->sc_cmd_timeo, le16toh(req->req_type),
		    le16toh(req->seq_id), msg_len,
		    *valid);
		return ETIMEDOUT;
	}

	err = le16toh(resp->error_code);
	if (err) {
		/* HWRM_ERR_CODE_FAIL is a "normal" error, don't log */
		if (err != HWRM_ERR_CODE_FAIL) {
			printf("%s: %s command returned %s error.\n",
			    DEVNAME(softc),
			    GET_HWRM_REQ_TYPE(req->req_type),
			    GET_HWRM_ERROR_CODE(err));
		}
		return bnxt_hwrm_err_map(err);
	}

	return 0;
}
2470 
2471 
2472 int
2473 hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
2474 {
2475 	int rc;
2476 
2477 	BNXT_HWRM_LOCK(softc);
2478 	rc = _hwrm_send_message(softc, msg, msg_len);
2479 	BNXT_HWRM_UNLOCK(softc);
2480 	return rc;
2481 }
2482 
2483 
2484 int
2485 bnxt_hwrm_queue_qportcfg(struct bnxt_softc *softc)
2486 {
2487 	struct hwrm_queue_qportcfg_input req = {0};
2488 	struct hwrm_queue_qportcfg_output *resp =
2489 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
2490 	int	rc = 0;
2491 
2492 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_QPORTCFG);
2493 
2494 	BNXT_HWRM_LOCK(softc);
2495 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2496 	if (rc)
2497 		goto qportcfg_exit;
2498 
2499 	if (!resp->max_configurable_queues) {
2500 		rc = -EINVAL;
2501 		goto qportcfg_exit;
2502 	}
2503 
2504 	softc->sc_tx_queue_id = resp->queue_id0;
2505 
2506 qportcfg_exit:
2507 	BNXT_HWRM_UNLOCK(softc);
2508 	return rc;
2509 }
2510 
/*
 * Query the firmware/HWRM interface version.  Prints the firmware
 * version (this runs during attach, hence the leading ": "), records
 * the packed interface version in sc_hwrm_ver, and picks up the
 * firmware's preferred max request length, command timeout, and whether
 * the short-command format must be used.  The large #if 0 region is
 * version bookkeeping inherited from the FreeBSD driver, kept for
 * reference but not compiled.
 */
int
bnxt_hwrm_ver_get(struct bnxt_softc *softc)
{
	struct hwrm_ver_get_input	req = {0};
	struct hwrm_ver_get_output	*resp =
	    BNXT_DMA_KVA(softc->sc_cmd_resp);
	int				rc;
#if 0
	const char nastr[] = "<not installed>";
	const char naver[] = "<N/A>";
#endif
	uint32_t dev_caps_cfg;

	/* conservative defaults until the firmware tells us otherwise */
	softc->sc_max_req_len = HWRM_MAX_REQ_LEN;
	softc->sc_cmd_timeo = 1000;
	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VER_GET);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	printf(": fw ver %d.%d.%d, ", resp->hwrm_fw_maj, resp->hwrm_fw_min,
	    resp->hwrm_fw_bld);

	/* pack major/minor/update into one comparable integer */
	softc->sc_hwrm_ver = (resp->hwrm_intf_maj << 16) |
	    (resp->hwrm_intf_min << 8) | resp->hwrm_intf_upd;
#if 0
	snprintf(softc->ver_info->hwrm_if_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
	    resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
	softc->ver_info->hwrm_if_major = resp->hwrm_intf_maj;
	softc->ver_info->hwrm_if_minor = resp->hwrm_intf_min;
	softc->ver_info->hwrm_if_update = resp->hwrm_intf_upd;
	snprintf(softc->ver_info->hwrm_fw_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
	    resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	strlcpy(softc->ver_info->driver_hwrm_if_ver, HWRM_VERSION_STR,
	    BNXT_VERSTR_SIZE);
	strlcpy(softc->ver_info->hwrm_fw_name, resp->hwrm_fw_name,
	    BNXT_NAME_SIZE);

	if (resp->mgmt_fw_maj == 0 && resp->mgmt_fw_min == 0 &&
	    resp->mgmt_fw_bld == 0) {
		strlcpy(softc->ver_info->mgmt_fw_ver, naver, BNXT_VERSTR_SIZE);
		strlcpy(softc->ver_info->mgmt_fw_name, nastr, BNXT_NAME_SIZE);
	}
	else {
		snprintf(softc->ver_info->mgmt_fw_ver, BNXT_VERSTR_SIZE,
		    "%d.%d.%d", resp->mgmt_fw_maj, resp->mgmt_fw_min,
		    resp->mgmt_fw_bld);
		strlcpy(softc->ver_info->mgmt_fw_name, resp->mgmt_fw_name,
		    BNXT_NAME_SIZE);
	}
	if (resp->netctrl_fw_maj == 0 && resp->netctrl_fw_min == 0 &&
	    resp->netctrl_fw_bld == 0) {
		strlcpy(softc->ver_info->netctrl_fw_ver, naver,
		    BNXT_VERSTR_SIZE);
		strlcpy(softc->ver_info->netctrl_fw_name, nastr,
		    BNXT_NAME_SIZE);
	}
	else {
		snprintf(softc->ver_info->netctrl_fw_ver, BNXT_VERSTR_SIZE,
		    "%d.%d.%d", resp->netctrl_fw_maj, resp->netctrl_fw_min,
		    resp->netctrl_fw_bld);
		strlcpy(softc->ver_info->netctrl_fw_name, resp->netctrl_fw_name,
		    BNXT_NAME_SIZE);
	}
	if (resp->roce_fw_maj == 0 && resp->roce_fw_min == 0 &&
	    resp->roce_fw_bld == 0) {
		strlcpy(softc->ver_info->roce_fw_ver, naver, BNXT_VERSTR_SIZE);
		strlcpy(softc->ver_info->roce_fw_name, nastr, BNXT_NAME_SIZE);
	}
	else {
		snprintf(softc->ver_info->roce_fw_ver, BNXT_VERSTR_SIZE,
		    "%d.%d.%d", resp->roce_fw_maj, resp->roce_fw_min,
		    resp->roce_fw_bld);
		strlcpy(softc->ver_info->roce_fw_name, resp->roce_fw_name,
		    BNXT_NAME_SIZE);
	}
	softc->ver_info->chip_num = le16toh(resp->chip_num);
	softc->ver_info->chip_rev = resp->chip_rev;
	softc->ver_info->chip_metal = resp->chip_metal;
	softc->ver_info->chip_bond_id = resp->chip_bond_id;
	softc->ver_info->chip_type = resp->chip_platform_type;
#endif

	if (resp->max_req_win_len)
		softc->sc_max_req_len = le16toh(resp->max_req_win_len);
	if (resp->def_req_timeout)
		softc->sc_cmd_timeo = le16toh(resp->def_req_timeout);

	/* short command format: only use it if the firmware requires it */
	dev_caps_cfg = le32toh(resp->dev_caps_cfg);
	if ((dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
		softc->sc_flags |= BNXT_FLAG_SHORT_CMD;

fail:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}
2614 
2615 
2616 int
2617 bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *softc)
2618 {
2619 	struct hwrm_func_drv_rgtr_input req = {0};
2620 
2621 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);
2622 
2623 	req.enables = htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
2624 	    HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE);
2625 	req.os_type = htole16(HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD);
2626 
2627 	req.ver_maj = 6;
2628 	req.ver_min = 4;
2629 	req.ver_upd = 0;
2630 
2631 	return hwrm_send_message(softc, &req, sizeof(req));
2632 }
2633 
#if 0

/*
 * Deregister the driver with the firmware, optionally telling it to
 * prepare for shutdown.  Currently unused and compiled out.
 */
int
bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool shutdown)
{
	struct hwrm_func_drv_unrgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_UNRGTR);
	if (shutdown == true)
		req.flags |=
		    HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN;
	return hwrm_send_message(softc, &req, sizeof(req));
}

#endif
2649 
/*
 * Query the function's capabilities.  Currently we only use the
 * magic-packet WoL capability bit and the permanent MAC address; the
 * commented-out block below is the fuller accounting done by the
 * FreeBSD driver, kept for reference.
 */
int
bnxt_hwrm_func_qcaps(struct bnxt_softc *softc)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp =
	    BNXT_DMA_KVA(softc->sc_cmd_resp);
	/* struct bnxt_func_info *func = &softc->func; */

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCAPS);
	/* 0xffff means "this function" */
	req.fid = htole16(0xffff);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	if (resp->flags &
	    htole32(HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED))
		softc->sc_flags |= BNXT_FLAG_WOL_CAP;

	/* adopt the hardware MAC address */
	memcpy(softc->sc_ac.ac_enaddr, resp->mac_address, 6);
	/*
	func->fw_fid = le16toh(resp->fid);
	memcpy(func->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
	func->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
	func->max_cp_rings = le16toh(resp->max_cmpl_rings);
	func->max_tx_rings = le16toh(resp->max_tx_rings);
	func->max_rx_rings = le16toh(resp->max_rx_rings);
	func->max_hw_ring_grps = le32toh(resp->max_hw_ring_grps);
	if (!func->max_hw_ring_grps)
		func->max_hw_ring_grps = func->max_tx_rings;
	func->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
	func->max_vnics = le16toh(resp->max_vnics);
	func->max_stat_ctxs = le16toh(resp->max_stat_ctx);
	if (BNXT_PF(softc)) {
		struct bnxt_pf_info *pf = &softc->pf;

		pf->port_id = le16toh(resp->port_id);
		pf->first_vf_id = le16toh(resp->first_vf_id);
		pf->max_vfs = le16toh(resp->max_vfs);
		pf->max_encap_records = le32toh(resp->max_encap_records);
		pf->max_decap_records = le32toh(resp->max_decap_records);
		pf->max_tx_em_flows = le32toh(resp->max_tx_em_flows);
		pf->max_tx_wm_flows = le32toh(resp->max_tx_wm_flows);
		pf->max_rx_em_flows = le32toh(resp->max_rx_em_flows);
		pf->max_rx_wm_flows = le32toh(resp->max_rx_wm_flows);
	}
	if (!_is_valid_ether_addr(func->mac_addr)) {
		device_printf(softc->dev, "Invalid ethernet address, generating random locally administered address\n");
		get_random_ether_addr(func->mac_addr);
	}
	*/

fail:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}
2708 
2709 
2710 int
2711 bnxt_hwrm_func_qcfg(struct bnxt_softc *softc)
2712 {
2713         struct hwrm_func_qcfg_input req = {0};
2714         /* struct hwrm_func_qcfg_output *resp =
2715 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
2716 	struct bnxt_func_qcfg *fn_qcfg = &softc->fn_qcfg; */
2717         int rc;
2718 
2719 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCFG);
2720         req.fid = htole16(0xffff);
2721 	BNXT_HWRM_LOCK(softc);
2722 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2723         if (rc)
2724 		goto fail;
2725 
2726 	/*
2727 	fn_qcfg->alloc_completion_rings = le16toh(resp->alloc_cmpl_rings);
2728 	fn_qcfg->alloc_tx_rings = le16toh(resp->alloc_tx_rings);
2729 	fn_qcfg->alloc_rx_rings = le16toh(resp->alloc_rx_rings);
2730 	fn_qcfg->alloc_vnics = le16toh(resp->alloc_vnics);
2731 	*/
2732 fail:
2733 	BNXT_HWRM_UNLOCK(softc);
2734         return rc;
2735 }
2736 
2737 
2738 int
2739 bnxt_hwrm_func_reset(struct bnxt_softc *softc)
2740 {
2741 	struct hwrm_func_reset_input req = {0};
2742 
2743 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESET);
2744 	req.enables = 0;
2745 
2746 	return hwrm_send_message(softc, &req, sizeof(req));
2747 }
2748 
2749 int
2750 bnxt_hwrm_vnic_cfg_placement(struct bnxt_softc *softc,
2751     struct bnxt_vnic_info *vnic)
2752 {
2753 	struct hwrm_vnic_plcmodes_cfg_input req = {0};
2754 
2755 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_PLCMODES_CFG);
2756 
2757 	req.flags = htole32(
2758 	    HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
2759 	req.enables = htole32(
2760 	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
2761 	req.vnic_id = htole16(vnic->id);
2762 	req.jumbo_thresh = htole16(MCLBYTES);
2763 
2764 	return hwrm_send_message(softc, &req, sizeof(req));
2765 }
2766 
2767 int
2768 bnxt_hwrm_vnic_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
2769 {
2770 	struct hwrm_vnic_cfg_input req = {0};
2771 
2772 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_CFG);
2773 
2774 	if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
2775 		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
2776 	if (vnic->flags & BNXT_VNIC_FLAG_BD_STALL)
2777 		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
2778 	if (vnic->flags & BNXT_VNIC_FLAG_VLAN_STRIP)
2779 		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
2780 	req.enables = htole32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
2781 	    HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
2782 	    HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
2783 	req.vnic_id = htole16(vnic->id);
2784 	req.dflt_ring_grp = htole16(vnic->def_ring_grp);
2785 	req.rss_rule = htole16(vnic->rss_id);
2786 	req.cos_rule = htole16(vnic->cos_rule);
2787 	req.lb_rule = htole16(vnic->lb_rule);
2788 	req.mru = htole16(vnic->mru);
2789 
2790 	return hwrm_send_message(softc, &req, sizeof(req));
2791 }
2792 
2793 int
2794 bnxt_hwrm_vnic_alloc(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
2795 {
2796 	struct hwrm_vnic_alloc_input req = {0};
2797 	struct hwrm_vnic_alloc_output *resp =
2798 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
2799 	int rc;
2800 
2801 	if (vnic->id != (uint16_t)HWRM_NA_SIGNATURE) {
2802 		printf("%s: attempt to re-allocate vnic %04x\n",
2803 		    DEVNAME(softc), vnic->id);
2804 		return EINVAL;
2805 	}
2806 
2807 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_ALLOC);
2808 
2809 	if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
2810 		req.flags = htole32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
2811 
2812 	BNXT_HWRM_LOCK(softc);
2813 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2814 	if (rc)
2815 		goto fail;
2816 
2817 	vnic->id = le32toh(resp->vnic_id);
2818 
2819 fail:
2820 	BNXT_HWRM_UNLOCK(softc);
2821 	return rc;
2822 }
2823 
2824 int
2825 bnxt_hwrm_vnic_free(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
2826 {
2827 	struct hwrm_vnic_free_input req = {0};
2828 	int rc;
2829 
2830 	if (vnic->id == (uint16_t)HWRM_NA_SIGNATURE) {
2831 		printf("%s: attempt to deallocate vnic %04x\n",
2832 		    DEVNAME(softc), vnic->id);
2833 		return (EINVAL);
2834 	}
2835 
2836 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_FREE);
2837 	req.vnic_id = htole16(vnic->id);
2838 
2839 	BNXT_HWRM_LOCK(softc);
2840 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2841 	if (rc == 0)
2842 		vnic->id = (uint16_t)HWRM_NA_SIGNATURE;
2843 	BNXT_HWRM_UNLOCK(softc);
2844 
2845 	return (rc);
2846 }
2847 
2848 int
2849 bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *softc, uint16_t *ctx_id)
2850 {
2851 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
2852 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
2853 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
2854 	int rc;
2855 
2856 	if (*ctx_id != (uint16_t)HWRM_NA_SIGNATURE) {
2857 		printf("%s: attempt to re-allocate vnic ctx %04x\n",
2858 		    DEVNAME(softc), *ctx_id);
2859 		return EINVAL;
2860 	}
2861 
2862 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
2863 
2864 	BNXT_HWRM_LOCK(softc);
2865 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2866 	if (rc)
2867 		goto fail;
2868 
2869 	*ctx_id = letoh16(resp->rss_cos_lb_ctx_id);
2870 
2871 fail:
2872 	BNXT_HWRM_UNLOCK(softc);
2873 	return (rc);
2874 }
2875 
2876 int
2877 bnxt_hwrm_vnic_ctx_free(struct bnxt_softc *softc, uint16_t *ctx_id)
2878 {
2879 	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
2880 	int rc;
2881 
2882 	if (*ctx_id == (uint16_t)HWRM_NA_SIGNATURE) {
2883 		printf("%s: attempt to deallocate vnic ctx %04x\n",
2884 		    DEVNAME(softc), *ctx_id);
2885 		return (EINVAL);
2886 	}
2887 
2888 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE);
2889 	req.rss_cos_lb_ctx_id = htole32(*ctx_id);
2890 
2891 	BNXT_HWRM_LOCK(softc);
2892 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2893 	if (rc == 0)
2894 		*ctx_id = (uint16_t)HWRM_NA_SIGNATURE;
2895 	BNXT_HWRM_UNLOCK(softc);
2896 	return (rc);
2897 }
2898 
2899 int
2900 bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
2901 {
2902 	struct hwrm_ring_grp_alloc_input req = {0};
2903 	struct hwrm_ring_grp_alloc_output *resp;
2904 	int rc = 0;
2905 
2906 	if (grp->grp_id != HWRM_NA_SIGNATURE) {
2907 		printf("%s: attempt to re-allocate ring group %04x\n",
2908 		    DEVNAME(softc), grp->grp_id);
2909 		return EINVAL;
2910 	}
2911 
2912 	resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
2913 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_ALLOC);
2914 	req.cr = htole16(grp->cp_ring_id);
2915 	req.rr = htole16(grp->rx_ring_id);
2916 	req.ar = htole16(grp->ag_ring_id);
2917 	req.sc = htole16(grp->stats_ctx);
2918 
2919 	BNXT_HWRM_LOCK(softc);
2920 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2921 	if (rc)
2922 		goto fail;
2923 
2924 	grp->grp_id = letoh32(resp->ring_group_id);
2925 
2926 fail:
2927 	BNXT_HWRM_UNLOCK(softc);
2928 	return rc;
2929 }
2930 
2931 int
2932 bnxt_hwrm_ring_grp_free(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
2933 {
2934 	struct hwrm_ring_grp_free_input req = {0};
2935 	int rc = 0;
2936 
2937 	if (grp->grp_id == HWRM_NA_SIGNATURE) {
2938 		printf("%s: attempt to free ring group %04x\n",
2939 		    DEVNAME(softc), grp->grp_id);
2940 		return EINVAL;
2941 	}
2942 
2943 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_FREE);
2944 	req.ring_group_id = htole32(grp->grp_id);
2945 
2946 	BNXT_HWRM_LOCK(softc);
2947 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2948 	if (rc == 0)
2949 		grp->grp_id = HWRM_NA_SIGNATURE;
2950 
2951 	BNXT_HWRM_UNLOCK(softc);
2952 	return (rc);
2953 }
2954 
2955 /*
2956  * Ring allocation message to the firmware
2957  */
2958 int
2959 bnxt_hwrm_ring_alloc(struct bnxt_softc *softc, uint8_t type,
2960     struct bnxt_ring *ring, uint16_t cmpl_ring_id, uint32_t stat_ctx_id,
2961     int irq)
2962 {
2963 	struct hwrm_ring_alloc_input req = {0};
2964 	struct hwrm_ring_alloc_output *resp;
2965 	int rc;
2966 
2967 	if (ring->phys_id != (uint16_t)HWRM_NA_SIGNATURE) {
2968 		printf("%s: attempt to re-allocate ring %04x\n",
2969 		    DEVNAME(softc), ring->phys_id);
2970 		return EINVAL;
2971 	}
2972 
2973 	resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
2974 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_ALLOC);
2975 	req.enables = htole32(0);
2976 	req.fbo = htole32(0);
2977 
2978 	if (stat_ctx_id != HWRM_NA_SIGNATURE) {
2979 		req.enables |= htole32(
2980 		    HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
2981 		req.stat_ctx_id = htole32(stat_ctx_id);
2982 	}
2983 	req.ring_type = type;
2984 	req.page_tbl_addr = htole64(ring->paddr);
2985 	req.length = htole32(ring->ring_size);
2986 	req.logical_id = htole16(ring->id);
2987 	req.cmpl_ring_id = htole16(cmpl_ring_id);
2988 	req.queue_id = htole16(softc->sc_tx_queue_id);
2989 	req.int_mode = (softc->sc_flags & BNXT_FLAG_MSIX) ?
2990 	    HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX :
2991 	    HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY;
2992 	BNXT_HWRM_LOCK(softc);
2993 	rc = _hwrm_send_message(softc, &req, sizeof(req));
2994 	if (rc)
2995 		goto fail;
2996 
2997 	ring->phys_id = le16toh(resp->ring_id);
2998 
2999 fail:
3000 	BNXT_HWRM_UNLOCK(softc);
3001 	return rc;
3002 }
3003 
3004 int
3005 bnxt_hwrm_ring_free(struct bnxt_softc *softc, uint8_t type, struct bnxt_ring *ring)
3006 {
3007 	struct hwrm_ring_free_input req = {0};
3008 	int rc;
3009 
3010 	if (ring->phys_id == (uint16_t)HWRM_NA_SIGNATURE) {
3011 		printf("%s: attempt to deallocate ring %04x\n",
3012 		    DEVNAME(softc), ring->phys_id);
3013 		return (EINVAL);
3014 	}
3015 
3016 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_FREE);
3017 	req.ring_type = type;
3018 	req.ring_id = htole16(ring->phys_id);
3019 	BNXT_HWRM_LOCK(softc);
3020 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3021 	if (rc)
3022 		goto fail;
3023 
3024 	ring->phys_id = (uint16_t)HWRM_NA_SIGNATURE;
3025 fail:
3026 	BNXT_HWRM_UNLOCK(softc);
3027 	return (rc);
3028 }
3029 
3030 
3031 int
3032 bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr,
3033     uint64_t paddr)
3034 {
3035 	struct hwrm_stat_ctx_alloc_input req = {0};
3036 	struct hwrm_stat_ctx_alloc_output *resp;
3037 	int rc = 0;
3038 
3039 	if (cpr->stats_ctx_id != HWRM_NA_SIGNATURE) {
3040 		printf("%s: attempt to re-allocate stats ctx %08x\n",
3041 		    DEVNAME(softc), cpr->stats_ctx_id);
3042 		return EINVAL;
3043 	}
3044 
3045 	resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
3046 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_ALLOC);
3047 
3048 	req.update_period_ms = htole32(1000);
3049 	req.stats_dma_addr = htole64(paddr);
3050 
3051 	BNXT_HWRM_LOCK(softc);
3052 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3053 	if (rc)
3054 		goto fail;
3055 
3056 	cpr->stats_ctx_id = le32toh(resp->stat_ctx_id);
3057 
3058 fail:
3059 	BNXT_HWRM_UNLOCK(softc);
3060 
3061 	return rc;
3062 }
3063 
3064 int
3065 bnxt_hwrm_stat_ctx_free(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr)
3066 {
3067 	struct hwrm_stat_ctx_free_input req = {0};
3068 	int rc = 0;
3069 
3070 	if (cpr->stats_ctx_id == HWRM_NA_SIGNATURE) {
3071 		printf("%s: attempt to free stats ctx %08x\n",
3072 		    DEVNAME(softc), cpr->stats_ctx_id);
3073 		return EINVAL;
3074 	}
3075 
3076 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_FREE);
3077 	req.stat_ctx_id = htole32(cpr->stats_ctx_id);
3078 
3079 	BNXT_HWRM_LOCK(softc);
3080 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3081 	BNXT_HWRM_UNLOCK(softc);
3082 
3083 	if (rc == 0)
3084 		cpr->stats_ctx_id = HWRM_NA_SIGNATURE;
3085 
3086 	return (rc);
3087 }
3088 
#if 0

/*
 * Request a DMA of the port's rx/tx hardware statistics into host
 * memory.
 *
 * Compiled out: references softc members (pf.port_id,
 * hw_rx_port_stats.idi_paddr) that this port does not define —
 * apparently left over from the FreeBSD driver this was derived from.
 * TODO confirm before re-enabling.
 */
int
bnxt_hwrm_port_qstats(struct bnxt_softc *softc)
{
	struct hwrm_port_qstats_input req = {0};
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_QSTATS);

	req.port_id = htole16(softc->pf.port_id);
	req.rx_stat_host_addr = htole64(softc->hw_rx_port_stats.idi_paddr);
	req.tx_stat_host_addr = htole64(softc->hw_tx_port_stats.idi_paddr);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	BNXT_HWRM_UNLOCK(softc);

	return rc;
}

#endif
3111 
3112 int
3113 bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *softc,
3114     uint32_t vnic_id, uint32_t rx_mask, uint64_t mc_addr, uint32_t mc_count)
3115 {
3116 	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
3117 
3118 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_SET_RX_MASK);
3119 
3120 	req.vnic_id = htole32(vnic_id);
3121 	req.mask = htole32(rx_mask);
3122 	req.mc_tbl_addr = htole64(mc_addr);
3123 	req.num_mc_entries = htole32(mc_count);
3124 	return hwrm_send_message(softc, &req, sizeof(req));
3125 }
3126 
3127 int
3128 bnxt_hwrm_set_filter(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
3129 {
3130 	struct hwrm_cfa_l2_filter_alloc_input	req = {0};
3131 	struct hwrm_cfa_l2_filter_alloc_output	*resp;
3132 	uint32_t enables = 0;
3133 	int rc = 0;
3134 
3135 	if (vnic->filter_id != -1) {
3136 		printf("%s: attempt to re-allocate l2 ctx filter\n",
3137 		    DEVNAME(softc));
3138 		return EINVAL;
3139 	}
3140 
3141 	resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
3142 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_ALLOC);
3143 
3144 	req.flags = htole32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX
3145 	    | HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);
3146 	enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR
3147 	    | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK
3148 	    | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3149 	req.enables = htole32(enables);
3150 	req.dst_id = htole16(vnic->id);
3151 	memcpy(req.l2_addr, softc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
3152 	memset(&req.l2_addr_mask, 0xff, sizeof(req.l2_addr_mask));
3153 
3154 	BNXT_HWRM_LOCK(softc);
3155 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3156 	if (rc)
3157 		goto fail;
3158 
3159 	vnic->filter_id = le64toh(resp->l2_filter_id);
3160 	vnic->flow_id = le64toh(resp->flow_id);
3161 
3162 fail:
3163 	BNXT_HWRM_UNLOCK(softc);
3164 	return (rc);
3165 }
3166 
3167 int
3168 bnxt_hwrm_free_filter(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
3169 {
3170 	struct hwrm_cfa_l2_filter_free_input req = {0};
3171 	int rc = 0;
3172 
3173 	if (vnic->filter_id == -1) {
3174 		printf("%s: attempt to deallocate filter %llx\n",
3175 		     DEVNAME(softc), vnic->filter_id);
3176 		return (EINVAL);
3177 	}
3178 
3179 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_FREE);
3180 	req.l2_filter_id = htole64(vnic->filter_id);
3181 
3182 	BNXT_HWRM_LOCK(softc);
3183 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3184 	if (rc == 0)
3185 		vnic->filter_id = -1;
3186 	BNXT_HWRM_UNLOCK(softc);
3187 
3188 	return (rc);
3189 }
3190 
3191 
3192 int
3193 bnxt_hwrm_vnic_rss_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic,
3194     uint32_t hash_type, daddr_t rss_table, daddr_t rss_key)
3195 {
3196 	struct hwrm_vnic_rss_cfg_input	req = {0};
3197 
3198 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_CFG);
3199 
3200 	req.hash_type = htole32(hash_type);
3201 	req.ring_grp_tbl_addr = htole64(rss_table);
3202 	req.hash_key_tbl_addr = htole64(rss_key);
3203 	req.rss_ctx_idx = htole16(vnic->rss_id);
3204 
3205 	return hwrm_send_message(softc, &req, sizeof(req));
3206 }
3207 
/*
 * Tell the firmware which completion ring should receive async event
 * notifications for this function.
 */
int
bnxt_cfg_async_cr(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr)
{
	int rc = 0;

	/*
	 * NOTE(review): hardwired to the PF path; the VF branch below is
	 * currently dead code, kept for when VF support is added.
	 */
	if (1 /* BNXT_PF(softc) */) {
		struct hwrm_func_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_CFG);

		/* fid 0xffff presumably targets our own function — verify */
		req.fid = htole16(0xffff);
		req.enables = htole32(HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = htole16(cpr->ring.phys_id);

		rc = hwrm_send_message(softc, &req, sizeof(req));
	} else {
		struct hwrm_func_vf_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_VF_CFG);

		req.enables = htole32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = htole16(cpr->ring.phys_id);

		rc = hwrm_send_message(softc, &req, sizeof(req));
	}
	return rc;
}
3235 
3236 #if 0
3237 
/*
 * Clamp the hardware LRO tunables to the ranges the firmware accepts.
 * Compiled out along with the rest of the LRO support (softc->hw_lro
 * does not exist in this port).
 */
void
bnxt_validate_hw_lro_settings(struct bnxt_softc *softc)
{
	softc->hw_lro.enable = min(softc->hw_lro.enable, 1);

        softc->hw_lro.is_mode_gro = min(softc->hw_lro.is_mode_gro, 1);

	softc->hw_lro.max_agg_segs = min(softc->hw_lro.max_agg_segs,
		HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX);

	softc->hw_lro.max_aggs = min(softc->hw_lro.max_aggs,
		HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);

	softc->hw_lro.min_agg_len = min(softc->hw_lro.min_agg_len, BNXT_MAX_MTU);
}
3253 
/*
 * Configure TPA (hardware LRO) on the vnic according to the hw_lro
 * tunables; an all-zero request (hw_lro.enable == 0) disables TPA.
 * Compiled out: depends on softc->hw_lro, which this port lacks.
 */
int
bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc)
{
	struct hwrm_vnic_tpa_cfg_input req = {0};
	uint32_t flags;

	/* nothing to configure before the vnic has been allocated */
	if (softc->vnic_info.id == (uint16_t) HWRM_NA_SIGNATURE) {
		return 0;
	}

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_TPA_CFG);

	if (softc->hw_lro.enable) {
		flags = HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ;

        	if (softc->hw_lro.is_mode_gro)
			flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO;
		else
			flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE;

		req.flags = htole32(flags);

		req.enables = htole32(HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);

		req.max_agg_segs = htole16(softc->hw_lro.max_agg_segs);
		req.max_aggs = htole16(softc->hw_lro.max_aggs);
		req.min_agg_len = htole32(softc->hw_lro.min_agg_len);
	}

	req.vnic_id = htole16(softc->vnic_info.id);

	return hwrm_send_message(softc, &req, sizeof(req));
}
3292 
3293 
/*
 * Ask the firmware to reset one of its embedded processors; on return
 * *selfreset holds the firmware-reported self-reset status.
 * Compiled out: uses MPASS() and the hwrm_cmd_resp.idi_vaddr naming,
 * apparently from the FreeBSD driver this was derived from.
 */
int
bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor,
    uint8_t *selfreset)
{
	struct hwrm_fw_reset_input req = {0};
	struct hwrm_fw_reset_output *resp =
	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
	int rc;

	MPASS(selfreset);

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_RESET);
	req.embedded_proc_type = processor;
	req.selfrst_status = *selfreset;

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto exit;
	*selfreset = resp->selfrst_status;

exit:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}
3319 
/*
 * Query the self-reset status of an embedded firmware processor into
 * *selfreset. Compiled out for the same reasons as bnxt_hwrm_fw_reset:
 * MPASS() and the hwrm_cmd_resp.idi_vaddr naming are not available here.
 */
int
bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type, uint8_t *selfreset)
{
	struct hwrm_fw_qstatus_input req = {0};
	struct hwrm_fw_qstatus_output *resp =
	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
	int rc;

	MPASS(selfreset);

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_QSTATUS);
	req.embedded_proc_type = type;

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto exit;
	*selfreset = resp->selfrst_status;

exit:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}
3343 
3344 #endif
3345 
3346 int
3347 bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *softc, uint16_t *mfg_id,
3348     uint16_t *device_id, uint32_t *sector_size, uint32_t *nvram_size,
3349     uint32_t *reserved_size, uint32_t *available_size)
3350 {
3351 	struct hwrm_nvm_get_dev_info_input req = {0};
3352 	struct hwrm_nvm_get_dev_info_output *resp =
3353 	    BNXT_DMA_KVA(softc->sc_cmd_resp);
3354 	int rc;
3355 	uint32_t old_timeo;
3356 
3357 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DEV_INFO);
3358 
3359 	BNXT_HWRM_LOCK(softc);
3360 	old_timeo = softc->sc_cmd_timeo;
3361 	softc->sc_cmd_timeo = BNXT_NVM_TIMEO;
3362 	rc = _hwrm_send_message(softc, &req, sizeof(req));
3363 	softc->sc_cmd_timeo = old_timeo;
3364 	if (rc)
3365 		goto exit;
3366 
3367 	if (mfg_id)
3368 		*mfg_id = le16toh(resp->manufacturer_id);
3369 	if (device_id)
3370 		*device_id = le16toh(resp->device_id);
3371 	if (sector_size)
3372 		*sector_size = le32toh(resp->sector_size);
3373 	if (nvram_size)
3374 		*nvram_size = le32toh(resp->nvram_size);
3375 	if (reserved_size)
3376 		*reserved_size = le32toh(resp->reserved_size);
3377 	if (available_size)
3378 		*available_size = le32toh(resp->available_size);
3379 
3380 exit:
3381 	BNXT_HWRM_UNLOCK(softc);
3382 	return rc;
3383 }
3384 
3385 #if 0
3386 
/*
 * Read the firmware's real-time clock; each output pointer is optional
 * (NULL skips that field). Compiled out: uses the hwrm_cmd_resp.idi_vaddr
 * DMA naming this port does not provide.
 */
int
bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year, uint8_t *month,
    uint8_t *day, uint8_t *hour, uint8_t *minute, uint8_t *second,
    uint16_t *millisecond, uint16_t *zone)
{
	struct hwrm_fw_get_time_input req = {0};
	struct hwrm_fw_get_time_output *resp =
	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
	int rc;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_GET_TIME);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto exit;

	if (year)
		*year = le16toh(resp->year);
	if (month)
		*month = resp->month;
	if (day)
		*day = resp->day;
	if (hour)
		*hour = resp->hour;
	if (minute)
		*minute = resp->minute;
	if (second)
		*second = resp->second;
	if (millisecond)
		*millisecond = le16toh(resp->millisecond);
	if (zone)
		*zone = le16toh(resp->zone);

exit:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}
3425 
/*
 * Set the firmware's real-time clock. Compiled out together with
 * bnxt_hwrm_fw_get_time above.
 */
int
bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year, uint8_t month,
    uint8_t day, uint8_t hour, uint8_t minute, uint8_t second,
    uint16_t millisecond, uint16_t zone)
{
	struct hwrm_fw_set_time_input req = {0};

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_SET_TIME);

	req.year = htole16(year);
	req.month = month;
	req.day = day;
	req.hour = hour;
	req.minute = minute;
	req.second = second;
	req.millisecond = htole16(millisecond);
	req.zone = htole16(zone);
	return hwrm_send_message(softc, &req, sizeof(req));
}
3445 
3446 #endif
3447 
3448 void
3449 _bnxt_hwrm_set_async_event_bit(struct hwrm_func_drv_rgtr_input *req, int bit)
3450 {
3451 	req->async_event_fwd[bit/32] |= (1 << (bit % 32));
3452 }
3453 
3454 int bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *softc)
3455 {
3456 	struct hwrm_func_drv_rgtr_input req = {0};
3457 	int events[] = {
3458 		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
3459 		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
3460 		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
3461 		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
3462 		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE
3463 	};
3464 	int i;
3465 
3466 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);
3467 
3468 	req.enables =
3469 		htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
3470 
3471 	for (i = 0; i < nitems(events); i++)
3472 		_bnxt_hwrm_set_async_event_bit(&req, events[i]);
3473 
3474 	return hwrm_send_message(softc, &req, sizeof(req));
3475 }
3476 
3477 int
3478 bnxt_get_sffpage(struct bnxt_softc *softc, struct if_sffpage *sff)
3479 {
3480 	struct hwrm_port_phy_i2c_read_input req;
3481 	struct hwrm_port_phy_i2c_read_output *out;
3482 	int offset;
3483 
3484 	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_I2C_READ);
3485 	req.i2c_slave_addr = sff->sff_addr;
3486 	req.page_number = htole16(sff->sff_page);
3487 
3488 	for (offset = 0; offset < 256; offset += sizeof(out->data)) {
3489 		req.page_offset = htole16(offset);
3490 		req.data_length = sizeof(out->data);
3491 		req.enables = htole32(HWRM_PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET);
3492 
3493 		if (hwrm_send_message(softc, &req, sizeof(req))) {
3494 			printf("%s: failed to read i2c data\n", DEVNAME(softc));
3495 			return 1;
3496 		}
3497 
3498 		out = (struct hwrm_port_phy_i2c_read_output *)
3499 		    BNXT_DMA_KVA(softc->sc_cmd_resp);
3500 		memcpy(sff->sff_data + offset, out->data, sizeof(out->data));
3501 	}
3502 
3503 	return 0;
3504 }
3505