/*	$OpenBSD: if_mcx.c,v 1.98 2021/01/27 07:46:11 dlg Exp $ */

/*
 * Copyright (c) 2017 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2019 Jonathan Matthew <jmatthew@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "vlan.h"
#include "kstat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/timeout.h>
#include <sys/task.h>
#include <sys/atomic.h>
#include <sys/timetc.h>
#include <sys/intrmap.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/toeplitz.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if NKSTAT > 0
#include <sys/kstat.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define BUS_DMASYNC_PRERW	(BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define BUS_DMASYNC_POSTRW	(BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)

#define MCX_HCA_BAR	PCI_MAPREG_START /* BAR 0 */

#define MCX_FW_VER			0x0000
#define  MCX_FW_VER_MAJOR(_v)			((_v) & 0xffff)
#define  MCX_FW_VER_MINOR(_v)			((_v) >> 16)
#define MCX_CMDIF_FW_SUBVER		0x0004
#define  MCX_FW_VER_SUBMINOR(_v)		((_v) & 0xffff)
#define  MCX_CMDIF(_v)				((_v) >> 16)

#define MCX_ISSI			1 /* as per the PRM */
#define MCX_CMD_IF_SUPPORTED		5

#define MCX_HARDMTU			9500

#define MCX_PAGE_SHIFT			12
#define MCX_PAGE_SIZE			(1 << MCX_PAGE_SHIFT)

/* queue sizes */
#define MCX_LOG_EQ_SIZE			7
#define MCX_LOG_CQ_SIZE			12
#define MCX_LOG_RQ_SIZE			10
#define MCX_LOG_SQ_SIZE			11

#define MCX_MAX_QUEUES			16

/* completion event moderation - about 10khz, or 90% of the cq */
#define MCX_CQ_MOD_PERIOD		50
#define MCX_CQ_MOD_COUNTER		\
	(((1 << (MCX_LOG_CQ_SIZE - 1)) * 9) / 10)

#define MCX_LOG_SQ_ENTRY_SIZE		6
#define MCX_SQ_ENTRY_MAX_SLOTS		4
#define MCX_SQ_SEGS_PER_SLOT		\
	(sizeof(struct mcx_sq_entry) / sizeof(struct mcx_sq_entry_seg))
#define MCX_SQ_MAX_SEGMENTS		\
	1 + ((MCX_SQ_ENTRY_MAX_SLOTS-1) * MCX_SQ_SEGS_PER_SLOT)

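/*
 * Worked numbers for the segment macros above: struct mcx_sq_entry (below)
 * is 64 bytes and struct mcx_sq_entry_seg is 16 bytes, so each extra slot
 * carries 4 data segments. The first slot holds the control and ethernet
 * segments plus one data segment, giving 1 + (4 - 1) * 4 = 13 segments
 * for a 4-slot entry.
 */
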
#define MCX_LOG_FLOW_TABLE_SIZE		5
#define MCX_NUM_STATIC_FLOWS		4 /* promisc, allmulti, ucast, bcast */
#define MCX_NUM_MCAST_FLOWS		\
	((1 << MCX_LOG_FLOW_TABLE_SIZE) - MCX_NUM_STATIC_FLOWS)

#define MCX_SQ_INLINE_SIZE		18
CTASSERT(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN == MCX_SQ_INLINE_SIZE);

/* doorbell offsets */
#define MCX_DOORBELL_AREA_SIZE		MCX_PAGE_SIZE

#define MCX_CQ_DOORBELL_BASE		0
#define MCX_CQ_DOORBELL_STRIDE		64

#define MCX_WQ_DOORBELL_BASE		MCX_PAGE_SIZE/2
#define MCX_WQ_DOORBELL_STRIDE		64
/* make sure the doorbells fit */
CTASSERT(MCX_MAX_QUEUES * MCX_CQ_DOORBELL_STRIDE < MCX_WQ_DOORBELL_BASE);
CTASSERT(MCX_MAX_QUEUES * MCX_WQ_DOORBELL_STRIDE <
    MCX_DOORBELL_AREA_SIZE - MCX_WQ_DOORBELL_BASE);

#define MCX_WQ_DOORBELL_MASK		0xffff

/* uar registers */
#define MCX_UAR_CQ_DOORBELL		0x20
#define MCX_UAR_EQ_DOORBELL_ARM		0x40
#define MCX_UAR_EQ_DOORBELL		0x48
#define MCX_UAR_BF			0x800

#define MCX_CMDQ_ADDR_HI		0x0010
#define MCX_CMDQ_ADDR_LO		0x0014
#define MCX_CMDQ_ADDR_NMASK		0xfff
#define MCX_CMDQ_LOG_SIZE(_v)		((_v) >> 4 & 0xf)
#define MCX_CMDQ_LOG_STRIDE(_v)		((_v) >> 0 & 0xf)
#define MCX_CMDQ_INTERFACE_MASK		(0x3 << 8)
#define MCX_CMDQ_INTERFACE_FULL_DRIVER	(0x0 << 8)
#define MCX_CMDQ_INTERFACE_DISABLED	(0x1 << 8)

#define MCX_CMDQ_DOORBELL		0x0018

#define MCX_STATE			0x01fc
#define MCX_STATE_MASK				(1 << 31)
#define MCX_STATE_INITIALIZING			(1 << 31)
#define MCX_STATE_READY				(0 << 31)
#define MCX_STATE_INTERFACE_MASK		(0x3 << 24)
#define MCX_STATE_INTERFACE_FULL_DRIVER		(0x0 << 24)
#define MCX_STATE_INTERFACE_DISABLED		(0x1 << 24)

#define MCX_INTERNAL_TIMER		0x1000
#define MCX_INTERNAL_TIMER_H		0x1000
#define MCX_INTERNAL_TIMER_L		0x1004

#define MCX_CLEAR_INT			0x100c

#define MCX_REG_OP_WRITE		0
#define MCX_REG_OP_READ			1

#define MCX_REG_PMLP			0x5002
#define MCX_REG_PMTU			0x5003
#define MCX_REG_PTYS			0x5004
#define MCX_REG_PAOS			0x5006
#define MCX_REG_PFCC			0x5007
#define MCX_REG_PPCNT			0x5008
#define MCX_REG_MTCAP			0x9009 /* mgmt temp capabilities */
#define MCX_REG_MTMP			0x900a /* mgmt temp */
#define MCX_REG_MCIA			0x9014
#define MCX_REG_MCAM			0x907f

#define MCX_ETHER_CAP_SGMII		0
#define MCX_ETHER_CAP_1000_KX		1
#define MCX_ETHER_CAP_10G_CX4		2
#define MCX_ETHER_CAP_10G_KX4		3
#define MCX_ETHER_CAP_10G_KR		4
#define MCX_ETHER_CAP_40G_CR4		6
#define MCX_ETHER_CAP_40G_KR4		7
#define MCX_ETHER_CAP_10G_CR		12
#define MCX_ETHER_CAP_10G_SR		13
#define MCX_ETHER_CAP_10G_LR		14
#define MCX_ETHER_CAP_40G_SR4		15
#define MCX_ETHER_CAP_40G_LR4		16
#define MCX_ETHER_CAP_50G_SR2		18
#define MCX_ETHER_CAP_100G_CR4		20
#define MCX_ETHER_CAP_100G_SR4		21
#define MCX_ETHER_CAP_100G_KR4		22
#define MCX_ETHER_CAP_25G_CR		27
#define MCX_ETHER_CAP_25G_KR		28
#define MCX_ETHER_CAP_25G_SR		29
#define MCX_ETHER_CAP_50G_CR2		30
#define MCX_ETHER_CAP_50G_KR2		31

#define MCX_MAX_CQE			32

#define MCX_CMD_QUERY_HCA_CAP		0x100
#define MCX_CMD_QUERY_ADAPTER		0x101
#define MCX_CMD_INIT_HCA		0x102
#define MCX_CMD_TEARDOWN_HCA		0x103
#define MCX_CMD_ENABLE_HCA		0x104
#define MCX_CMD_DISABLE_HCA		0x105
#define MCX_CMD_QUERY_PAGES		0x107
#define MCX_CMD_MANAGE_PAGES		0x108
#define MCX_CMD_SET_HCA_CAP		0x109
#define MCX_CMD_QUERY_ISSI		0x10a
#define MCX_CMD_SET_ISSI		0x10b
#define MCX_CMD_SET_DRIVER_VERSION	0x10d
#define MCX_CMD_QUERY_SPECIAL_CONTEXTS	0x203
#define MCX_CMD_CREATE_EQ		0x301
#define MCX_CMD_DESTROY_EQ		0x302
#define MCX_CMD_QUERY_EQ		0x303
#define MCX_CMD_CREATE_CQ		0x400
#define MCX_CMD_DESTROY_CQ		0x401
#define MCX_CMD_QUERY_CQ		0x402
#define MCX_CMD_QUERY_NIC_VPORT_CONTEXT	0x754
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT \
					0x755
#define MCX_CMD_QUERY_VPORT_COUNTERS	0x770
#define MCX_CMD_ALLOC_PD		0x800
#define MCX_CMD_ALLOC_UAR		0x802
#define MCX_CMD_ACCESS_REG		0x805
#define MCX_CMD_ALLOC_TRANSPORT_DOMAIN	0x816
#define MCX_CMD_CREATE_TIR		0x900
#define MCX_CMD_DESTROY_TIR		0x902
#define MCX_CMD_CREATE_SQ		0x904
#define MCX_CMD_MODIFY_SQ		0x905
#define MCX_CMD_DESTROY_SQ		0x906
#define MCX_CMD_QUERY_SQ		0x907
#define MCX_CMD_CREATE_RQ		0x908
#define MCX_CMD_MODIFY_RQ		0x909
#define MCX_CMD_DESTROY_RQ		0x90a
#define MCX_CMD_QUERY_RQ		0x90b
#define MCX_CMD_CREATE_TIS		0x912
#define MCX_CMD_DESTROY_TIS		0x914
#define MCX_CMD_CREATE_RQT		0x916
#define MCX_CMD_DESTROY_RQT		0x918
#define MCX_CMD_SET_FLOW_TABLE_ROOT	0x92f
#define MCX_CMD_CREATE_FLOW_TABLE	0x930
#define MCX_CMD_DESTROY_FLOW_TABLE	0x931
#define MCX_CMD_QUERY_FLOW_TABLE	0x932
#define MCX_CMD_CREATE_FLOW_GROUP	0x933
#define MCX_CMD_DESTROY_FLOW_GROUP	0x934
#define MCX_CMD_QUERY_FLOW_GROUP	0x935
#define MCX_CMD_SET_FLOW_TABLE_ENTRY	0x936
#define MCX_CMD_QUERY_FLOW_TABLE_ENTRY	0x937
#define MCX_CMD_DELETE_FLOW_TABLE_ENTRY	0x938
#define MCX_CMD_ALLOC_FLOW_COUNTER	0x939
#define MCX_CMD_QUERY_FLOW_COUNTER	0x93b

#define MCX_QUEUE_STATE_RST		0
#define MCX_QUEUE_STATE_RDY		1
#define MCX_QUEUE_STATE_ERR		3

#define MCX_FLOW_TABLE_TYPE_RX		0
#define MCX_FLOW_TABLE_TYPE_TX		1

#define MCX_CMDQ_INLINE_DATASIZE 16

struct mcx_cmdq_entry {
	uint8_t			cq_type;
#define MCX_CMDQ_TYPE_PCIE		0x7
	uint8_t			cq_reserved0[3];

	uint32_t		cq_input_length;
	uint64_t		cq_input_ptr;
	uint8_t			cq_input_data[MCX_CMDQ_INLINE_DATASIZE];

	uint8_t			cq_output_data[MCX_CMDQ_INLINE_DATASIZE];
	uint64_t		cq_output_ptr;
	uint32_t		cq_output_length;

	uint8_t			cq_token;
	uint8_t			cq_signature;
	uint8_t			cq_reserved1[1];
	uint8_t			cq_status;
#define MCX_CQ_STATUS_SHIFT		1
#define MCX_CQ_STATUS_MASK		(0x7f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OK		(0x00 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_INT_ERR		(0x01 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OPCODE	(0x02 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_PARAM		(0x03 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SYS_STATE	(0x04 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE	(0x05 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_RESOURCE_BUSY	(0x06 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_EXCEED_LIM	(0x08 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RES_STATE	(0x09 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INDEX		(0x0a << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_NO_RESOURCES	(0x0f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INPUT_LEN	(0x50 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OUTPUT_LEN	(0x51 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE_STATE \
					(0x10 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SIZE		(0x40 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OWN_MASK		0x1
#define MCX_CQ_STATUS_OWN_SW		0x0
#define MCX_CQ_STATUS_OWN_HW		0x1
} __packed __aligned(8);
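
/*
 * Illustrative helpers (not in the original driver): the cq_status byte
 * above packs two things. The low bit is the ownership token, set while
 * the entry belongs to the hardware, and the upper seven bits carry the
 * completion status once ownership returns to software.
 */
static inline int
mcx_cmdq_entry_owned_by_hw(const struct mcx_cmdq_entry *cqe)
{
	return ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
	    MCX_CQ_STATUS_OWN_HW);
}

static inline uint8_t
mcx_cmdq_entry_status(const struct mcx_cmdq_entry *cqe)
{
	return (cqe->cq_status & MCX_CQ_STATUS_MASK);
}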

#define MCX_CMDQ_MAILBOX_DATASIZE	512

struct mcx_cmdq_mailbox {
	uint8_t			mb_data[MCX_CMDQ_MAILBOX_DATASIZE];
	uint8_t			mb_reserved0[48];
	uint64_t		mb_next_ptr;
	uint32_t		mb_block_number;
	uint8_t			mb_reserved1[1];
	uint8_t			mb_token;
	uint8_t			mb_ctrl_signature;
	uint8_t			mb_signature;
} __packed __aligned(8);

#define MCX_CMDQ_MAILBOX_ALIGN	(1 << 10)
#define MCX_CMDQ_MAILBOX_SIZE	roundup(sizeof(struct mcx_cmdq_mailbox), \
				    MCX_CMDQ_MAILBOX_ALIGN)
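
/*
 * Illustrative sketch (not in the original driver): command payloads
 * larger than MCX_CMDQ_INLINE_DATASIZE spill into a chain of mailboxes,
 * each carrying MCX_CMDQ_MAILBOX_DATASIZE bytes of data, so sizing a
 * chain is a simple division.
 */
static inline unsigned int
mcx_cmdq_nmailboxes(size_t len)
{
	return (howmany(len, MCX_CMDQ_MAILBOX_DATASIZE));
}
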
/*
 * command mailbox structures
 */

struct mcx_cmd_enable_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_function_id;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_enable_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_init_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_init_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
#define MCX_CMD_TEARDOWN_HCA_GRACEFUL	0x0
#define MCX_CMD_TEARDOWN_HCA_PANIC	0x1
	uint16_t		cmd_profile;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_access_reg_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_register_id;
	uint32_t		cmd_argument;
} __packed __aligned(4);

struct mcx_cmd_access_reg_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_reg_pmtu {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2[2];
	uint16_t		rp_max_mtu;
	uint8_t			rp_reserved3[2];
	uint16_t		rp_admin_mtu;
	uint8_t			rp_reserved4[2];
	uint16_t		rp_oper_mtu;
	uint8_t			rp_reserved5[2];
} __packed __aligned(4);

struct mcx_reg_ptys {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2;
	uint8_t			rp_proto_mask;
#define MCX_REG_PTYS_PROTO_MASK_ETH		(1 << 2)
	uint8_t			rp_reserved3[8];
	uint32_t		rp_eth_proto_cap;
	uint8_t			rp_reserved4[8];
	uint32_t		rp_eth_proto_admin;
	uint8_t			rp_reserved5[8];
	uint32_t		rp_eth_proto_oper;
	uint8_t			rp_reserved6[24];
} __packed __aligned(4);

struct mcx_reg_paos {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_admin_status;
#define MCX_REG_PAOS_ADMIN_STATUS_UP		1
#define MCX_REG_PAOS_ADMIN_STATUS_DOWN		2
#define MCX_REG_PAOS_ADMIN_STATUS_UP_ONCE	3
#define MCX_REG_PAOS_ADMIN_STATUS_DISABLED	4
	uint8_t			rp_oper_status;
#define MCX_REG_PAOS_OPER_STATUS_UP		1
#define MCX_REG_PAOS_OPER_STATUS_DOWN		2
#define MCX_REG_PAOS_OPER_STATUS_FAILED		4
	uint8_t			rp_admin_state_update;
#define MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN	(1 << 7)
	uint8_t			rp_reserved2[11];
} __packed __aligned(4);

struct mcx_reg_pfcc {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2[3];
	uint8_t			rp_prio_mask_tx;
	uint8_t			rp_reserved3;
	uint8_t			rp_prio_mask_rx;
	uint8_t			rp_pptx_aptx;
	uint8_t			rp_pfctx;
	uint8_t			rp_fctx_dis;
	uint8_t			rp_reserved4;
	uint8_t			rp_pprx_aprx;
	uint8_t			rp_pfcrx;
	uint8_t			rp_reserved5[2];
	uint16_t		rp_dev_stall_min;
	uint16_t		rp_dev_stall_crit;
	uint8_t			rp_reserved6[12];
} __packed __aligned(4);

#define MCX_PMLP_MODULE_NUM_MASK	0xff
struct mcx_reg_pmlp {
	uint8_t			rp_rxtx;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved0;
	uint8_t			rp_width;
	uint32_t		rp_lane0_mapping;
	uint32_t		rp_lane1_mapping;
	uint32_t		rp_lane2_mapping;
	uint32_t		rp_lane3_mapping;
	uint8_t			rp_reserved1[44];
} __packed __aligned(4);

struct mcx_reg_ppcnt {
	uint8_t			ppcnt_swid;
	uint8_t			ppcnt_local_port;
	uint8_t			ppcnt_pnat;
	uint8_t			ppcnt_grp;
#define MCX_REG_PPCNT_GRP_IEEE8023		0x00
#define MCX_REG_PPCNT_GRP_RFC2863		0x01
#define MCX_REG_PPCNT_GRP_RFC2819		0x02
#define MCX_REG_PPCNT_GRP_RFC3635		0x03
#define MCX_REG_PPCNT_GRP_PER_PRIO		0x10
#define MCX_REG_PPCNT_GRP_PER_TC		0x11
#define MCX_REG_PPCNT_GRP_PER_RX_BUFFER		0x11

	uint8_t			ppcnt_clr;
	uint8_t			ppcnt_reserved1[2];
	uint8_t			ppcnt_prio_tc;
#define MCX_REG_PPCNT_CLR			(1 << 7)

	uint8_t			ppcnt_counter_set[248];
} __packed __aligned(8);
CTASSERT(sizeof(struct mcx_reg_ppcnt) == 256);
CTASSERT((offsetof(struct mcx_reg_ppcnt, ppcnt_counter_set) %
    sizeof(uint64_t)) == 0);

enum mcx_ppcnt_ieee8023 {
	frames_transmitted_ok,
	frames_received_ok,
	frame_check_sequence_errors,
	alignment_errors,
	octets_transmitted_ok,
	octets_received_ok,
	multicast_frames_xmitted_ok,
	broadcast_frames_xmitted_ok,
	multicast_frames_received_ok,
	broadcast_frames_received_ok,
	in_range_length_errors,
	out_of_range_length_field,
	frame_too_long_errors,
	symbol_error_during_carrier,
	mac_control_frames_transmitted,
	mac_control_frames_received,
	unsupported_opcodes_received,
	pause_mac_ctrl_frames_received,
	pause_mac_ctrl_frames_transmitted,

	mcx_ppcnt_ieee8023_count
};
CTASSERT(mcx_ppcnt_ieee8023_count * sizeof(uint64_t) == 0x98);
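
/*
 * Illustrative sketch, assuming the PPCNT counter set is an array of
 * 64-bit big-endian values indexed by the enums in this file (e.g.
 * frames_transmitted_ok above):
 */
static inline uint64_t
mcx_ppcnt_counter(const struct mcx_reg_ppcnt *ppcnt, unsigned int i)
{
	uint64_t v;

	/* copy out, since the counter set is only 64-bit aligned */
	memcpy(&v, &ppcnt->ppcnt_counter_set[i * sizeof(v)], sizeof(v));
	return (betoh64(v));
}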

enum mcx_ppcnt_rfc2863 {
	in_octets,
	in_ucast_pkts,
	in_discards,
	in_errors,
	in_unknown_protos,
	out_octets,
	out_ucast_pkts,
	out_discards,
	out_errors,
	in_multicast_pkts,
	in_broadcast_pkts,
	out_multicast_pkts,
	out_broadcast_pkts,

	mcx_ppcnt_rfc2863_count
};
CTASSERT(mcx_ppcnt_rfc2863_count * sizeof(uint64_t) == 0x68);

enum mcx_ppcnt_rfc2819 {
	drop_events,
	octets,
	pkts,
	broadcast_pkts,
	multicast_pkts,
	crc_align_errors,
	undersize_pkts,
	oversize_pkts,
	fragments,
	jabbers,
	collisions,
	pkts64octets,
	pkts65to127octets,
	pkts128to255octets,
	pkts256to511octets,
	pkts512to1023octets,
	pkts1024to1518octets,
	pkts1519to2047octets,
	pkts2048to4095octets,
	pkts4096to8191octets,
	pkts8192to10239octets,

	mcx_ppcnt_rfc2819_count
};
CTASSERT((mcx_ppcnt_rfc2819_count * sizeof(uint64_t)) == 0xa8);

enum mcx_ppcnt_rfc3635 {
	dot3stats_alignment_errors,
	dot3stats_fcs_errors,
	dot3stats_single_collision_frames,
	dot3stats_multiple_collision_frames,
	dot3stats_sqe_test_errors,
	dot3stats_deferred_transmissions,
	dot3stats_late_collisions,
	dot3stats_excessive_collisions,
	dot3stats_internal_mac_transmit_errors,
	dot3stats_carrier_sense_errors,
	dot3stats_frame_too_longs,
	dot3stats_internal_mac_receive_errors,
	dot3stats_symbol_errors,
	dot3control_in_unknown_opcodes,
	dot3in_pause_frames,
	dot3out_pause_frames,

	mcx_ppcnt_rfc3635_count
};
CTASSERT((mcx_ppcnt_rfc3635_count * sizeof(uint64_t)) == 0x80);

struct mcx_reg_mcam {
	uint8_t			_reserved1[1];
	uint8_t			mcam_feature_group;
	uint8_t			_reserved2[1];
	uint8_t			mcam_access_reg_group;
	uint8_t			_reserved3[4];
	uint8_t			mcam_access_reg_cap_mask[16];
	uint8_t			_reserved4[16];
	uint8_t			mcam_feature_cap_mask[16];
	uint8_t			_reserved5[16];
} __packed __aligned(4);

#define MCX_BITFIELD_BIT(bf, b)	(bf[(sizeof bf - 1) - (b / 8)] & (1 << (b % 8)))

#define MCX_MCAM_FEATURE_CAP_SENSOR_MAP	6

struct mcx_reg_mtcap {
	uint8_t			_reserved1[3];
	uint8_t			mtcap_sensor_count;
	uint8_t			_reserved2[4];

	uint64_t		mtcap_sensor_map;
};

struct mcx_reg_mtmp {
	uint8_t			_reserved1[2];
	uint16_t		mtmp_sensor_index;

	uint8_t			_reserved2[2];
	uint16_t		mtmp_temperature;

	uint16_t		mtmp_mte_mtr;
#define MCX_REG_MTMP_MTE		(1 << 15)
#define MCX_REG_MTMP_MTR		(1 << 14)
	uint16_t		mtmp_max_temperature;

	uint16_t		mtmp_tee;
#define MCX_REG_MTMP_TEE_NOPE		(0 << 14)
#define MCX_REG_MTMP_TEE_GENERATE	(1 << 14)
#define MCX_REG_MTMP_TEE_GENERATE_ONE	(2 << 14)
	uint16_t		mtmp_temperature_threshold_hi;

	uint8_t			_reserved3[2];
	uint16_t		mtmp_temperature_threshold_lo;

	uint8_t			_reserved4[4];

	uint8_t			mtmp_sensor_name[8];
};
CTASSERT(sizeof(struct mcx_reg_mtmp) == 0x20);
CTASSERT(offsetof(struct mcx_reg_mtmp, mtmp_sensor_name) == 0x18);
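
/*
 * Illustrative sketch, assuming mtmp_temperature is a signed value in
 * units of 0.125 degC (as other ConnectX drivers treat it); converting
 * the big-endian register value to microkelvin for the sensor framework
 * would look like this.
 */
static inline int64_t
mcx_mtmp_to_microkelvin(uint16_t raw)
{
	int16_t t = (int16_t)betoh16(raw);

	/* 0.125 degC steps: 125000 microdegrees, offset by 273.15 degC */
	return ((int64_t)t * 125000 + 273150000);
}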

#define MCX_MCIA_EEPROM_BYTES	32
struct mcx_reg_mcia {
	uint8_t			rm_l;
	uint8_t			rm_module;
	uint8_t			rm_reserved0;
	uint8_t			rm_status;
	uint8_t			rm_i2c_addr;
	uint8_t			rm_page_num;
	uint16_t		rm_dev_addr;
	uint16_t		rm_reserved1;
	uint16_t		rm_size;
	uint32_t		rm_reserved2;
	uint8_t			rm_data[48];
} __packed __aligned(4);

struct mcx_cmd_query_issi_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_issi_il_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_current_issi;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_il_out) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_issi_mb_out {
	uint8_t			cmd_reserved2[16];
	uint8_t			cmd_supported_issi[80]; /* very big endian */
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_mb_out) <= MCX_CMDQ_MAILBOX_DATASIZE);
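
/*
 * Illustrative sketch (an assumption about the layout): the "very big
 * endian" array above is read as a 640-bit bitmap of supported ISSI
 * versions with bit 0 in the last byte, which is exactly the indexing
 * MCX_BITFIELD_BIT performs.
 */
static inline int
mcx_issi_supported(const struct mcx_cmd_query_issi_mb_out *mb,
    unsigned int issi)
{
	return (MCX_BITFIELD_BIT(mb->cmd_supported_issi, issi));
}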

struct mcx_cmd_set_issi_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_current_issi;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_in) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_set_issi_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_out) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_pages_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_QUERY_PAGES_BOOT	0x01
#define MCX_CMD_QUERY_PAGES_INIT	0x02
#define MCX_CMD_QUERY_PAGES_REGULAR	0x03
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_pages_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_func_id;
	int32_t			cmd_num_pages;
} __packed __aligned(4);

struct mcx_cmd_manage_pages_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_MANAGE_PAGES_ALLOC_FAIL \
					0x00
#define MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS \
					0x01
#define MCX_CMD_MANAGE_PAGES_HCA_RETURN_PAGES \
					0x02
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_func_id;
	uint32_t		cmd_input_num_entries;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_in) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_manage_pages_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_output_num_entries;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_out) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_hca_cap_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_QUERY_HCA_CAP_MAX	(0x0 << 0)
#define MCX_CMD_QUERY_HCA_CAP_CURRENT	(0x1 << 0)
#define MCX_CMD_QUERY_HCA_CAP_DEVICE	(0x0 << 1)
#define MCX_CMD_QUERY_HCA_CAP_OFFLOAD	(0x1 << 1)
#define MCX_CMD_QUERY_HCA_CAP_FLOW	(0x7 << 1)
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_hca_cap_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

#define MCX_HCA_CAP_LEN			0x1000
#define MCX_HCA_CAP_NMAILBOXES		\
	(MCX_HCA_CAP_LEN / MCX_CMDQ_MAILBOX_DATASIZE)

#if __GNUC_PREREQ__(4, 3)
#define __counter__		__COUNTER__
#else
#define __counter__		__LINE__
#endif

#define __token(_tok, _num)	_tok##_num
#define _token(_tok, _num)	__token(_tok, _num)
#define __reserved__		_token(__reserved, __counter__)

struct mcx_cap_device {
	uint8_t			reserved0[16];

	uint8_t			log_max_srq_sz;
	uint8_t			log_max_qp_sz;
	uint8_t			__reserved__[1];
	uint8_t			log_max_qp; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_QP	0x1f

	uint8_t			__reserved__[1];
	uint8_t			log_max_srq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SRQ	0x1f
	uint8_t			__reserved__[2];

	uint8_t			__reserved__[1];
	uint8_t			log_max_cq_sz;
	uint8_t			__reserved__[1];
	uint8_t			log_max_cq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_CQ	0x1f

	uint8_t			log_max_eq_sz;
	uint8_t			log_max_mkey; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MKEY	0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_eq; /* 4 bits */
#define MCX_CAP_DEVICE_LOG_MAX_EQ	0x0f

	uint8_t			max_indirection;
	uint8_t			log_max_mrw_sz; /* 7 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MRW_SZ	0x7f
	uint8_t			teardown_log_max_msf_list_size;
#define MCX_CAP_DEVICE_FORCE_TEARDOWN	0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSF_LIST_SIZE \
					0x3f
	uint8_t			log_max_klm_list_size; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_KLM_LIST_SIZE \
					0x3f

	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_req_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_REQ_DC	0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_res_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_DC \
					0x3f

	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_req_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_REQ_QP \
					0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_res_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_QP \
					0x3f

	uint8_t			flags1;
#define MCX_CAP_DEVICE_END_PAD		0x80
#define MCX_CAP_DEVICE_CC_QUERY_ALLOWED	0x40
#define MCX_CAP_DEVICE_CC_MODIFY_ALLOWED \
					0x20
#define MCX_CAP_DEVICE_START_PAD	0x10
#define MCX_CAP_DEVICE_128BYTE_CACHELINE \
					0x08
	uint8_t			__reserved__[1];
	uint16_t		gid_table_size;

	uint16_t		flags2;
#define MCX_CAP_DEVICE_OUT_OF_SEQ_CNT	0x8000
#define MCX_CAP_DEVICE_VPORT_COUNTERS	0x4000
#define MCX_CAP_DEVICE_RETRANSMISSION_Q_COUNTERS \
					0x2000
#define MCX_CAP_DEVICE_DEBUG		0x1000
#define MCX_CAP_DEVICE_MODIFY_RQ_COUNTERS_SET_ID \
					0x8000
#define MCX_CAP_DEVICE_RQ_DELAY_DROP	0x4000
#define MCX_CAP_DEVICE_MAX_QP_CNT_MASK	0x03ff
	uint16_t		pkey_table_size;

	uint8_t			flags3;
#define MCX_CAP_DEVICE_VPORT_GROUP_MANAGER \
					0x80
#define MCX_CAP_DEVICE_VHCA_GROUP_MANAGER \
					0x40
#define MCX_CAP_DEVICE_IB_VIRTUAL	0x20
#define MCX_CAP_DEVICE_ETH_VIRTUAL	0x10
#define MCX_CAP_DEVICE_ETS		0x04
#define MCX_CAP_DEVICE_NIC_FLOW_TABLE	0x02
#define MCX_CAP_DEVICE_ESWITCH_FLOW_TABLE \
					0x01
	uint8_t			local_ca_ack_delay; /* 5 bits */
#define MCX_CAP_DEVICE_LOCAL_CA_ACK_DELAY \
					0x1f
#define MCX_CAP_DEVICE_MCAM_REG		0x40
	uint8_t			port_type;
#define MCX_CAP_DEVICE_PORT_MODULE_EVENT \
					0x80
#define MCX_CAP_DEVICE_PORT_TYPE	0x03
#define MCX_CAP_DEVICE_PORT_TYPE_ETH	0x01
	uint8_t			num_ports;

	uint8_t			snapshot_log_max_msg;
#define MCX_CAP_DEVICE_SNAPSHOT		0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSG	0x1f
	uint8_t			max_tc; /* 4 bits */
#define MCX_CAP_DEVICE_MAX_TC		0x0f
	uint8_t			flags4;
#define MCX_CAP_DEVICE_TEMP_WARN_EVENT	0x80
#define MCX_CAP_DEVICE_DCBX		0x40
#define MCX_CAP_DEVICE_ROL_S		0x02
#define MCX_CAP_DEVICE_ROL_G		0x01
	uint8_t			wol;
#define MCX_CAP_DEVICE_WOL_S		0x40
#define MCX_CAP_DEVICE_WOL_G		0x20
#define MCX_CAP_DEVICE_WOL_A		0x10
#define MCX_CAP_DEVICE_WOL_B		0x08
#define MCX_CAP_DEVICE_WOL_M		0x04
#define MCX_CAP_DEVICE_WOL_U		0x02
#define MCX_CAP_DEVICE_WOL_P		0x01

	uint16_t		stat_rate_support;
	uint8_t			__reserved__[1];
	uint8_t			cqe_version; /* 4 bits */
#define MCX_CAP_DEVICE_CQE_VERSION	0x0f

	uint32_t		flags5;
#define MCX_CAP_DEVICE_COMPACT_ADDRESS_VECTOR \
					0x80000000
#define MCX_CAP_DEVICE_STRIDING_RQ	0x40000000
#define MCX_CAP_DEVICE_IPOIP_ENHANCED_OFFLOADS \
					0x10000000
#define MCX_CAP_DEVICE_IPOIP_IPOIP_OFFLOADS \
					0x08000000
#define MCX_CAP_DEVICE_DC_CONNECT_CP	0x00040000
#define MCX_CAP_DEVICE_DC_CNAK_DRACE	0x00020000
#define MCX_CAP_DEVICE_DRAIN_SIGERR	0x00010000
#define MCX_CAP_DEVICE_CMDIF_CHECKSUM	0x0000c000
#define MCX_CAP_DEVICE_SIGERR_QCE	0x00002000
#define MCX_CAP_DEVICE_WQ_SIGNATURE	0x00000800
#define MCX_CAP_DEVICE_SCTR_DATA_CQE	0x00000400
#define MCX_CAP_DEVICE_SHO		0x00000100
#define MCX_CAP_DEVICE_TPH		0x00000080
#define MCX_CAP_DEVICE_RF		0x00000040
#define MCX_CAP_DEVICE_DCT		0x00000020
#define MCX_CAP_DEVICE_QOS		0x00000010
#define MCX_CAP_DEVICE_ETH_NET_OFFLOADS	0x00000008
#define MCX_CAP_DEVICE_ROCE		0x00000004
#define MCX_CAP_DEVICE_ATOMIC		0x00000002

	uint32_t		flags6;
#define MCX_CAP_DEVICE_CQ_OI		0x80000000
#define MCX_CAP_DEVICE_CQ_RESIZE	0x40000000
#define MCX_CAP_DEVICE_CQ_MODERATION	0x20000000
#define MCX_CAP_DEVICE_CQ_PERIOD_MODE_MODIFY \
					0x10000000
#define MCX_CAP_DEVICE_CQ_INVALIDATE	0x08000000
#define MCX_CAP_DEVICE_RESERVED_AT_255	0x04000000
#define MCX_CAP_DEVICE_CQ_EQ_REMAP	0x02000000
#define MCX_CAP_DEVICE_PG		0x01000000
#define MCX_CAP_DEVICE_BLOCK_LB_MC	0x00800000
#define MCX_CAP_DEVICE_EXPONENTIAL_BACKOFF \
					0x00400000
#define MCX_CAP_DEVICE_SCQE_BREAK_MODERATION \
					0x00200000
#define MCX_CAP_DEVICE_CQ_PERIOD_START_FROM_CQE \
					0x00100000
#define MCX_CAP_DEVICE_CD		0x00080000
#define MCX_CAP_DEVICE_ATM		0x00040000
#define MCX_CAP_DEVICE_APM		0x00020000
#define MCX_CAP_DEVICE_IMAICL		0x00010000
#define MCX_CAP_DEVICE_QKV		0x00000200
#define MCX_CAP_DEVICE_PKV		0x00000100
#define MCX_CAP_DEVICE_SET_DETH_SQPN	0x00000080
#define MCX_CAP_DEVICE_XRC		0x00000008
#define MCX_CAP_DEVICE_UD		0x00000004
#define MCX_CAP_DEVICE_UC		0x00000002
#define MCX_CAP_DEVICE_RC		0x00000001

	uint8_t			uar_flags;
#define MCX_CAP_DEVICE_UAR_4K		0x80
	uint8_t			uar_sz;	/* 6 bits */
#define MCX_CAP_DEVICE_UAR_SZ		0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_pg_sz;

	uint8_t			flags7;
#define MCX_CAP_DEVICE_BF		0x80
#define MCX_CAP_DEVICE_DRIVER_VERSION	0x40
#define MCX_CAP_DEVICE_PAD_TX_ETH_PACKET \
					0x20
	uint8_t			log_bf_reg_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_BF_REG_SIZE	0x1f
	uint8_t			__reserved__[2];

	uint16_t		num_of_diagnostic_counters;
	uint16_t		max_wqe_sz_sq;

	uint8_t			__reserved__[2];
	uint16_t		max_wqe_sz_rq;

	uint8_t			__reserved__[2];
	uint16_t		max_wqe_sz_sq_dc;

	uint32_t		max_qp_mcg; /* 25 bits */
#define MCX_CAP_DEVICE_MAX_QP_MCG	0x1ffffff

	uint8_t			__reserved__[3];
	uint8_t			log_max_mcq;

	uint8_t			log_max_transport_domain; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TRANSPORT_DOMAIN \
					0x1f
	uint8_t			log_max_pd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_PD	0x1f
	uint8_t			__reserved__[1];
	uint8_t			log_max_xrcd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_XRCD	0x1f

	uint8_t			__reserved__[2];
	uint16_t		max_flow_counter;

	uint8_t			log_max_rq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQ	0x1f
	uint8_t			log_max_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SQ	0x1f
	uint8_t			log_max_tir; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIR	0x1f
	uint8_t			log_max_tis; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS	0x1f

	uint8_t			flags8;
#define MCX_CAP_DEVICE_BASIC_CYCLIC_RCV_WQE \
					0x80
#define MCX_CAP_DEVICE_LOG_MAX_RMP	0x1f
	uint8_t			log_max_rqt; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT	0x1f
	uint8_t			log_max_rqt_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT_SIZE	0x1f
	uint8_t			log_max_tis_per_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS_PER_SQ \
					0x1f

	uint8_t			flags9;
#define MCX_CAP_DEVICE_EXT_STRIDE_NUM_RANGES \
					0x80
#define MCX_CAP_DEVICE_LOG_MAX_STRIDE_SZ_RQ \
					0x1f
	uint8_t			log_min_stride_sz_rq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MIN_STRIDE_SZ_RQ \
					0x1f
	uint8_t			log_max_stride_sz_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_STRIDE_SZ_SQ \
					0x1f
	uint8_t			log_min_stride_sz_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MIN_STRIDE_SZ_SQ \
					0x1f

	uint8_t			log_max_hairpin_queues;
#define MCX_CAP_DEVICE_HAIRPIN		0x80
#define MCX_CAP_DEVICE_LOG_MAX_HAIRPIN_QUEUES \
					0x1f
	uint8_t			log_min_hairpin_queues;
#define MCX_CAP_DEVICE_LOG_MIN_HAIRPIN_QUEUES \
					0x1f
	uint8_t			log_max_hairpin_num_packets;
#define MCX_CAP_DEVICE_LOG_MAX_HAIRPIN_NUM_PACKETS \
					0x1f
	uint8_t			log_max_wq_sz;
#define MCX_CAP_DEVICE_LOG_MAX_WQ_SZ \
					0x1f

	uint8_t			log_min_hairpin_wq_data_sz;
#define MCX_CAP_DEVICE_NIC_VPORT_CHANGE_EVENT \
					0x80
#define MCX_CAP_DEVICE_DISABLE_LOCAL_LB_UC \
					0x40
#define MCX_CAP_DEVICE_DISABLE_LOCAL_LB_MC \
					0x20
#define MCX_CAP_DEVICE_LOG_MIN_HAIRPIN_WQ_DATA_SZ \
					0x1f
	uint8_t			log_max_vlan_list;
#define MCX_CAP_DEVICE_SYSTEM_IMAGE_GUID_MODIFIABLE \
					0x80
#define MCX_CAP_DEVICE_LOG_MAX_VLAN_LIST \
					0x1f
	uint8_t			log_max_current_mc_list;
#define MCX_CAP_DEVICE_LOG_MAX_CURRENT_MC_LIST \
					0x1f
	uint8_t			log_max_current_uc_list;
#define MCX_CAP_DEVICE_LOG_MAX_CURRENT_UC_LIST \
					0x1f

	uint8_t			__reserved__[4];

	uint32_t		create_qp_start_hint; /* 24 bits */

	uint8_t			log_max_uctx; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_UCTX	0x1f
	uint8_t			log_max_umem; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_UMEM	0x1f
	uint16_t		max_num_eqs;

	uint8_t			log_max_l2_table; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_L2_TABLE	0x1f
	uint8_t			__reserved__[1];
	uint16_t		log_uar_page_sz;

	uint8_t			__reserved__[8];

	uint32_t		device_frequency_mhz;
	uint32_t		device_frequency_khz;
} __packed __aligned(8);

CTASSERT(offsetof(struct mcx_cap_device, max_indirection) == 0x20);
CTASSERT(offsetof(struct mcx_cap_device, flags1) == 0x2c);
CTASSERT(offsetof(struct mcx_cap_device, flags2) == 0x30);
CTASSERT(offsetof(struct mcx_cap_device, snapshot_log_max_msg) == 0x38);
CTASSERT(offsetof(struct mcx_cap_device, flags5) == 0x40);
CTASSERT(offsetof(struct mcx_cap_device, flags7) == 0x4c);
CTASSERT(offsetof(struct mcx_cap_device, device_frequency_mhz) == 0x98);
CTASSERT(offsetof(struct mcx_cap_device, device_frequency_khz) == 0x9c);
CTASSERT(sizeof(struct mcx_cap_device) <= MCX_CMDQ_MAILBOX_DATASIZE);
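
/*
 * Illustrative sketch (not in the original driver): the log_max fields
 * above only use the low bits of their byte, so reading one is a mask
 * with the matching define, e.g.:
 */
static inline unsigned int
mcx_cap_log_max_cq(const struct mcx_cap_device *cap)
{
	return (cap->log_max_cq & MCX_CAP_DEVICE_LOG_MAX_CQ);
}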

struct mcx_cmd_set_driver_version_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version {
	uint8_t			cmd_driver_version[64];
} __packed __aligned(8);

struct mcx_cmd_modify_nic_vport_context_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_field_select;
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_ADDR	0x04
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC	0x10
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU	0x40
} __packed __aligned(4);

struct mcx_cmd_modify_nic_vport_context_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[4];
	uint8_t			cmd_allowed_list_type;
	uint8_t			cmd_reserved2[3];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_nic_vport_ctx {
	uint32_t		vp_min_wqe_inline_mode;
	uint8_t			vp_reserved0[32];
	uint32_t		vp_mtu;
	uint8_t			vp_reserved1[200];
	uint16_t		vp_flags;
#define MCX_NIC_VPORT_CTX_LIST_UC_MAC			(0)
#define MCX_NIC_VPORT_CTX_LIST_MC_MAC			(1 << 24)
#define MCX_NIC_VPORT_CTX_LIST_VLAN			(2 << 24)
#define MCX_NIC_VPORT_CTX_PROMISC_ALL			(1 << 13)
#define MCX_NIC_VPORT_CTX_PROMISC_MCAST			(1 << 14)
#define MCX_NIC_VPORT_CTX_PROMISC_UCAST			(1 << 15)
	uint16_t		vp_allowed_list_size;
	uint64_t		vp_perm_addr;
	uint8_t			vp_reserved2[4];
	/* allowed list follows */
} __packed __aligned(4);

struct mcx_counter {
	uint64_t		packets;
	uint64_t		octets;
} __packed __aligned(4);

struct mcx_nic_vport_counters {
	struct mcx_counter	rx_err;
	struct mcx_counter	tx_err;
	uint8_t			reserved0[64]; /* 0x30 */
	struct mcx_counter	rx_bcast;
	struct mcx_counter	tx_bcast;
	struct mcx_counter	rx_ucast;
	struct mcx_counter	tx_ucast;
	struct mcx_counter	rx_mcast;
	struct mcx_counter	tx_mcast;
	uint8_t			reserved1[0x210 - 0xd0];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_mb_in {
	uint8_t			cmd_reserved0[8];
	uint8_t			cmd_clear;
	uint8_t			cmd_reserved1[7];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_mb_in {
	uint8_t			cmd_reserved0[8];
	uint8_t			cmd_clear;
	uint8_t			cmd_reserved1[5];
	uint16_t		cmd_flow_counter_id;
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_uar;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_resd_lkey;
} __packed __aligned(4);

struct mcx_eq_ctx {
	uint32_t		eq_status;
#define MCX_EQ_CTX_STATE_SHIFT		8
#define MCX_EQ_CTX_STATE_MASK		(0xf << MCX_EQ_CTX_STATE_SHIFT)
#define MCX_EQ_CTX_STATE_ARMED		0x9
#define MCX_EQ_CTX_STATE_FIRED		0xa
#define MCX_EQ_CTX_OI_SHIFT		17
#define MCX_EQ_CTX_OI			(1 << MCX_EQ_CTX_OI_SHIFT)
#define MCX_EQ_CTX_EC_SHIFT		18
#define MCX_EQ_CTX_EC			(1 << MCX_EQ_CTX_EC_SHIFT)
#define MCX_EQ_CTX_STATUS_SHIFT		28
#define MCX_EQ_CTX_STATUS_MASK		(0xf << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_OK		0x0
#define MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE 0xa
	uint32_t		eq_reserved1;
	uint32_t		eq_page_offset;
#define MCX_EQ_CTX_PAGE_OFFSET_SHIFT	5
	uint32_t		eq_uar_size;
#define MCX_EQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT	24
	uint32_t		eq_reserved2;
	uint8_t			eq_reserved3[3];
	uint8_t			eq_intr;
	uint32_t		eq_log_page_size;
#define MCX_EQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		eq_reserved4[3];
	uint32_t		eq_consumer_counter;
	uint32_t		eq_producer_counter;
#define MCX_EQ_CTX_COUNTER_MASK		0xffffff
	uint32_t		eq_reserved5[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_ctx) == 64);

struct mcx_cmd_create_eq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_eq_mb_in {
	struct mcx_eq_ctx	cmd_eq_ctx;
	uint8_t			cmd_reserved0[8];
	uint64_t		cmd_event_bitmask;
#define MCX_EVENT_TYPE_COMPLETION	0x00
#define MCX_EVENT_TYPE_CQ_ERROR		0x04
#define MCX_EVENT_TYPE_INTERNAL_ERROR	0x08
#define MCX_EVENT_TYPE_PORT_CHANGE	0x09
#define MCX_EVENT_TYPE_CMD_COMPLETION	0x0a
#define MCX_EVENT_TYPE_PAGE_REQUEST	0x0b
#define MCX_EVENT_TYPE_LAST_WQE		0x13
	uint8_t			cmd_reserved1[176];
} __packed __aligned(4);

struct mcx_cmd_create_eq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_eqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_eq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_eqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_eq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_eq_entry {
	uint8_t			eq_reserved1;
	uint8_t			eq_event_type;
	uint8_t			eq_reserved2;
	uint8_t			eq_event_sub_type;

	uint8_t			eq_reserved3[28];
	uint32_t		eq_event_data[7];
	uint8_t			eq_reserved4[2];
	uint8_t			eq_signature;
	uint8_t			eq_owner;
#define MCX_EQ_ENTRY_OWNER_INIT			1
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_entry) == 64);
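
/*
 * Illustrative sketch, assuming the usual ConnectX ownership scheme:
 * the low bit of eq_owner toggles on each pass through the event ring,
 * so an entry is valid when that bit matches the parity of the current
 * pass (consumer counter >> MCX_LOG_EQ_SIZE).
 */
static inline int
mcx_eq_entry_valid(const struct mcx_eq_entry *eqe, uint32_t cons)
{
	return ((eqe->eq_owner & 1) == ((cons >> MCX_LOG_EQ_SIZE) & 1));
}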

struct mcx_cmd_alloc_pd_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_pd_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_pd;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tdomain;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_create_tir_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tir_mb_in {
	uint8_t			cmd_reserved0[20];
	uint32_t		cmd_disp_type;
#define MCX_TIR_CTX_DISP_TYPE_DIRECT	0
#define MCX_TIR_CTX_DISP_TYPE_INDIRECT	1
#define MCX_TIR_CTX_DISP_TYPE_SHIFT	28
	uint8_t			cmd_reserved1[8];
	uint32_t		cmd_lro;
	uint8_t			cmd_reserved2[8];
	uint32_t		cmd_inline_rqn;
	uint32_t		cmd_indir_table;
	uint32_t		cmd_tdomain;
#define MCX_TIR_CTX_HASH_TOEPLITZ	2
#define MCX_TIR_CTX_HASH_SHIFT		28
	uint8_t			cmd_rx_hash_key[40];
	uint32_t		cmd_rx_hash_sel_outer;
#define MCX_TIR_CTX_HASH_SEL_SRC_IP	(1 << 0)
#define MCX_TIR_CTX_HASH_SEL_DST_IP	(1 << 1)
#define MCX_TIR_CTX_HASH_SEL_SPORT	(1 << 2)
#define MCX_TIR_CTX_HASH_SEL_DPORT	(1 << 3)
#define MCX_TIR_CTX_HASH_SEL_IPV4	(0 << 31)
#define MCX_TIR_CTX_HASH_SEL_IPV6	(1 << 31)
#define MCX_TIR_CTX_HASH_SEL_TCP	(0 << 30)
#define MCX_TIR_CTX_HASH_SEL_UDP	(1 << 30)
	uint32_t		cmd_rx_hash_sel_inner;
	uint8_t			cmd_reserved3[152];
} __packed __aligned(4);

struct mcx_cmd_create_tir_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tirn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_tirn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_mb_in {
	uint8_t			cmd_reserved[16];
	uint32_t		cmd_prio;
	uint8_t			cmd_reserved1[32];
	uint32_t		cmd_tdomain;
	uint8_t			cmd_reserved2[120];
} __packed __aligned(4);

struct mcx_cmd_create_tis_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tisn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_tisn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_rqt_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_rqt_ctx {
	uint8_t			cmd_reserved0[20];
	uint16_t		cmd_reserved1;
	uint16_t		cmd_rqt_max_size;
	uint16_t		cmd_reserved2;
	uint16_t		cmd_rqt_actual_size;
	uint8_t			cmd_reserved3[212];
} __packed __aligned(4);

struct mcx_cmd_create_rqt_mb_in {
	uint8_t			cmd_reserved0[16];
	struct mcx_rqt_ctx	cmd_rqt;
} __packed __aligned(4);

struct mcx_cmd_create_rqt_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_rqtn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rqt_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_rqtn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rqt_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cq_ctx {
	uint32_t		cq_status;
#define MCX_CQ_CTX_STATUS_SHIFT		28
#define MCX_CQ_CTX_STATUS_MASK		(0xf << MCX_CQ_CTX_STATUS_SHIFT)
#define MCX_CQ_CTX_STATUS_OK		0x0
#define MCX_CQ_CTX_STATUS_OVERFLOW	0x9
#define MCX_CQ_CTX_STATUS_WRITE_FAIL	0xa
#define MCX_CQ_CTX_STATE_SHIFT		8
#define MCX_CQ_CTX_STATE_MASK		(0xf << MCX_CQ_CTX_STATE_SHIFT)
#define MCX_CQ_CTX_STATE_SOLICITED	0x6
#define MCX_CQ_CTX_STATE_ARMED		0x9
#define MCX_CQ_CTX_STATE_FIRED		0xa
	uint32_t		cq_reserved1;
	uint32_t		cq_page_offset;
	uint32_t		cq_uar_size;
#define MCX_CQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT	24
	uint32_t		cq_period_max_count;
#define MCX_CQ_CTX_PERIOD_SHIFT		16
	uint32_t		cq_eqn;
	uint32_t		cq_log_page_size;
#define MCX_CQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		cq_reserved2;
	uint32_t		cq_last_notified;
	uint32_t		cq_last_solicit;
	uint32_t		cq_consumer_counter;
	uint32_t		cq_producer_counter;
	uint8_t			cq_reserved3[8];
	uint64_t		cq_doorbell;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_ctx) == 64);

struct mcx_cmd_create_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_cq_mb_in {
	struct mcx_cq_ctx	cmd_cq_ctx;
	uint8_t			cmd_reserved1[192];
} __packed __aligned(4);

struct mcx_cmd_create_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cq_entry {
	uint32_t		__reserved__;
	uint32_t		cq_lro;
	uint32_t		cq_lro_ack_seq_num;
	uint32_t		cq_rx_hash;
	uint8_t			cq_rx_hash_type;
	uint8_t			cq_ml_path;
	uint16_t		__reserved__;
	uint32_t		cq_checksum;
	uint32_t		__reserved__;
	uint32_t		cq_flags;
#define MCX_CQ_ENTRY_FLAGS_L4_OK		(1 << 26)
#define MCX_CQ_ENTRY_FLAGS_L3_OK		(1 << 25)
#define MCX_CQ_ENTRY_FLAGS_L2_OK		(1 << 24)
#define MCX_CQ_ENTRY_FLAGS_CV			(1 << 16)
#define MCX_CQ_ENTRY_FLAGS_VLAN_MASK		(0xffff)

	uint32_t		cq_lro_srqn;
	uint32_t		__reserved__[2];
	uint32_t		cq_byte_cnt;
	uint64_t		cq_timestamp;
	uint8_t			cq_rx_drops;
	uint8_t			cq_flow_tag[3];
	uint16_t		cq_wqe_count;
	uint8_t			cq_signature;
	uint8_t			cq_opcode_owner;
#define MCX_CQ_ENTRY_FLAG_OWNER			(1 << 0)
#define MCX_CQ_ENTRY_FLAG_SE			(1 << 1)
#define MCX_CQ_ENTRY_FORMAT_SHIFT		2
#define MCX_CQ_ENTRY_OPCODE_SHIFT		4

#define MCX_CQ_ENTRY_FORMAT_NO_INLINE		0
#define MCX_CQ_ENTRY_FORMAT_INLINE_32		1
#define MCX_CQ_ENTRY_FORMAT_INLINE_64		2
#define MCX_CQ_ENTRY_FORMAT_COMPRESSED		3

#define MCX_CQ_ENTRY_OPCODE_REQ			0
#define MCX_CQ_ENTRY_OPCODE_SEND		2
#define MCX_CQ_ENTRY_OPCODE_REQ_ERR		13
#define MCX_CQ_ENTRY_OPCODE_SEND_ERR		14
#define MCX_CQ_ENTRY_OPCODE_INVALID		15

} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_entry) == 64);
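
/*
 * Illustrative helpers (not in the original driver) for picking apart
 * cq_opcode_owner using the shifts defined above:
 */
static inline unsigned int
mcx_cq_entry_opcode(const struct mcx_cq_entry *cqe)
{
	return (cqe->cq_opcode_owner >> MCX_CQ_ENTRY_OPCODE_SHIFT);
}

static inline unsigned int
mcx_cq_entry_format(const struct mcx_cq_entry *cqe)
{
	return ((cqe->cq_opcode_owner >> MCX_CQ_ENTRY_FORMAT_SHIFT) & 0x3);
}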

struct mcx_cq_doorbell {
	uint32_t		 db_update_ci;
	uint32_t		 db_arm_ci;
#define MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT	28
#define MCX_CQ_DOORBELL_ARM_CMD			(1 << 24)
#define MCX_CQ_DOORBELL_ARM_CI_MASK		(0xffffff)
} __packed __aligned(8);
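
/*
 * Illustrative sketch, assuming the standard ConnectX arming protocol:
 * re-arming a CQ stores the consumer index and an arm command (tagged
 * with a command sequence number) into the doorbell record, both
 * big-endian. Real code must also sync the doorbell DMA memory before
 * the device can observe the write.
 */
static inline void
mcx_cq_doorbell_arm(struct mcx_cq_doorbell *db, uint32_t ci, uint32_t sn)
{
	ci &= MCX_CQ_DOORBELL_ARM_CI_MASK;
	db->db_update_ci = htobe32(ci);
	db->db_arm_ci = htobe32((sn << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT) |
	    MCX_CQ_DOORBELL_ARM_CMD | ci);
}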

struct mcx_wq_ctx {
	uint8_t			 wq_type;
#define MCX_WQ_CTX_TYPE_CYCLIC			(1 << 4)
#define MCX_WQ_CTX_TYPE_SIGNATURE		(1 << 3)
	uint8_t			 wq_reserved0[5];
	uint16_t		 wq_lwm;
	uint32_t		 wq_pd;
	uint32_t		 wq_uar_page;
	uint64_t		 wq_doorbell;
	uint32_t		 wq_hw_counter;
	uint32_t		 wq_sw_counter;
	uint16_t		 wq_log_stride;
	uint8_t			 wq_log_page_sz;
	uint8_t			 wq_log_size;
	uint8_t			 wq_reserved1[156];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_wq_ctx) == 0xC0);

struct mcx_sq_ctx {
	uint32_t		sq_flags;
#define MCX_SQ_CTX_RLKEY			(1 << 31)
#define MCX_SQ_CTX_FRE_SHIFT			(1 << 29)
#define MCX_SQ_CTX_FLUSH_IN_ERROR		(1 << 28)
#define MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT		24
#define MCX_SQ_CTX_STATE_SHIFT			20
#define MCX_SQ_CTX_STATE_MASK			(0xf << 20)
#define MCX_SQ_CTX_STATE_RST			0
#define MCX_SQ_CTX_STATE_RDY			1
#define MCX_SQ_CTX_STATE_ERR			3
	uint32_t		sq_user_index;
	uint32_t		sq_cqn;
	uint32_t		sq_reserved1[5];
	uint32_t		sq_tis_lst_sz;
#define MCX_SQ_CTX_TIS_LST_SZ_SHIFT		16
	uint32_t		sq_reserved2[2];
	uint32_t		sq_tis_num;
	struct mcx_wq_ctx	sq_wq;
} __packed __aligned(4);

struct mcx_sq_entry_seg {
	uint32_t		sqs_byte_count;
	uint32_t		sqs_lkey;
	uint64_t		sqs_addr;
} __packed __aligned(4);

struct mcx_sq_entry {
	/* control segment */
	uint32_t		sqe_opcode_index;
#define MCX_SQE_WQE_INDEX_SHIFT			8
#define MCX_SQE_WQE_OPCODE_NOP			0x00
#define MCX_SQE_WQE_OPCODE_SEND			0x0a
	uint32_t		sqe_ds_sq_num;
#define MCX_SQE_SQ_NUM_SHIFT			8
	uint32_t		sqe_signature;
#define MCX_SQE_SIGNATURE_SHIFT			24
#define MCX_SQE_SOLICITED_EVENT			0x02
#define MCX_SQE_CE_CQE_ON_ERR			0x00
#define MCX_SQE_CE_CQE_FIRST_ERR		0x04
#define MCX_SQE_CE_CQE_ALWAYS			0x08
#define MCX_SQE_CE_CQE_SOLICIT			0x0C
#define MCX_SQE_FM_NO_FENCE			0x00
#define MCX_SQE_FM_SMALL_FENCE			0x40
	uint32_t		sqe_mkey;

	/* ethernet segment */
	uint32_t		sqe_reserved1;
	uint32_t		sqe_mss_csum;
#define MCX_SQE_L4_CSUM				(1 << 31)
#define MCX_SQE_L3_CSUM				(1 << 30)
	uint32_t		sqe_reserved2;
	uint16_t		sqe_inline_header_size;
	uint16_t		sqe_inline_headers[9];

	/* data segment */
	struct mcx_sq_entry_seg sqe_segs[1];
} __packed __aligned(64);

CTASSERT(sizeof(struct mcx_sq_entry) == 64);
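
/*
 * Illustrative sketch (an assumption about the encoding): sqe_ds_sq_num
 * packs the SQ number above the count of 16-byte segments making up the
 * entry, where the count covers the control and ethernet segments as
 * well as the data segments.
 */
static inline uint32_t
mcx_sqe_ds_sq_num(uint32_t sqn, unsigned int nds)
{
	return (htobe32((sqn << MCX_SQE_SQ_NUM_SHIFT) | nds));
}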

struct mcx_cmd_create_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_sqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_sq_state;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_mb_in {
	uint32_t		cmd_modify_hi;
	uint32_t		cmd_modify_lo;
	uint8_t			cmd_reserved0[8];
	struct mcx_sq_ctx	cmd_sq_ctx;
} __packed __aligned(4);

struct mcx_cmd_modify_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_sqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_rq_ctx {
	uint32_t		rq_flags;
#define MCX_RQ_CTX_RLKEY			(1 << 31)
#define MCX_RQ_CTX_VLAN_STRIP_DIS		(1 << 28)
#define MCX_RQ_CTX_MEM_RQ_TYPE_SHIFT		24
#define MCX_RQ_CTX_STATE_SHIFT			20
#define MCX_RQ_CTX_STATE_MASK			(0xf << 20)
#define MCX_RQ_CTX_STATE_RST			0
#define MCX_RQ_CTX_STATE_RDY			1
#define MCX_RQ_CTX_STATE_ERR			3
#define MCX_RQ_CTX_FLUSH_IN_ERROR		(1 << 18)
	uint32_t		rq_user_index;
	uint32_t		rq_cqn;
	uint32_t		rq_reserved1;
	uint32_t		rq_rmpn;
	uint32_t		rq_reserved2[7];
	struct mcx_wq_ctx	rq_wq;
} __packed __aligned(4);

struct mcx_rq_entry {
	uint32_t		rqe_byte_count;
	uint32_t		rqe_lkey;
	uint64_t		rqe_addr;
} __packed __aligned(16);
1805 
1806 struct mcx_cmd_create_rq_in {
1807 	uint16_t		cmd_opcode;
1808 	uint8_t			cmd_reserved0[4];
1809 	uint16_t		cmd_op_mod;
1810 	uint8_t			cmd_reserved1[8];
1811 } __packed __aligned(4);
1812 
1813 struct mcx_cmd_create_rq_out {
1814 	uint8_t			cmd_status;
1815 	uint8_t			cmd_reserved0[3];
1816 	uint32_t		cmd_syndrome;
1817 	uint32_t		cmd_rqn;
1818 	uint8_t			cmd_reserved1[4];
1819 } __packed __aligned(4);
1820 
1821 struct mcx_cmd_modify_rq_in {
1822 	uint16_t		cmd_opcode;
1823 	uint8_t			cmd_reserved0[4];
1824 	uint16_t		cmd_op_mod;
1825 	uint32_t		cmd_rq_state;
1826 	uint8_t			cmd_reserved1[4];
1827 } __packed __aligned(4);
1828 
1829 struct mcx_cmd_modify_rq_mb_in {
1830 	uint32_t		cmd_modify_hi;
1831 	uint32_t		cmd_modify_lo;
1832 	uint8_t			cmd_reserved0[8];
1833 	struct mcx_rq_ctx	cmd_rq_ctx;
1834 } __packed __aligned(4);
1835 
1836 struct mcx_cmd_modify_rq_out {
1837 	uint8_t			cmd_status;
1838 	uint8_t			cmd_reserved0[3];
1839 	uint32_t		cmd_syndrome;
1840 	uint8_t			cmd_reserved1[8];
1841 } __packed __aligned(4);
1842 
1843 struct mcx_cmd_destroy_rq_in {
1844 	uint16_t		cmd_opcode;
1845 	uint8_t			cmd_reserved0[4];
1846 	uint16_t		cmd_op_mod;
1847 	uint32_t		cmd_rqn;
1848 	uint8_t			cmd_reserved1[4];
1849 } __packed __aligned(4);
1850 
1851 struct mcx_cmd_destroy_rq_out {
1852 	uint8_t			cmd_status;
1853 	uint8_t			cmd_reserved0[3];
1854 	uint32_t		cmd_syndrome;
1855 	uint8_t			cmd_reserved1[8];
1856 } __packed __aligned(4);
1857 
1858 struct mcx_cmd_create_flow_table_in {
1859 	uint16_t		cmd_opcode;
1860 	uint8_t			cmd_reserved0[4];
1861 	uint16_t		cmd_op_mod;
1862 	uint8_t			cmd_reserved1[8];
1863 } __packed __aligned(4);
1864 
1865 struct mcx_flow_table_ctx {
1866 	uint8_t			ft_miss_action;
1867 	uint8_t			ft_level;
1868 	uint8_t			ft_reserved0;
1869 	uint8_t			ft_log_size;
1870 	uint32_t		ft_table_miss_id;
1871 	uint8_t			ft_reserved1[28];
1872 } __packed __aligned(4);
1873 
1874 struct mcx_cmd_create_flow_table_mb_in {
1875 	uint8_t			cmd_table_type;
1876 	uint8_t			cmd_reserved0[7];
1877 	struct mcx_flow_table_ctx cmd_ctx;
1878 } __packed __aligned(4);
1879 
1880 struct mcx_cmd_create_flow_table_out {
1881 	uint8_t			cmd_status;
1882 	uint8_t			cmd_reserved0[3];
1883 	uint32_t		cmd_syndrome;
1884 	uint32_t		cmd_table_id;
1885 	uint8_t			cmd_reserved1[4];
1886 } __packed __aligned(4);
1887 
1888 struct mcx_cmd_destroy_flow_table_in {
1889 	uint16_t		cmd_opcode;
1890 	uint8_t			cmd_reserved0[4];
1891 	uint16_t		cmd_op_mod;
1892 	uint8_t			cmd_reserved1[8];
1893 } __packed __aligned(4);
1894 
1895 struct mcx_cmd_destroy_flow_table_mb_in {
1896 	uint8_t			cmd_table_type;
1897 	uint8_t			cmd_reserved0[3];
1898 	uint32_t		cmd_table_id;
1899 	uint8_t			cmd_reserved1[40];
1900 } __packed __aligned(4);
1901 
1902 struct mcx_cmd_destroy_flow_table_out {
1903 	uint8_t			cmd_status;
1904 	uint8_t			cmd_reserved0[3];
1905 	uint32_t		cmd_syndrome;
1906 	uint8_t			cmd_reserved1[8];
1907 } __packed __aligned(4);
1908 
1909 struct mcx_cmd_set_flow_table_root_in {
1910 	uint16_t		cmd_opcode;
1911 	uint8_t			cmd_reserved0[4];
1912 	uint16_t		cmd_op_mod;
1913 	uint8_t			cmd_reserved1[8];
1914 } __packed __aligned(4);
1915 
1916 struct mcx_cmd_set_flow_table_root_mb_in {
1917 	uint8_t			cmd_table_type;
1918 	uint8_t			cmd_reserved0[3];
1919 	uint32_t		cmd_table_id;
1920 	uint8_t			cmd_reserved1[56];
1921 } __packed __aligned(4);
1922 
1923 struct mcx_cmd_set_flow_table_root_out {
1924 	uint8_t			cmd_status;
1925 	uint8_t			cmd_reserved0[3];
1926 	uint32_t		cmd_syndrome;
1927 	uint8_t			cmd_reserved1[8];
1928 } __packed __aligned(4);
1929 
1930 struct mcx_flow_match {
1931 	/* outer headers */
1932 	uint8_t			mc_src_mac[6];
1933 	uint16_t		mc_ethertype;
1934 	uint8_t			mc_dest_mac[6];
1935 	uint16_t		mc_first_vlan;
1936 	uint8_t			mc_ip_proto;
1937 	uint8_t			mc_ip_dscp_ecn;
1938 	uint8_t			mc_vlan_flags;
1939 #define MCX_FLOW_MATCH_IP_FRAG	(1 << 5)
1940 	uint8_t			mc_tcp_flags;
1941 	uint16_t		mc_tcp_sport;
1942 	uint16_t		mc_tcp_dport;
1943 	uint32_t		mc_reserved0;
1944 	uint16_t		mc_udp_sport;
1945 	uint16_t		mc_udp_dport;
1946 	uint8_t			mc_src_ip[16];
1947 	uint8_t			mc_dest_ip[16];
1948 
1949 	/* misc parameters */
1950 	uint8_t			mc_reserved1[8];
1951 	uint16_t		mc_second_vlan;
1952 	uint8_t			mc_reserved2[2];
1953 	uint8_t			mc_second_vlan_flags;
1954 	uint8_t			mc_reserved3[15];
1955 	uint32_t		mc_outer_ipv6_flow_label;
1956 	uint8_t			mc_reserved4[32];
1957 
1958 	uint8_t			mc_reserved[384];
1959 } __packed __aligned(4);
1960 
1961 CTASSERT(sizeof(struct mcx_flow_match) == 512);
1962 
1963 struct mcx_cmd_create_flow_group_in {
1964 	uint16_t		cmd_opcode;
1965 	uint8_t			cmd_reserved0[4];
1966 	uint16_t		cmd_op_mod;
1967 	uint8_t			cmd_reserved1[8];
1968 } __packed __aligned(4);
1969 
1970 struct mcx_cmd_create_flow_group_mb_in {
1971 	uint8_t			cmd_table_type;
1972 	uint8_t			cmd_reserved0[3];
1973 	uint32_t		cmd_table_id;
1974 	uint8_t			cmd_reserved1[4];
1975 	uint32_t		cmd_start_flow_index;
1976 	uint8_t			cmd_reserved2[4];
1977 	uint32_t		cmd_end_flow_index;
1978 	uint8_t			cmd_reserved3[23];
1979 	uint8_t			cmd_match_criteria_enable;
1980 #define MCX_CREATE_FLOW_GROUP_CRIT_OUTER	(1 << 0)
1981 #define MCX_CREATE_FLOW_GROUP_CRIT_MISC		(1 << 1)
1982 #define MCX_CREATE_FLOW_GROUP_CRIT_INNER	(1 << 2)
1983 	struct mcx_flow_match	cmd_match_criteria;
1984 	uint8_t			cmd_reserved4[448];
1985 } __packed __aligned(4);
1986 
1987 struct mcx_cmd_create_flow_group_out {
1988 	uint8_t			cmd_status;
1989 	uint8_t			cmd_reserved0[3];
1990 	uint32_t		cmd_syndrome;
1991 	uint32_t		cmd_group_id;
1992 	uint8_t			cmd_reserved1[4];
1993 } __packed __aligned(4);
1994 
1995 struct mcx_flow_ctx {
1996 	uint8_t			fc_reserved0[4];
1997 	uint32_t		fc_group_id;
1998 	uint32_t		fc_flow_tag;
1999 	uint32_t		fc_action;
2000 #define MCX_FLOW_CONTEXT_ACTION_ALLOW		(1 << 0)
2001 #define MCX_FLOW_CONTEXT_ACTION_DROP		(1 << 1)
2002 #define MCX_FLOW_CONTEXT_ACTION_FORWARD		(1 << 2)
2003 #define MCX_FLOW_CONTEXT_ACTION_COUNT		(1 << 3)
2004 	uint32_t		fc_dest_list_size;
2005 	uint32_t		fc_counter_list_size;
2006 	uint8_t			fc_reserved1[40];
2007 	struct mcx_flow_match	fc_match_value;
2008 	uint8_t			fc_reserved2[192];
2009 } __packed __aligned(4);
2010 
2011 #define MCX_FLOW_CONTEXT_DEST_TYPE_TABLE	(1 << 24)
2012 #define MCX_FLOW_CONTEXT_DEST_TYPE_TIR		(2 << 24)
2013 
2014 struct mcx_cmd_destroy_flow_group_in {
2015 	uint16_t		cmd_opcode;
2016 	uint8_t			cmd_reserved0[4];
2017 	uint16_t		cmd_op_mod;
2018 	uint8_t			cmd_reserved1[8];
2019 } __packed __aligned(4);
2020 
2021 struct mcx_cmd_destroy_flow_group_mb_in {
2022 	uint8_t			cmd_table_type;
2023 	uint8_t			cmd_reserved0[3];
2024 	uint32_t		cmd_table_id;
2025 	uint32_t		cmd_group_id;
2026 	uint8_t			cmd_reserved1[36];
2027 } __packed __aligned(4);
2028 
2029 struct mcx_cmd_destroy_flow_group_out {
2030 	uint8_t			cmd_status;
2031 	uint8_t			cmd_reserved0[3];
2032 	uint32_t		cmd_syndrome;
2033 	uint8_t			cmd_reserved1[8];
2034 } __packed __aligned(4);
2035 
2036 struct mcx_cmd_set_flow_table_entry_in {
2037 	uint16_t		cmd_opcode;
2038 	uint8_t			cmd_reserved0[4];
2039 	uint16_t		cmd_op_mod;
2040 	uint8_t			cmd_reserved1[8];
2041 } __packed __aligned(4);
2042 
2043 struct mcx_cmd_set_flow_table_entry_mb_in {
2044 	uint8_t			cmd_table_type;
2045 	uint8_t			cmd_reserved0[3];
2046 	uint32_t		cmd_table_id;
2047 	uint32_t		cmd_modify_enable_mask;
2048 	uint8_t			cmd_reserved1[4];
2049 	uint32_t		cmd_flow_index;
2050 	uint8_t			cmd_reserved2[28];
2051 	struct mcx_flow_ctx	cmd_flow_ctx;
2052 } __packed __aligned(4);
2053 
2054 struct mcx_cmd_set_flow_table_entry_out {
2055 	uint8_t			cmd_status;
2056 	uint8_t			cmd_reserved0[3];
2057 	uint32_t		cmd_syndrome;
2058 	uint8_t			cmd_reserved1[8];
2059 } __packed __aligned(4);
2060 
2061 struct mcx_cmd_query_flow_table_entry_in {
2062 	uint16_t		cmd_opcode;
2063 	uint8_t			cmd_reserved0[4];
2064 	uint16_t		cmd_op_mod;
2065 	uint8_t			cmd_reserved1[8];
2066 } __packed __aligned(4);
2067 
2068 struct mcx_cmd_query_flow_table_entry_mb_in {
2069 	uint8_t			cmd_table_type;
2070 	uint8_t			cmd_reserved0[3];
2071 	uint32_t		cmd_table_id;
2072 	uint8_t			cmd_reserved1[8];
2073 	uint32_t		cmd_flow_index;
2074 	uint8_t			cmd_reserved2[28];
2075 } __packed __aligned(4);
2076 
2077 struct mcx_cmd_query_flow_table_entry_out {
2078 	uint8_t			cmd_status;
2079 	uint8_t			cmd_reserved0[3];
2080 	uint32_t		cmd_syndrome;
2081 	uint8_t			cmd_reserved1[8];
2082 } __packed __aligned(4);
2083 
2084 struct mcx_cmd_query_flow_table_entry_mb_out {
2085 	uint8_t			cmd_reserved0[48];
2086 	struct mcx_flow_ctx	cmd_flow_ctx;
2087 } __packed __aligned(4);
2088 
2089 struct mcx_cmd_delete_flow_table_entry_in {
2090 	uint16_t		cmd_opcode;
2091 	uint8_t			cmd_reserved0[4];
2092 	uint16_t		cmd_op_mod;
2093 	uint8_t			cmd_reserved1[8];
2094 } __packed __aligned(4);
2095 
2096 struct mcx_cmd_delete_flow_table_entry_mb_in {
2097 	uint8_t			cmd_table_type;
2098 	uint8_t			cmd_reserved0[3];
2099 	uint32_t		cmd_table_id;
2100 	uint8_t			cmd_reserved1[8];
2101 	uint32_t		cmd_flow_index;
2102 	uint8_t			cmd_reserved2[28];
2103 } __packed __aligned(4);
2104 
2105 struct mcx_cmd_delete_flow_table_entry_out {
2106 	uint8_t			cmd_status;
2107 	uint8_t			cmd_reserved0[3];
2108 	uint32_t		cmd_syndrome;
2109 	uint8_t			cmd_reserved1[8];
2110 } __packed __aligned(4);
2111 
2112 struct mcx_cmd_query_flow_group_in {
2113 	uint16_t		cmd_opcode;
2114 	uint8_t			cmd_reserved0[4];
2115 	uint16_t		cmd_op_mod;
2116 	uint8_t			cmd_reserved1[8];
2117 } __packed __aligned(4);
2118 
2119 struct mcx_cmd_query_flow_group_mb_in {
2120 	uint8_t			cmd_table_type;
2121 	uint8_t			cmd_reserved0[3];
2122 	uint32_t		cmd_table_id;
2123 	uint32_t		cmd_group_id;
2124 	uint8_t			cmd_reserved1[36];
2125 } __packed __aligned(4);
2126 
2127 struct mcx_cmd_query_flow_group_out {
2128 	uint8_t			cmd_status;
2129 	uint8_t			cmd_reserved0[3];
2130 	uint32_t		cmd_syndrome;
2131 	uint8_t			cmd_reserved1[8];
2132 } __packed __aligned(4);
2133 
2134 struct mcx_cmd_query_flow_group_mb_out {
2135 	uint8_t			cmd_reserved0[12];
2136 	uint32_t		cmd_start_flow_index;
2137 	uint8_t			cmd_reserved1[4];
2138 	uint32_t		cmd_end_flow_index;
2139 	uint8_t			cmd_reserved2[20];
2140 	uint32_t		cmd_match_criteria_enable;
2141 	uint8_t			cmd_match_criteria[512];
2142 	uint8_t			cmd_reserved4[448];
2143 } __packed __aligned(4);
2144 
2145 struct mcx_cmd_query_flow_table_in {
2146 	uint16_t		cmd_opcode;
2147 	uint8_t			cmd_reserved0[4];
2148 	uint16_t		cmd_op_mod;
2149 	uint8_t			cmd_reserved1[8];
2150 } __packed __aligned(4);
2151 
2152 struct mcx_cmd_query_flow_table_mb_in {
2153 	uint8_t			cmd_table_type;
2154 	uint8_t			cmd_reserved0[3];
2155 	uint32_t		cmd_table_id;
2156 	uint8_t			cmd_reserved1[40];
2157 } __packed __aligned(4);
2158 
2159 struct mcx_cmd_query_flow_table_out {
2160 	uint8_t			cmd_status;
2161 	uint8_t			cmd_reserved0[3];
2162 	uint32_t		cmd_syndrome;
2163 	uint8_t			cmd_reserved1[8];
2164 } __packed __aligned(4);
2165 
2166 struct mcx_cmd_query_flow_table_mb_out {
2167 	uint8_t			cmd_reserved0[4];
2168 	struct mcx_flow_table_ctx cmd_ctx;
2169 } __packed __aligned(4);
2170 
2171 struct mcx_cmd_alloc_flow_counter_in {
2172 	uint16_t		cmd_opcode;
2173 	uint8_t			cmd_reserved0[4];
2174 	uint16_t		cmd_op_mod;
2175 	uint8_t			cmd_reserved1[8];
2176 } __packed __aligned(4);
2177 
2178 struct mcx_cmd_query_rq_in {
2179 	uint16_t		cmd_opcode;
2180 	uint8_t			cmd_reserved0[4];
2181 	uint16_t		cmd_op_mod;
2182 	uint32_t		cmd_rqn;
2183 	uint8_t			cmd_reserved1[4];
2184 } __packed __aligned(4);
2185 
2186 struct mcx_cmd_query_rq_out {
2187 	uint8_t			cmd_status;
2188 	uint8_t			cmd_reserved0[3];
2189 	uint32_t		cmd_syndrome;
2190 	uint8_t			cmd_reserved1[8];
2191 } __packed __aligned(4);
2192 
2193 struct mcx_cmd_query_rq_mb_out {
2194 	uint8_t			cmd_reserved0[16];
2195 	struct mcx_rq_ctx	cmd_ctx;
2196 } __packed __aligned(4);
2197 
2198 struct mcx_cmd_query_sq_in {
2199 	uint16_t		cmd_opcode;
2200 	uint8_t			cmd_reserved0[4];
2201 	uint16_t		cmd_op_mod;
2202 	uint32_t		cmd_sqn;
2203 	uint8_t			cmd_reserved1[4];
2204 } __packed __aligned(4);
2205 
2206 struct mcx_cmd_query_sq_out {
2207 	uint8_t			cmd_status;
2208 	uint8_t			cmd_reserved0[3];
2209 	uint32_t		cmd_syndrome;
2210 	uint8_t			cmd_reserved1[8];
2211 } __packed __aligned(4);
2212 
2213 struct mcx_cmd_query_sq_mb_out {
2214 	uint8_t			cmd_reserved0[16];
2215 	struct mcx_sq_ctx	cmd_ctx;
2216 } __packed __aligned(4);
2217 
2218 struct mcx_cmd_alloc_flow_counter_out {
2219 	uint8_t			cmd_status;
2220 	uint8_t			cmd_reserved0[3];
2221 	uint32_t		cmd_syndrome;
2222 	uint8_t			cmd_reserved1[2];
2223 	uint16_t		cmd_flow_counter_id;
2224 	uint8_t			cmd_reserved2[4];
2225 } __packed __aligned(4);
2226 
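/*
 * Each rq/sq pair shares one of these records in the doorbell area;
 * the driver writes its ring counters here for the device to read.
 */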
2227 struct mcx_wq_doorbell {
2228 	uint32_t		 db_recv_counter;
2229 	uint32_t		 db_send_counter;
2230 } __packed __aligned(8);
2231 
2232 struct mcx_dmamem {
2233 	bus_dmamap_t		 mxm_map;
2234 	bus_dma_segment_t	 mxm_seg;
2235 	int			 mxm_nsegs;
2236 	size_t			 mxm_size;
2237 	caddr_t			 mxm_kva;
2238 };
2239 #define MCX_DMA_MAP(_mxm)	((_mxm)->mxm_map)
2240 #define MCX_DMA_DVA(_mxm)	((_mxm)->mxm_map->dm_segs[0].ds_addr)
2241 #define MCX_DMA_KVA(_mxm)	((void *)(_mxm)->mxm_kva)
2242 #define MCX_DMA_OFF(_mxm, _off)	((void *)((_mxm)->mxm_kva + (_off)))
2243 #define MCX_DMA_LEN(_mxm)	((_mxm)->mxm_size)
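
/*
 * A minimal sketch of how the accessors above fit together, assuming
 * a caller that owns a struct mcx_dmamem: the device is handed the
 * bus address while the driver reads and writes through the kernel
 * mapping.
 *
 *	struct mcx_dmamem mem;
 *
 *	if (mcx_dmamem_alloc(sc, &mem, MCX_PAGE_SIZE, MCX_PAGE_SIZE) != 0)
 *		return (-1);
 *	htobem64(&cqe->cq_input_ptr, MCX_DMA_DVA(&mem));
 *	memset(MCX_DMA_KVA(&mem), 0, MCX_DMA_LEN(&mem));
 */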
2244 
2245 struct mcx_hwmem {
2246 	bus_dmamap_t		 mhm_map;
2247 	bus_dma_segment_t	*mhm_segs;
2248 	unsigned int		 mhm_seg_count;
2249 	unsigned int		 mhm_npages;
2250 };
2251 
2252 struct mcx_slot {
2253 	bus_dmamap_t		 ms_map;
2254 	struct mbuf		*ms_m;
2255 };
2256 
2257 struct mcx_eq {
2258 	int			 eq_n;
2259 	uint32_t		 eq_cons;
2260 	struct mcx_dmamem	 eq_mem;
2261 };
2262 
2263 struct mcx_cq {
2264 	int			 cq_n;
2265 	struct mcx_dmamem	 cq_mem;
2266 	bus_addr_t		 cq_doorbell;
2267 	uint32_t		 cq_cons;
2268 	uint32_t		 cq_count;
2269 };
2270 
2271 struct mcx_calibration {
2272 	uint64_t		 c_timestamp;	/* previous mcx chip time */
2273 	uint64_t		 c_uptime;	/* previous kernel nanouptime */
2274 	uint64_t		 c_tbase;	/* mcx chip time */
2275 	uint64_t		 c_ubase;	/* kernel nanouptime */
2276 	uint64_t		 c_ratio;
2277 };
2278 
2279 #define MCX_CALIBRATE_FIRST    2
2280 #define MCX_CALIBRATE_NORMAL   32
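
/*
 * The chip timestamp is sampled alongside the kernel nanouptime on a
 * periodic timeout: a short first period lets the ratio converge
 * quickly, after which sampling drops back to the longer steady
 * state period.  c_ratio scales chip ticks to kernel time in between
 * samples.
 */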
2281 
2282 struct mcx_rx {
2283 	struct mcx_softc	*rx_softc;
2284 	struct ifiqueue		*rx_ifiq;
2285 
2286 	int			 rx_rqn;
2287 	struct mcx_dmamem	 rx_rq_mem;
2288 	struct mcx_slot		*rx_slots;
2289 	bus_addr_t		 rx_doorbell;
2290 
2291 	uint32_t		 rx_prod;
2292 	struct timeout		 rx_refill;
2293 	struct if_rxring	 rx_rxr;
2294 } __aligned(64);
2295 
2296 struct mcx_tx {
2297 	struct mcx_softc	*tx_softc;
2298 	struct ifqueue		*tx_ifq;
2299 
2300 	int			 tx_uar;
2301 	int			 tx_sqn;
2302 	struct mcx_dmamem	 tx_sq_mem;
2303 	struct mcx_slot		*tx_slots;
2304 	bus_addr_t		 tx_doorbell;
2305 	int			 tx_bf_offset;
2306 
2307 	uint32_t		 tx_cons;
2308 	uint32_t		 tx_prod;
2309 } __aligned(64);
2310 
2311 struct mcx_queues {
2312 	char			 q_name[16];
2313 	void			*q_ihc;
2314 	struct mcx_softc	*q_sc;
2315 	int			 q_uar;
2316 	int			 q_index;
2317 	struct mcx_rx		 q_rx;
2318 	struct mcx_tx		 q_tx;
2319 	struct mcx_cq		 q_cq;
2320 	struct mcx_eq		 q_eq;
2321 #if NKSTAT > 0
2322 	struct kstat		*q_kstat;
2323 #endif
2324 };
2325 
2326 struct mcx_flow_group {
2327 	int			 g_id;
2328 	int			 g_table;
2329 	int			 g_start;
2330 	int			 g_size;
2331 };
2332 
2333 #define MCX_FLOW_GROUP_PROMISC	 0
2334 #define MCX_FLOW_GROUP_ALLMULTI	 1
2335 #define MCX_FLOW_GROUP_MAC	 2
2336 #define MCX_FLOW_GROUP_RSS_L4	 3
2337 #define MCX_FLOW_GROUP_RSS_L3	 4
2338 #define MCX_FLOW_GROUP_RSS_NONE	 5
2339 #define MCX_NUM_FLOW_GROUPS	 6
2340 
2341 #define MCX_HASH_SEL_L3		MCX_TIR_CTX_HASH_SEL_SRC_IP | \
2342 				MCX_TIR_CTX_HASH_SEL_DST_IP
2343 #define MCX_HASH_SEL_L4		MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_SPORT | \
2344 				MCX_TIR_CTX_HASH_SEL_DPORT
2345 
2346 #define MCX_RSS_HASH_SEL_V4_TCP MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_TCP  |\
2347 				MCX_TIR_CTX_HASH_SEL_IPV4
2348 #define MCX_RSS_HASH_SEL_V6_TCP	MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_TCP | \
2349 				MCX_TIR_CTX_HASH_SEL_IPV6
2350 #define MCX_RSS_HASH_SEL_V4_UDP	MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_UDP | \
2351 				MCX_TIR_CTX_HASH_SEL_IPV4
2352 #define MCX_RSS_HASH_SEL_V6_UDP	MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_UDP | \
2353 				MCX_TIR_CTX_HASH_SEL_IPV6
2354 #define MCX_RSS_HASH_SEL_V4	MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_IPV4
2355 #define MCX_RSS_HASH_SEL_V6	MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_IPV6
2356 
2357 /*
2358  * There are a few different pieces involved in configuring RSS.
2359  * A Receive Queue Table (RQT) is the indirection table that maps packets to
2360  * different rx queues based on a hash value.  We only create one, because
2361  * we want to scatter any traffic we can apply RSS to across all our rx
2362  * queues.  Anything else will only be delivered to the first rx queue,
2363  * which doesn't require an RQT.
2364  *
2365  * A Transport Interface Receive (TIR) delivers packets to either a single rx
2366  * queue or an RQT, and in the latter case, specifies the set of fields
2367  * hashed, the hash function, and the hash key.  We need one of these for each
2368  * type of RSS traffic - v4 TCP, v6 TCP, v4 UDP, v6 UDP, other v4, other v6,
2369  * and one for non-RSS traffic.
2370  *
2371  * Flow tables hold flow table entries in sequence.  The first entry that
2372  * matches a packet is applied, sending the packet to either another flow
2373  * table or a TIR.  We use one flow table to select packets based on
2374  * destination MAC address, and a second to apply RSS.  The entries in the
2375  * first table send matching packets to the second, and the entries in the
2376  * RSS table send packets to RSS TIRs if possible, or the non-RSS TIR.
2377  *
2378  * The flow table entry that delivers packets to an RSS TIR must include match
2379  * criteria that ensure packets delivered to the TIR include all the fields
2380  * that the TIR hashes on - so for a v4 TCP TIR, the flow table entry must
2381  * only accept v4 TCP packets.  Accordingly, we need flow table entries for
2382  * each TIR.
2383  *
2384  * All of this is a lot more flexible than we need, and most of what we
2385  * do need can be described with a simple array.
2386  *
2387  * Each RSS config entry creates a TIR with hashing enabled on a set of
2388  * fields, pointing at either the first rx queue or the RQT containing
2389  * all the rx queues, plus a flow table entry that matches on an ether
2390  * type and optionally an ip proto and delivers packets to that TIR.
2391  */
2392 static struct mcx_rss_rule {
2393 	int			hash_sel;
2394 	int			flow_group;
2395 	int			ethertype;
2396 	int			ip_proto;
2397 } mcx_rss_config[] = {
2398 	/* udp and tcp for v4/v6 */
2399 	{ MCX_RSS_HASH_SEL_V4_TCP, MCX_FLOW_GROUP_RSS_L4,
2400 	  ETHERTYPE_IP, IPPROTO_TCP },
2401 	{ MCX_RSS_HASH_SEL_V6_TCP, MCX_FLOW_GROUP_RSS_L4,
2402 	  ETHERTYPE_IPV6, IPPROTO_TCP },
2403 	{ MCX_RSS_HASH_SEL_V4_UDP, MCX_FLOW_GROUP_RSS_L4,
2404 	  ETHERTYPE_IP, IPPROTO_UDP },
2405 	{ MCX_RSS_HASH_SEL_V6_UDP, MCX_FLOW_GROUP_RSS_L4,
2406 	  ETHERTYPE_IPV6, IPPROTO_UDP },
2407 
2408 	/* other v4/v6 */
2409 	{ MCX_RSS_HASH_SEL_V4, MCX_FLOW_GROUP_RSS_L3,
2410 	  ETHERTYPE_IP, 0 },
2411 	{ MCX_RSS_HASH_SEL_V6, MCX_FLOW_GROUP_RSS_L3,
2412 	  ETHERTYPE_IPV6, 0 },
2413 
2414 	/* non v4/v6 */
2415 	{ 0, MCX_FLOW_GROUP_RSS_NONE, 0, 0 }
2416 };
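
#if 0
/*
 * An illustrative sketch, not compiled: one way mcx_rss_config could
 * be walked when the interface comes up.  Each rule gets a TIR
 * hashing on its field set and a flow table entry steering matching
 * packets to that TIR.  The rqtn argument and the per-rule flow
 * index are assumptions here, and this would live after the
 * declarations below; the real mcx_up() also handles the single
 * queue and non-RSS cases.
 */
static int
mcx_rss_sketch(struct mcx_softc *sc, int rqtn)
{
	struct mcx_rss_rule *rule;
	int i;

	for (i = 0; i < nitems(mcx_rss_config); i++) {
		rule = &mcx_rss_config[i];

		/* one TIR per rule, hashing on the rule's field set */
		if (mcx_create_tir_indirect(sc, rqtn, rule->hash_sel,
		    &sc->sc_tir[i]) != 0)
			return (-1);

		/* one flow entry per TIR, matching only what it hashes */
		if (mcx_set_flow_table_entry_proto(sc, rule->flow_group,
		    i, rule->ethertype, rule->ip_proto,
		    sc->sc_tir[i] | MCX_FLOW_CONTEXT_DEST_TYPE_TIR) != 0)
			return (-1);
	}

	return (0);
}
#endif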
2417 
2418 struct mcx_softc {
2419 	struct device		 sc_dev;
2420 	struct arpcom		 sc_ac;
2421 	struct ifmedia		 sc_media;
2422 	uint64_t		 sc_media_status;
2423 	uint64_t		 sc_media_active;
2424 
2425 	pci_chipset_tag_t	 sc_pc;
2426 	pci_intr_handle_t	 sc_ih;
2427 	void			*sc_ihc;
2428 	pcitag_t		 sc_tag;
2429 
2430 	bus_dma_tag_t		 sc_dmat;
2431 	bus_space_tag_t		 sc_memt;
2432 	bus_space_handle_t	 sc_memh;
2433 	bus_size_t		 sc_mems;
2434 
2435 	struct mcx_dmamem	 sc_cmdq_mem;
2436 	unsigned int		 sc_cmdq_mask;
2437 	unsigned int		 sc_cmdq_size;
2438 
2439 	unsigned int		 sc_cmdq_token;
2440 
2441 	struct mcx_hwmem	 sc_boot_pages;
2442 	struct mcx_hwmem	 sc_init_pages;
2443 	struct mcx_hwmem	 sc_regular_pages;
2444 
2445 	int			 sc_uar;
2446 	int			 sc_pd;
2447 	int			 sc_tdomain;
2448 	uint32_t		 sc_lkey;
2449 	int			 sc_tis;
2450 	int			 sc_tir[nitems(mcx_rss_config)];
2451 	int			 sc_rqt;
2452 
2453 	struct mcx_dmamem	 sc_doorbell_mem;
2454 
2455 	struct mcx_eq		 sc_admin_eq;
2456 	struct mcx_eq		 sc_queue_eq;
2457 
2458 	int			 sc_hardmtu;
2459 	int			 sc_rxbufsz;
2460 
2461 	int			 sc_bf_size;
2462 	int			 sc_max_rqt_size;
2463 
2464 	struct task		 sc_port_change;
2465 
2466 	int			 sc_mac_flow_table_id;
2467 	int			 sc_rss_flow_table_id;
2468 	struct mcx_flow_group	 sc_flow_group[MCX_NUM_FLOW_GROUPS];
2469 	int			 sc_promisc_flow_enabled;
2470 	int			 sc_allmulti_flow_enabled;
2471 	int			 sc_mcast_flow_base;
2472 	int			 sc_extra_mcast;
2473 	uint8_t			 sc_mcast_flows[MCX_NUM_MCAST_FLOWS][ETHER_ADDR_LEN];
2474 
2475 	struct mcx_calibration	 sc_calibration[2];
2476 	unsigned int		 sc_calibration_gen;
2477 	struct timeout		 sc_calibrate;
2478 	uint32_t		 sc_mhz;
2479 	uint32_t		 sc_khz;
2480 
2481 	struct intrmap		*sc_intrmap;
2482 	struct mcx_queues	*sc_queues;
2483 
2484 	int			 sc_mcam_reg;
2485 
2486 #if NKSTAT > 0
2487 	struct kstat		*sc_kstat_ieee8023;
2488 	struct kstat		*sc_kstat_rfc2863;
2489 	struct kstat		*sc_kstat_rfc2819;
2490 	struct kstat		*sc_kstat_rfc3635;
2491 	unsigned int		 sc_kstat_mtmp_count;
2492 	struct kstat		**sc_kstat_mtmp;
2493 #endif
2494 
2495 	struct timecounter	 sc_timecounter;
2496 };
2497 #define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)
2498 
2499 static int	mcx_match(struct device *, void *, void *);
2500 static void	mcx_attach(struct device *, struct device *, void *);
2501 
2502 #if NKSTAT > 0
2503 static void	mcx_kstat_attach(struct mcx_softc *);
2504 #endif
2505 
2506 static void	mcx_timecounter_attach(struct mcx_softc *);
2507 
2508 static int	mcx_version(struct mcx_softc *);
2509 static int	mcx_init_wait(struct mcx_softc *);
2510 static int	mcx_enable_hca(struct mcx_softc *);
2511 static int	mcx_teardown_hca(struct mcx_softc *, uint16_t);
2512 static int	mcx_access_hca_reg(struct mcx_softc *, uint16_t, int, void *,
2513 		    int);
2514 static int	mcx_issi(struct mcx_softc *);
2515 static int	mcx_pages(struct mcx_softc *, struct mcx_hwmem *, uint16_t);
2516 static int	mcx_hca_max_caps(struct mcx_softc *);
2517 static int	mcx_hca_set_caps(struct mcx_softc *);
2518 static int	mcx_init_hca(struct mcx_softc *);
2519 static int	mcx_set_driver_version(struct mcx_softc *);
2520 static int	mcx_iff(struct mcx_softc *);
2521 static int	mcx_alloc_uar(struct mcx_softc *, int *);
2522 static int	mcx_alloc_pd(struct mcx_softc *);
2523 static int	mcx_alloc_tdomain(struct mcx_softc *);
2524 static int	mcx_create_eq(struct mcx_softc *, struct mcx_eq *, int,
2525 		    uint64_t, int);
2526 static int	mcx_query_nic_vport_context(struct mcx_softc *);
2527 static int	mcx_query_special_contexts(struct mcx_softc *);
2528 static int	mcx_set_port_mtu(struct mcx_softc *, int);
2529 static int	mcx_create_cq(struct mcx_softc *, struct mcx_cq *, int, int,
2530 		    int);
2531 static int	mcx_destroy_cq(struct mcx_softc *, struct mcx_cq *);
2532 static int	mcx_create_sq(struct mcx_softc *, struct mcx_tx *, int, int,
2533 		    int);
2534 static int	mcx_destroy_sq(struct mcx_softc *, struct mcx_tx *);
2535 static int	mcx_ready_sq(struct mcx_softc *, struct mcx_tx *);
2536 static int	mcx_create_rq(struct mcx_softc *, struct mcx_rx *, int, int);
2537 static int	mcx_destroy_rq(struct mcx_softc *, struct mcx_rx *);
2538 static int	mcx_ready_rq(struct mcx_softc *, struct mcx_rx *);
2539 static int	mcx_create_tir_direct(struct mcx_softc *, struct mcx_rx *,
2540 		    int *);
2541 static int	mcx_create_tir_indirect(struct mcx_softc *, int, uint32_t,
2542 		    int *);
2543 static int	mcx_destroy_tir(struct mcx_softc *, int);
2544 static int	mcx_create_tis(struct mcx_softc *, int *);
2545 static int	mcx_destroy_tis(struct mcx_softc *, int);
2546 static int	mcx_create_rqt(struct mcx_softc *, int, int *, int *);
2547 static int	mcx_destroy_rqt(struct mcx_softc *, int);
2548 static int	mcx_create_flow_table(struct mcx_softc *, int, int, int *);
2549 static int	mcx_set_flow_table_root(struct mcx_softc *, int);
2550 static int	mcx_destroy_flow_table(struct mcx_softc *, int);
2551 static int	mcx_create_flow_group(struct mcx_softc *, int, int, int,
2552 		    int, int, struct mcx_flow_match *);
2553 static int	mcx_destroy_flow_group(struct mcx_softc *, int);
2554 static int	mcx_set_flow_table_entry_mac(struct mcx_softc *, int, int,
2555 		    uint8_t *, uint32_t);
2556 static int	mcx_set_flow_table_entry_proto(struct mcx_softc *, int, int,
2557 		    int, int, uint32_t);
2558 static int	mcx_delete_flow_table_entry(struct mcx_softc *, int, int);
2559 
2560 #if NKSTAT > 0
2561 static int	mcx_query_rq(struct mcx_softc *, struct mcx_rx *, struct mcx_rq_ctx *);
2562 static int	mcx_query_sq(struct mcx_softc *, struct mcx_tx *, struct mcx_sq_ctx *);
2563 static int	mcx_query_cq(struct mcx_softc *, struct mcx_cq *, struct mcx_cq_ctx *);
2564 static int	mcx_query_eq(struct mcx_softc *, struct mcx_eq *, struct mcx_eq_ctx *);
2565 #endif
2566 
2567 #if 0
2568 static int	mcx_dump_flow_table(struct mcx_softc *, int);
2569 static int	mcx_dump_flow_table_entry(struct mcx_softc *, int, int);
2570 static int	mcx_dump_flow_group(struct mcx_softc *, int);
2571 #endif
2572 
2573 
2574 /*
2575 static void	mcx_cmdq_dump(const struct mcx_cmdq_entry *);
2576 static void	mcx_cmdq_mbox_dump(struct mcx_dmamem *, int);
2577 */
2578 static void	mcx_refill(void *);
2579 static int	mcx_process_rx(struct mcx_softc *, struct mcx_rx *,
2580 		    struct mcx_cq_entry *, struct mbuf_list *,
2581 		    const struct mcx_calibration *);
2582 static int	mcx_process_txeof(struct mcx_softc *, struct mcx_tx *,
2583 		    struct mcx_cq_entry *);
2584 static void	mcx_process_cq(struct mcx_softc *, struct mcx_queues *,
2585 		    struct mcx_cq *);
2586 
2587 static void	mcx_arm_cq(struct mcx_softc *, struct mcx_cq *, int);
2588 static void	mcx_arm_eq(struct mcx_softc *, struct mcx_eq *, int);
2589 static int	mcx_admin_intr(void *);
2590 static int	mcx_cq_intr(void *);
2591 
2592 static int	mcx_up(struct mcx_softc *);
2593 static void	mcx_down(struct mcx_softc *);
2594 static int	mcx_ioctl(struct ifnet *, u_long, caddr_t);
2595 static int	mcx_rxrinfo(struct mcx_softc *, struct if_rxrinfo *);
2596 static void	mcx_start(struct ifqueue *);
2597 static void	mcx_watchdog(struct ifnet *);
2598 static void	mcx_media_add_types(struct mcx_softc *);
2599 static void	mcx_media_status(struct ifnet *, struct ifmediareq *);
2600 static int	mcx_media_change(struct ifnet *);
2601 static int	mcx_get_sffpage(struct ifnet *, struct if_sffpage *);
2602 static void	mcx_port_change(void *);
2603 
2604 static void	mcx_calibrate_first(struct mcx_softc *);
2605 static void	mcx_calibrate(void *);
2606 
2607 static inline uint32_t
2608 		mcx_rd(struct mcx_softc *, bus_size_t);
2609 static inline void
2610 		mcx_wr(struct mcx_softc *, bus_size_t, uint32_t);
2611 static inline void
2612 		mcx_bar(struct mcx_softc *, bus_size_t, bus_size_t, int);
2613 
2614 static uint64_t	mcx_timer(struct mcx_softc *);
2615 
2616 static int	mcx_dmamem_alloc(struct mcx_softc *, struct mcx_dmamem *,
2617 		    bus_size_t, u_int align);
2618 static void	mcx_dmamem_zero(struct mcx_dmamem *);
2619 static void	mcx_dmamem_free(struct mcx_softc *, struct mcx_dmamem *);
2620 
2621 static int	mcx_hwmem_alloc(struct mcx_softc *, struct mcx_hwmem *,
2622 		    unsigned int);
2623 static void	mcx_hwmem_free(struct mcx_softc *, struct mcx_hwmem *);
2624 
2625 struct cfdriver mcx_cd = {
2626 	NULL,
2627 	"mcx",
2628 	DV_IFNET,
2629 };
2630 
2631 struct cfattach mcx_ca = {
2632 	sizeof(struct mcx_softc),
2633 	mcx_match,
2634 	mcx_attach,
2635 };
2636 
2637 static const struct pci_matchid mcx_devices[] = {
2638 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27700 },
2639 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27700VF },
2640 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27710 },
2641 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27710VF },
2642 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27800 },
2643 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27800VF },
2644 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28800 },
2645 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28800VF },
2646 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28908 },
2647 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT2892 },
2648 };
2649 
2650 struct mcx_eth_proto_capability {
2651 	uint64_t	cap_media;
2652 	uint64_t	cap_baudrate;
2653 };
2654 
2655 static const struct mcx_eth_proto_capability mcx_eth_cap_map[] = {
2656 	[MCX_ETHER_CAP_SGMII]		= { IFM_1000_SGMII,	IF_Gbps(1) },
2657 	[MCX_ETHER_CAP_1000_KX]		= { IFM_1000_KX,	IF_Gbps(1) },
2658 	[MCX_ETHER_CAP_10G_CX4]		= { IFM_10G_CX4,	IF_Gbps(10) },
2659 	[MCX_ETHER_CAP_10G_KX4]		= { IFM_10G_KX4,	IF_Gbps(10) },
2660 	[MCX_ETHER_CAP_10G_KR]		= { IFM_10G_KR,		IF_Gbps(10) },
2661 	[MCX_ETHER_CAP_40G_CR4]		= { IFM_40G_CR4,	IF_Gbps(40) },
2662 	[MCX_ETHER_CAP_40G_KR4]		= { IFM_40G_KR4,	IF_Gbps(40) },
2663 	[MCX_ETHER_CAP_10G_CR]		= { IFM_10G_SFP_CU,	IF_Gbps(10) },
2664 	[MCX_ETHER_CAP_10G_SR]		= { IFM_10G_SR,		IF_Gbps(10) },
2665 	[MCX_ETHER_CAP_10G_LR]		= { IFM_10G_LR,		IF_Gbps(10) },
2666 	[MCX_ETHER_CAP_40G_SR4]		= { IFM_40G_SR4,	IF_Gbps(40) },
2667 	[MCX_ETHER_CAP_40G_LR4]		= { IFM_40G_LR4,	IF_Gbps(40) },
2668 	[MCX_ETHER_CAP_50G_SR2]		= { 0 /*IFM_50G_SR2*/,	IF_Gbps(50) },
2669 	[MCX_ETHER_CAP_100G_CR4]	= { IFM_100G_CR4,	IF_Gbps(100) },
2670 	[MCX_ETHER_CAP_100G_SR4]	= { IFM_100G_SR4,	IF_Gbps(100) },
2671 	[MCX_ETHER_CAP_100G_KR4]	= { IFM_100G_KR4,	IF_Gbps(100) },
2672 	[MCX_ETHER_CAP_25G_CR]		= { IFM_25G_CR,		IF_Gbps(25) },
2673 	[MCX_ETHER_CAP_25G_KR]		= { IFM_25G_KR,		IF_Gbps(25) },
2674 	[MCX_ETHER_CAP_25G_SR]		= { IFM_25G_SR,		IF_Gbps(25) },
2675 	[MCX_ETHER_CAP_50G_CR2]		= { IFM_50G_CR2,	IF_Gbps(50) },
2676 	[MCX_ETHER_CAP_50G_KR2]		= { IFM_50G_KR2,	IF_Gbps(50) },
2677 };
2678 
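/*
 * Hardware object numbers (cqn, sqn, tirn, and so on) come back from
 * the device as the low 24 bits of a big-endian word.
 */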
2679 static int
2680 mcx_get_id(uint32_t val)
2681 {
2682 	return (betoh32(val) & 0x00ffffff);
2683 }
2684 
2685 static int
2686 mcx_match(struct device *parent, void *match, void *aux)
2687 {
2688 	return (pci_matchbyid(aux, mcx_devices, nitems(mcx_devices)));
2689 }
2690 
2691 static void
2692 mcx_attach(struct device *parent, struct device *self, void *aux)
2693 {
2694 	struct mcx_softc *sc = (struct mcx_softc *)self;
2695 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2696 	struct pci_attach_args *pa = aux;
2697 	pcireg_t memtype;
2698 	uint32_t r;
2699 	unsigned int cq_stride;
2700 	unsigned int cq_size;
2701 	const char *intrstr;
2702 	int i, msix;
2703 
2704 	sc->sc_pc = pa->pa_pc;
2705 	sc->sc_tag = pa->pa_tag;
2706 	sc->sc_dmat = pa->pa_dmat;
2707 
2708 	/* Map the PCI memory space */
2709 	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MCX_HCA_BAR);
2710 	if (pci_mapreg_map(pa, MCX_HCA_BAR, memtype,
2711 	    BUS_SPACE_MAP_PREFETCHABLE, &sc->sc_memt, &sc->sc_memh,
2712 	    NULL, &sc->sc_mems, 0)) {
2713 		printf(": unable to map register memory\n");
2714 		return;
2715 	}
2716 
2717 	if (mcx_version(sc) != 0) {
2718 		/* error printed by mcx_version */
2719 		goto unmap;
2720 	}
2721 
2722 	r = mcx_rd(sc, MCX_CMDQ_ADDR_LO);
2723 	cq_stride = 1 << MCX_CMDQ_LOG_STRIDE(r); /* size of the entries */
2724 	cq_size = 1 << MCX_CMDQ_LOG_SIZE(r); /* number of entries */
2725 	if (cq_size > MCX_MAX_CQE) {
2726 		printf(", command queue size overflow %u\n", cq_size);
2727 		goto unmap;
2728 	}
2729 	if (cq_stride < sizeof(struct mcx_cmdq_entry)) {
2730 		printf(", command queue entry size underflow %u\n", cq_stride);
2731 		goto unmap;
2732 	}
2733 	if (cq_stride * cq_size > MCX_PAGE_SIZE) {
2734 		printf(", command queue page overflow\n");
2735 		goto unmap;
2736 	}
2737 
2738 	if (mcx_dmamem_alloc(sc, &sc->sc_doorbell_mem, MCX_DOORBELL_AREA_SIZE,
2739 	    MCX_PAGE_SIZE) != 0) {
2740 		printf(", unable to allocate doorbell memory\n");
2741 		goto unmap;
2742 	}
2743 
2744 	if (mcx_dmamem_alloc(sc, &sc->sc_cmdq_mem, MCX_PAGE_SIZE,
2745 	    MCX_PAGE_SIZE) != 0) {
2746 		printf(", unable to allocate command queue\n");
2747 		goto dbfree;
2748 	}
2749 
2750 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
2751 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint32_t),
2752 	    BUS_SPACE_BARRIER_WRITE);
2753 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem));
2754 	mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint32_t),
2755 	    BUS_SPACE_BARRIER_WRITE);
2756 
2757 	if (mcx_init_wait(sc) != 0) {
2758 		printf(", timeout waiting for init\n");
2759 		goto cqfree;
2760 	}
2761 
2762 	sc->sc_cmdq_mask = cq_size - 1;
2763 	sc->sc_cmdq_size = cq_stride;
2764 
2765 	if (mcx_enable_hca(sc) != 0) {
2766 		/* error printed by mcx_enable_hca */
2767 		goto cqfree;
2768 	}
2769 
2770 	if (mcx_issi(sc) != 0) {
2771 		/* error printed by mcx_issi */
2772 		goto teardown;
2773 	}
2774 
2775 	if (mcx_pages(sc, &sc->sc_boot_pages,
2776 	    htobe16(MCX_CMD_QUERY_PAGES_BOOT)) != 0) {
2777 		/* error printed by mcx_pages */
2778 		goto teardown;
2779 	}
2780 
2781 	if (mcx_hca_max_caps(sc) != 0) {
2782 		/* error printed by mcx_hca_max_caps */
2783 		goto teardown;
2784 	}
2785 
2786 	if (mcx_hca_set_caps(sc) != 0) {
2787 		/* error printed by mcx_hca_set_caps */
2788 		goto teardown;
2789 	}
2790 
2791 	if (mcx_pages(sc, &sc->sc_init_pages,
2792 	    htobe16(MCX_CMD_QUERY_PAGES_INIT)) != 0) {
2793 		/* error printed by mcx_pages */
2794 		goto teardown;
2795 	}
2796 
2797 	if (mcx_init_hca(sc) != 0) {
2798 		/* error printed by mcx_init_hca */
2799 		goto teardown;
2800 	}
2801 
2802 	if (mcx_pages(sc, &sc->sc_regular_pages,
2803 	    htobe16(MCX_CMD_QUERY_PAGES_REGULAR)) != 0) {
2804 		/* error printed by mcx_pages */
2805 		goto teardown;
2806 	}
2807 
2808 	/* apparently not necessary? */
2809 	if (mcx_set_driver_version(sc) != 0) {
2810 		/* error printed by mcx_set_driver_version */
2811 		goto teardown;
2812 	}
2813 
2814 	if (mcx_iff(sc) != 0) {	/* modify nic vport context */
2815 		/* error printed by mcx_iff? */
2816 		goto teardown;
2817 	}
2818 
2819 	if (mcx_alloc_uar(sc, &sc->sc_uar) != 0) {
2820 		/* error printed by mcx_alloc_uar */
2821 		goto teardown;
2822 	}
2823 
2824 	if (mcx_alloc_pd(sc) != 0) {
2825 		/* error printed by mcx_alloc_pd */
2826 		goto teardown;
2827 	}
2828 
2829 	if (mcx_alloc_tdomain(sc) != 0) {
2830 		/* error printed by mcx_alloc_tdomain */
2831 		goto teardown;
2832 	}
2833 
2834 	msix = pci_intr_msix_count(pa->pa_pc, pa->pa_tag);
2835 	if (msix < 2) {
2836 		printf(": not enough msi-x vectors\n");
2837 		goto teardown;
2838 	}
2839 
2840 	/*
2841 	 * PRM makes no mention of msi interrupts, just legacy and msi-x.
2842 	 * mellanox support tells me legacy interrupts are not supported,
2843 	 * so we're stuck with just msi-x.
2844 	 */
2845 	if (pci_intr_map_msix(pa, 0, &sc->sc_ih) != 0) {
2846 		printf(": unable to map interrupt\n");
2847 		goto teardown;
2848 	}
2849 	intrstr = pci_intr_string(sc->sc_pc, sc->sc_ih);
2850 	sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih,
2851 	    IPL_NET | IPL_MPSAFE, mcx_admin_intr, sc, DEVNAME(sc));
2852 	if (sc->sc_ihc == NULL) {
2853 		printf(": unable to establish interrupt");
2854 		if (intrstr != NULL)
2855 			printf(" at %s", intrstr);
2856 		printf("\n");
2857 		goto teardown;
2858 	}
2859 
2860 	if (mcx_create_eq(sc, &sc->sc_admin_eq, sc->sc_uar,
2861 	    (1ull << MCX_EVENT_TYPE_INTERNAL_ERROR) |
2862 	    (1ull << MCX_EVENT_TYPE_PORT_CHANGE) |
2863 	    (1ull << MCX_EVENT_TYPE_CMD_COMPLETION) |
2864 	    (1ull << MCX_EVENT_TYPE_PAGE_REQUEST), 0) != 0) {
2865 		/* error printed by mcx_create_eq */
2866 		goto teardown;
2867 	}
2868 
2869 	if (mcx_query_nic_vport_context(sc) != 0) {
2870 		/* error printed by mcx_query_nic_vport_context */
2871 		goto teardown;
2872 	}
2873 
2874 	if (mcx_query_special_contexts(sc) != 0) {
2875 		/* error printed by mcx_query_special_contexts */
2876 		goto teardown;
2877 	}
2878 
2879 	if (mcx_set_port_mtu(sc, MCX_HARDMTU) != 0) {
2880 		/* error printed by mcx_set_port_mtu */
2881 		goto teardown;
2882 	}
2883 
2884 	printf(", %s, address %s\n", intrstr,
2885 	    ether_sprintf(sc->sc_ac.ac_enaddr));
2886 
2887 	msix--; /* admin ops took one */
2888 	sc->sc_intrmap = intrmap_create(&sc->sc_dev, msix, MCX_MAX_QUEUES,
2889 	    INTRMAP_POWEROF2);
2890 	if (sc->sc_intrmap == NULL) {
2891 		printf("%s: unable to create interrupt map\n", DEVNAME(sc));
2892 		goto teardown;
2893 	}
2894 	sc->sc_queues = mallocarray(intrmap_count(sc->sc_intrmap),
2895 	    sizeof(*sc->sc_queues), M_DEVBUF, M_WAITOK|M_ZERO);
2896 	if (sc->sc_queues == NULL) {
2897 		printf("%s: unable to create queues\n", DEVNAME(sc));
2898 		goto intrunmap;
2899 	}
2900 
2901 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
2902 	ifp->if_softc = sc;
2903 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
2904 	ifp->if_xflags = IFXF_MPSAFE;
2905 	ifp->if_ioctl = mcx_ioctl;
2906 	ifp->if_qstart = mcx_start;
2907 	ifp->if_watchdog = mcx_watchdog;
2908 	ifp->if_hardmtu = sc->sc_hardmtu;
2909 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
2910 	    IFCAP_CSUM_UDPv4 | IFCAP_CSUM_UDPv6 | IFCAP_CSUM_TCPv4 |
2911 	    IFCAP_CSUM_TCPv6;
2912 #if NVLAN > 0
2913 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
2914 #endif
2915 	ifq_set_maxlen(&ifp->if_snd, 1024);
2916 
2917 	ifmedia_init(&sc->sc_media, IFM_IMASK, mcx_media_change,
2918 	    mcx_media_status);
2919 	mcx_media_add_types(sc);
2920 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
2921 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
2922 
2923 	if_attach(ifp);
2924 	ether_ifattach(ifp);
2925 
2926 	if_attach_iqueues(ifp, intrmap_count(sc->sc_intrmap));
2927 	if_attach_queues(ifp, intrmap_count(sc->sc_intrmap));
2928 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
2929 		struct ifiqueue *ifiq = ifp->if_iqs[i];
2930 		struct ifqueue *ifq = ifp->if_ifqs[i];
2931 		struct mcx_queues *q = &sc->sc_queues[i];
2932 		struct mcx_rx *rx = &q->q_rx;
2933 		struct mcx_tx *tx = &q->q_tx;
2934 		pci_intr_handle_t ih;
2935 		int vec;
2936 
2937 		vec = i + 1;
2938 		q->q_sc = sc;
2939 		q->q_index = i;
2940 
2941 		if (mcx_alloc_uar(sc, &q->q_uar) != 0) {
2942 			printf("%s: unable to alloc uar %d\n",
2943 			    DEVNAME(sc), i);
2944 			goto intrdisestablish;
2945 		}
2946 
2947 		if (mcx_create_eq(sc, &q->q_eq, q->q_uar, 0, vec) != 0) {
2948 			printf("%s: unable to create event queue %d\n",
2949 			    DEVNAME(sc), i);
2950 			goto intrdisestablish;
2951 		}
2952 
2953 		rx->rx_softc = sc;
2954 		rx->rx_ifiq = ifiq;
2955 		timeout_set(&rx->rx_refill, mcx_refill, rx);
2956 		ifiq->ifiq_softc = rx;
2957 
2958 		tx->tx_softc = sc;
2959 		tx->tx_ifq = ifq;
2960 		ifq->ifq_softc = tx;
2961 
2962 		if (pci_intr_map_msix(pa, vec, &ih) != 0) {
2963 			printf("%s: unable to map queue interrupt %d\n",
2964 			    DEVNAME(sc), i);
2965 			goto intrdisestablish;
2966 		}
2967 		snprintf(q->q_name, sizeof(q->q_name), "%s:%d",
2968 		    DEVNAME(sc), i);
2969 		q->q_ihc = pci_intr_establish_cpu(sc->sc_pc, ih,
2970 		    IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
2971 		    mcx_cq_intr, q, q->q_name);
2972 		if (q->q_ihc == NULL) {
2973 			printf("%s: unable to establish interrupt %d\n",
2974 			    DEVNAME(sc), i);
2975 			goto intrdisestablish;
2976 		}
2977 	}
2978 
2979 	timeout_set(&sc->sc_calibrate, mcx_calibrate, sc);
2980 
2981 	task_set(&sc->sc_port_change, mcx_port_change, sc);
2982 	mcx_port_change(sc);
2983 
2984 	sc->sc_mac_flow_table_id = -1;
2985 	sc->sc_rss_flow_table_id = -1;
2986 	sc->sc_rqt = -1;
2987 	for (i = 0; i < MCX_NUM_FLOW_GROUPS; i++) {
2988 		struct mcx_flow_group *mfg = &sc->sc_flow_group[i];
2989 		mfg->g_id = -1;
2990 		mfg->g_table = -1;
2991 		mfg->g_size = 0;
2992 		mfg->g_start = 0;
2993 	}
2994 	sc->sc_extra_mcast = 0;
2995 	memset(sc->sc_mcast_flows, 0, sizeof(sc->sc_mcast_flows));
2996 
2997 #if NKSTAT > 0
2998 	mcx_kstat_attach(sc);
2999 #endif
3000 	mcx_timecounter_attach(sc);
3001 	return;
3002 
3003 intrdisestablish:
3004 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
3005 		struct mcx_queues *q = &sc->sc_queues[i];
3006 		if (q->q_ihc == NULL)
3007 			continue;
3008 		pci_intr_disestablish(sc->sc_pc, q->q_ihc);
3009 		q->q_ihc = NULL;
3010 	}
3011 	free(sc->sc_queues, M_DEVBUF,
3012 	    intrmap_count(sc->sc_intrmap) * sizeof(*sc->sc_queues));
3013 intrunmap:
3014 	intrmap_destroy(sc->sc_intrmap);
3015 	sc->sc_intrmap = NULL;
3016 teardown:
3017 	mcx_teardown_hca(sc, htobe16(MCX_CMD_TEARDOWN_HCA_GRACEFUL));
3018 	/* error printed by mcx_teardown_hca, and we're already unwinding */
3019 cqfree:
3020 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
3021 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t),
3022 	    BUS_SPACE_BARRIER_WRITE);
3023 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem) |
3024 	    MCX_CMDQ_INTERFACE_DISABLED);
3025 	mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint64_t),
3026 	    BUS_SPACE_BARRIER_WRITE);
3027 
3028 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, 0);
3029 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t),
3030 	    BUS_SPACE_BARRIER_WRITE);
3031 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_CMDQ_INTERFACE_DISABLED);
3032 
3033 	mcx_dmamem_free(sc, &sc->sc_cmdq_mem);
3034 dbfree:
3035 	mcx_dmamem_free(sc, &sc->sc_doorbell_mem);
3036 unmap:
3037 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
3038 	sc->sc_mems = 0;
3039 }
3040 
3041 static int
3042 mcx_version(struct mcx_softc *sc)
3043 {
3044 	uint32_t fw0, fw1;
3045 	uint16_t cmdif;
3046 
3047 	fw0 = mcx_rd(sc, MCX_FW_VER);
3048 	fw1 = mcx_rd(sc, MCX_CMDIF_FW_SUBVER);
3049 
3050 	printf(": FW %u.%u.%04u", MCX_FW_VER_MAJOR(fw0),
3051 	    MCX_FW_VER_MINOR(fw0), MCX_FW_VER_SUBMINOR(fw1));
3052 
3053 	cmdif = MCX_CMDIF(fw1);
3054 	if (cmdif != MCX_CMD_IF_SUPPORTED) {
3055 		printf(", unsupported command interface %u\n", cmdif);
3056 		return (-1);
3057 	}
3058 
3059 	return (0);
3060 }
3061 
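/*
 * Wait up to two seconds (2000 polls, 1ms apart) for the device to
 * report ready before the command queue is used.
 */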
3062 static int
3063 mcx_init_wait(struct mcx_softc *sc)
3064 {
3065 	unsigned int i;
3066 	uint32_t r;
3067 
3068 	for (i = 0; i < 2000; i++) {
3069 		r = mcx_rd(sc, MCX_STATE);
3070 		if ((r & MCX_STATE_MASK) == MCX_STATE_READY)
3071 			return (0);
3072 
3073 		delay(1000);
3074 		mcx_bar(sc, MCX_STATE, sizeof(uint32_t),
3075 		    BUS_SPACE_BARRIER_READ);
3076 	}
3077 
3078 	return (-1);
3079 }
3080 
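/*
 * Poll a command queue entry until the device passes ownership back
 * to software, syncing the queue memory before each check; gives up
 * after roughly msec milliseconds.
 */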
3081 static uint8_t
3082 mcx_cmdq_poll(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3083     unsigned int msec)
3084 {
3085 	unsigned int i;
3086 
3087 	for (i = 0; i < msec; i++) {
3088 		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
3089 		    0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);
3090 
3091 		if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
3092 		    MCX_CQ_STATUS_OWN_SW)
3093 			return (0);
3094 
3095 		delay(1000);
3096 	}
3097 
3098 	return (ETIMEDOUT);
3099 }
3100 
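/*
 * Command queue entries and mailboxes carry an xor-based signature
 * byte.  The helpers below fold 64, 32 and 8 bit fields into a
 * running 32 bit xor, which mcx_mix_done() then collapses into a
 * single byte.
 */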
3101 static uint32_t
3102 mcx_mix_u64(uint32_t xor, uint64_t u64)
3103 {
3104 	xor ^= u64 >> 32;
3105 	xor ^= u64;
3106 
3107 	return (xor);
3108 }
3109 
3110 static uint32_t
3111 mcx_mix_u32(uint32_t xor, uint32_t u32)
3112 {
3113 	xor ^= u32;
3114 
3115 	return (xor);
3116 }
3117 
3118 static uint32_t
3119 mcx_mix_u8(uint32_t xor, uint8_t u8)
3120 {
3121 	xor ^= u8;
3122 
3123 	return (xor);
3124 }
3125 
3126 static uint8_t
3127 mcx_mix_done(uint32_t xor)
3128 {
3129 	xor ^= xor >> 16;
3130 	xor ^= xor >> 8;
3131 
3132 	return (xor);
3133 }
3134 
3135 static uint8_t
3136 mcx_xor(const void *buf, size_t len)
3137 {
3138 	const uint32_t *dwords = buf;
3139 	uint32_t xor = 0xff;
3140 	size_t i;
3141 
3142 	len /= sizeof(*dwords);
3143 
3144 	for (i = 0; i < len; i++)
3145 		xor ^= dwords[i];
3146 
3147 	return (mcx_mix_done(xor));
3148 }
3149 
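/*
 * Allocate a non-zero token, used to tie a command's mailboxes back
 * to its queue entry.
 */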
3150 static uint8_t
3151 mcx_cmdq_token(struct mcx_softc *sc)
3152 {
3153 	uint8_t token;
3154 
3155 	do {
3156 		token = ++sc->sc_cmdq_token;
3157 	} while (token == 0);
3158 
3159 	return (token);
3160 }
3161 
3162 static void
3163 mcx_cmdq_init(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3164     uint32_t ilen, uint32_t olen, uint8_t token)
3165 {
3166 	memset(cqe, 0, sc->sc_cmdq_size);
3167 
3168 	cqe->cq_type = MCX_CMDQ_TYPE_PCIE;
3169 	htobem32(&cqe->cq_input_length, ilen);
3170 	htobem32(&cqe->cq_output_length, olen);
3171 	cqe->cq_token = token;
3172 	cqe->cq_status = MCX_CQ_STATUS_OWN_HW;
3173 }
3174 
3175 static void
3176 mcx_cmdq_sign(struct mcx_cmdq_entry *cqe)
3177 {
3178 	cqe->cq_signature = ~mcx_xor(cqe, sizeof(*cqe));
3179 }
3180 
3181 static int
3182 mcx_cmdq_verify(const struct mcx_cmdq_entry *cqe)
3183 {
3184 	/* return (mcx_xor(cqe, sizeof(*cqe)) ? -1 :  0); */
3185 	return (0);
3186 }
3187 
3188 static void *
3189 mcx_cmdq_in(struct mcx_cmdq_entry *cqe)
3190 {
3191 	return (&cqe->cq_input_data);
3192 }
3193 
3194 static void *
3195 mcx_cmdq_out(struct mcx_cmdq_entry *cqe)
3196 {
3197 	return (&cqe->cq_output_data);
3198 }
3199 
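/*
 * Posting a command: sign the entry, flush the queue memory, then
 * ring the doorbell for the slot so the device starts on it.
 */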
3200 static void
3201 mcx_cmdq_post(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3202     unsigned int slot)
3203 {
3204 	mcx_cmdq_sign(cqe);
3205 
3206 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
3207 	    0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_PRERW);
3208 
3209 	mcx_wr(sc, MCX_CMDQ_DOORBELL, 1U << slot);
3210 	mcx_bar(sc, MCX_CMDQ_DOORBELL, sizeof(uint32_t),
3211 	    BUS_SPACE_BARRIER_WRITE);
3212 }
3213 
3214 static int
3215 mcx_enable_hca(struct mcx_softc *sc)
3216 {
3217 	struct mcx_cmdq_entry *cqe;
3218 	struct mcx_cmd_enable_hca_in *in;
3219 	struct mcx_cmd_enable_hca_out *out;
3220 	int error;
3221 	uint8_t status;
3222 
3223 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3224 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3225 
3226 	in = mcx_cmdq_in(cqe);
3227 	in->cmd_opcode = htobe16(MCX_CMD_ENABLE_HCA);
3228 	in->cmd_op_mod = htobe16(0);
3229 	in->cmd_function_id = htobe16(0);
3230 
3231 	mcx_cmdq_post(sc, cqe, 0);
3232 
3233 	error = mcx_cmdq_poll(sc, cqe, 1000);
3234 	if (error != 0) {
3235 		printf(", hca enable timeout\n");
3236 		return (-1);
3237 	}
3238 	if (mcx_cmdq_verify(cqe) != 0) {
3239 		printf(", hca enable command corrupt\n");
3240 		return (-1);
3241 	}
3242 
3243 	status = cqe->cq_output_data[0];
3244 	if (status != MCX_CQ_STATUS_OK) {
3245 		printf(", hca enable failed (%x)\n", status);
3246 		return (-1);
3247 	}
3248 
3249 	return (0);
3250 }
3251 
3252 static int
3253 mcx_teardown_hca(struct mcx_softc *sc, uint16_t profile)
3254 {
3255 	struct mcx_cmdq_entry *cqe;
3256 	struct mcx_cmd_teardown_hca_in *in;
3257 	struct mcx_cmd_teardown_hca_out *out;
3258 	int error;
3259 	uint8_t status;
3260 
3261 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3262 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3263 
3264 	in = mcx_cmdq_in(cqe);
3265 	in->cmd_opcode = htobe16(MCX_CMD_TEARDOWN_HCA);
3266 	in->cmd_op_mod = htobe16(0);
3267 	in->cmd_profile = profile;
3268 
3269 	mcx_cmdq_post(sc, cqe, 0);
3270 
3271 	error = mcx_cmdq_poll(sc, cqe, 1000);
3272 	if (error != 0) {
3273 		printf(", hca teardown timeout\n");
3274 		return (-1);
3275 	}
3276 	if (mcx_cmdq_verify(cqe) != 0) {
3277 		printf(", hca teardown command corrupt\n");
3278 		return (-1);
3279 	}
3280 
3281 	status = cqe->cq_output_data[0];
3282 	if (status != MCX_CQ_STATUS_OK) {
3283 		printf(", hca teardown failed (%x)\n", status);
3284 		return (-1);
3285 	}
3286 
3287 	return (0);
3288 }
3289 
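/*
 * Commands with more input or output than fits inline in the queue
 * entry use a chain of mailboxes.  Each mailbox carries its block
 * number and the command token and points at the next mailbox in
 * the chain; the head of the chain is patched into the queue entry.
 */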
3290 static int
3291 mcx_cmdq_mboxes_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
3292     unsigned int nmb, uint64_t *ptr, uint8_t token)
3293 {
3294 	caddr_t kva;
3295 	uint64_t dva;
3296 	int i;
3297 	int error;
3298 
3299 	error = mcx_dmamem_alloc(sc, mxm,
3300 	    nmb * MCX_CMDQ_MAILBOX_SIZE, MCX_CMDQ_MAILBOX_ALIGN);
3301 	if (error != 0)
3302 		return (error);
3303 
3304 	mcx_dmamem_zero(mxm);
3305 
3306 	dva = MCX_DMA_DVA(mxm);
3307 	kva = MCX_DMA_KVA(mxm);
3308 	for (i = 0; i < nmb; i++) {
3309 		struct mcx_cmdq_mailbox *mbox = (struct mcx_cmdq_mailbox *)kva;
3310 
3311 		/* patch the cqe or mbox pointing at this one */
3312 		htobem64(ptr, dva);
3313 
3314 		/* fill in this mbox */
3315 		htobem32(&mbox->mb_block_number, i);
3316 		mbox->mb_token = token;
3317 
3318 		/* move to the next one */
3319 		ptr = &mbox->mb_next_ptr;
3320 
3321 		dva += MCX_CMDQ_MAILBOX_SIZE;
3322 		kva += MCX_CMDQ_MAILBOX_SIZE;
3323 	}
3324 
3325 	return (0);
3326 }
3327 
3328 static uint32_t
3329 mcx_cmdq_mbox_ctrl_sig(const struct mcx_cmdq_mailbox *mb)
3330 {
3331 	uint32_t xor = 0xff;
3332 
3333 	/* only 3 fields get set, so mix them directly */
3334 	xor = mcx_mix_u64(xor, mb->mb_next_ptr);
3335 	xor = mcx_mix_u32(xor, mb->mb_block_number);
3336 	xor = mcx_mix_u8(xor, mb->mb_token);
3337 
3338 	return (mcx_mix_done(xor));
3339 }
3340 
3341 static void
3342 mcx_cmdq_mboxes_sign(struct mcx_dmamem *mxm, unsigned int nmb)
3343 {
3344 	caddr_t kva;
3345 	int i;
3346 
3347 	kva = MCX_DMA_KVA(mxm);
3348 
3349 	for (i = 0; i < nmb; i++) {
3350 		struct mcx_cmdq_mailbox *mb = (struct mcx_cmdq_mailbox *)kva;
3351 		uint8_t sig = mcx_cmdq_mbox_ctrl_sig(mb);
3352 		mb->mb_ctrl_signature = sig;
3353 		mb->mb_signature = sig ^
3354 		    mcx_xor(mb->mb_data, sizeof(mb->mb_data));
3355 
3356 		kva += MCX_CMDQ_MAILBOX_SIZE;
3357 	}
3358 }
3359 
3360 static void
3361 mcx_cmdq_mboxes_sync(struct mcx_softc *sc, struct mcx_dmamem *mxm, int ops)
3362 {
3363 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(mxm),
3364 	    0, MCX_DMA_LEN(mxm), ops);
3365 }
3366 
3367 static struct mcx_cmdq_mailbox *
3368 mcx_cq_mbox(struct mcx_dmamem *mxm, unsigned int i)
3369 {
3370 	caddr_t kva;
3371 
3372 	kva = MCX_DMA_KVA(mxm);
3373 	kva += i * MCX_CMDQ_MAILBOX_SIZE;
3374 
3375 	return ((struct mcx_cmdq_mailbox *)kva);
3376 }
3377 
3378 static inline void *
3379 mcx_cq_mbox_data(struct mcx_cmdq_mailbox *mb)
3380 {
3381 	return (&mb->mb_data);
3382 }
3383 
3384 static void
3385 mcx_cmdq_mboxes_copyin(struct mcx_dmamem *mxm, unsigned int nmb,
3386     void *b, size_t len)
3387 {
3388 	caddr_t buf = b;
3389 	struct mcx_cmdq_mailbox *mb;
3390 	int i;
3391 
3392 	mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
3393 	for (i = 0; i < nmb; i++) {
3394 
3395 		memcpy(mb->mb_data, buf, min(sizeof(mb->mb_data), len));
3396 
3397 		if (sizeof(mb->mb_data) >= len)
3398 			break;
3399 
3400 		buf += sizeof(mb->mb_data);
3401 		len -= sizeof(mb->mb_data);
3402 		mb++;
3403 	}
3404 }
3405 
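/*
 * Write the bus address of each page of a buffer into the mailbox
 * chain, starting at the given byte offset and moving to the next
 * mailbox whenever the current data area fills up.
 */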
3406 static void
3407 mcx_cmdq_mboxes_pas(struct mcx_dmamem *mxm, int offset, int npages,
3408     struct mcx_dmamem *buf)
3409 {
3410 	uint64_t *pas;
3411 	int mbox, mbox_pages, i;
3412 
3413 	mbox = offset / MCX_CMDQ_MAILBOX_DATASIZE;
3414 	offset %= MCX_CMDQ_MAILBOX_DATASIZE;
3415 
3416 	pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
3417 	pas += (offset / sizeof(*pas));
3418 	mbox_pages = (MCX_CMDQ_MAILBOX_DATASIZE - offset) / sizeof(*pas);
3419 	for (i = 0; i < npages; i++) {
3420 		if (i == mbox_pages) {
3421 			mbox++;
3422 			pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
3423 			mbox_pages += MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas);
3424 		}
3425 		*pas = htobe64(MCX_DMA_DVA(buf) + (i * MCX_PAGE_SIZE));
3426 		pas++;
3427 	}
3428 }
3429 
3430 static void
3431 mcx_cmdq_mboxes_copyout(struct mcx_dmamem *mxm, int nmb, void *b, size_t len)
3432 {
3433 	caddr_t buf = b;
3434 	struct mcx_cmdq_mailbox *mb;
3435 	int i;
3436 
3437 	mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
3438 	for (i = 0; i < nmb; i++) {
3439 		memcpy(buf, mb->mb_data, min(sizeof(mb->mb_data), len));
3440 
3441 		if (sizeof(mb->mb_data) >= len)
3442 			break;
3443 
3444 		buf += sizeof(mb->mb_data);
3445 		len -= sizeof(mb->mb_data);
3446 		mb++;
3447 	}
3448 }
3449 
3450 static void
3451 mcx_cq_mboxes_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
3452 {
3453 	mcx_dmamem_free(sc, mxm);
3454 }
3455 
3456 #if 0
3457 static void
3458 mcx_cmdq_dump(const struct mcx_cmdq_entry *cqe)
3459 {
3460 	unsigned int i;
3461 
3462 	printf(" type %02x, ilen %u, iptr %016llx", cqe->cq_type,
3463 	    bemtoh32(&cqe->cq_input_length), bemtoh64(&cqe->cq_input_ptr));
3464 
3465 	printf(", idata ");
3466 	for (i = 0; i < sizeof(cqe->cq_input_data); i++)
3467 		printf("%02x", cqe->cq_input_data[i]);
3468 
3469 	printf(", odata ");
3470 	for (i = 0; i < sizeof(cqe->cq_output_data); i++)
3471 		printf("%02x", cqe->cq_output_data[i]);
3472 
3473 	printf(", optr %016llx, olen %u, token %02x, sig %02x, status %02x",
3474 	    bemtoh64(&cqe->cq_output_ptr), bemtoh32(&cqe->cq_output_length),
3475 	    cqe->cq_token, cqe->cq_signature, cqe->cq_status);
3476 }
3477 
3478 static void
3479 mcx_cmdq_mbox_dump(struct mcx_dmamem *mboxes, int num)
3480 {
3481 	int i, j;
3482 	uint8_t *d;
3483 
3484 	for (i = 0; i < num; i++) {
3485 		struct mcx_cmdq_mailbox *mbox;
3486 		mbox = mcx_cq_mbox(mboxes, i);
3487 
3488 		d = mcx_cq_mbox_data(mbox);
3489 		for (j = 0; j < MCX_CMDQ_MAILBOX_DATASIZE; j++) {
3490 			if (j != 0 && (j % 16 == 0))
3491 				printf("\n");
3492 			printf("%.2x ", d[j]);
3493 		}
3494 	}
3495 }
3496 #endif
3497 
3498 static int
3499 mcx_access_hca_reg(struct mcx_softc *sc, uint16_t reg, int op, void *data,
3500     int len)
3501 {
3502 	struct mcx_dmamem mxm;
3503 	struct mcx_cmdq_entry *cqe;
3504 	struct mcx_cmd_access_reg_in *in;
3505 	struct mcx_cmd_access_reg_out *out;
3506 	uint8_t token = mcx_cmdq_token(sc);
3507 	int error, nmb;
3508 
3509 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3510 	mcx_cmdq_init(sc, cqe, sizeof(*in) + len, sizeof(*out) + len,
3511 	    token);
3512 
3513 	in = mcx_cmdq_in(cqe);
3514 	in->cmd_opcode = htobe16(MCX_CMD_ACCESS_REG);
3515 	in->cmd_op_mod = htobe16(op);
3516 	in->cmd_register_id = htobe16(reg);
3517 
3518 	nmb = howmany(len, MCX_CMDQ_MAILBOX_DATASIZE);
3519 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
3520 	    &cqe->cq_output_ptr, token) != 0) {
3521 		printf(", unable to allocate access reg mailboxen\n");
3522 		return (-1);
3523 	}
3524 	cqe->cq_input_ptr = cqe->cq_output_ptr;
3525 	mcx_cmdq_mboxes_copyin(&mxm, nmb, data, len);
3526 	mcx_cmdq_mboxes_sign(&mxm, nmb);
3527 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3528 
3529 	mcx_cmdq_post(sc, cqe, 0);
3530 	error = mcx_cmdq_poll(sc, cqe, 1000);
3531 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3532 
3533 	if (error != 0) {
3534 		printf("%s: access reg (%s %x) timeout\n", DEVNAME(sc),
3535 		    (op == MCX_REG_OP_WRITE ? "write" : "read"), reg);
3536 		goto free;
3537 	}
3538 	error = mcx_cmdq_verify(cqe);
3539 	if (error != 0) {
3540 		printf("%s: access reg (%s %x) reply corrupt\n",
3541 		    DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
3542 		    reg);
3543 		goto free;
3544 	}
3545 
3546 	out = mcx_cmdq_out(cqe);
3547 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3548 		printf("%s: access reg (%s %x) failed (%x, %.6x)\n",
3549 		    DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
3550 		    reg, out->cmd_status, betoh32(out->cmd_syndrome));
3551 		error = -1;
3552 		goto free;
3553 	}
3554 
3555 	mcx_cmdq_mboxes_copyout(&mxm, nmb, data, len);
3556 free:
3557 	mcx_dmamem_free(sc, &mxm);
3558 
3559 	return (error);
3560 }
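
/*
 * An illustrative sketch of reading a port register through
 * ACCESS_REG (the register id, struct layout and MCX_REG_OP_READ are
 * assumptions here; see the real callers later in the file):
 *
 *	struct mcx_reg_pmtu pmtu;
 *
 *	memset(&pmtu, 0, sizeof(pmtu));
 *	pmtu.rp_local_port = 1;
 *	if (mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_READ,
 *	    &pmtu, sizeof(pmtu)) != 0)
 *		return (-1);
 */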
3561 
3562 static int
3563 mcx_set_issi(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3564     unsigned int slot)
3565 {
3566 	struct mcx_cmd_set_issi_in *in;
3567 	struct mcx_cmd_set_issi_out *out;
3568 	uint8_t status;
3569 
3570 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3571 
3572 	in = mcx_cmdq_in(cqe);
3573 	in->cmd_opcode = htobe16(MCX_CMD_SET_ISSI);
3574 	in->cmd_op_mod = htobe16(0);
3575 	in->cmd_current_issi = htobe16(MCX_ISSI);
3576 
3577 	mcx_cmdq_post(sc, cqe, slot);
3578 	if (mcx_cmdq_poll(sc, cqe, 1000) != 0)
3579 		return (-1);
3580 	if (mcx_cmdq_verify(cqe) != 0)
3581 		return (-1);
3582 
3583 	status = cqe->cq_output_data[0];
3584 	if (status != MCX_CQ_STATUS_OK)
3585 		return (-1);
3586 
3587 	return (0);
3588 }
3589 
3590 static int
3591 mcx_issi(struct mcx_softc *sc)
3592 {
3593 	struct mcx_dmamem mxm;
3594 	struct mcx_cmdq_entry *cqe;
3595 	struct mcx_cmd_query_issi_in *in;
3596 	struct mcx_cmd_query_issi_il_out *out;
3597 	struct mcx_cmd_query_issi_mb_out *mb;
3598 	uint8_t token = mcx_cmdq_token(sc);
3599 	uint8_t status;
3600 	int error;
3601 
3602 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3603 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mb), token);
3604 
3605 	in = mcx_cmdq_in(cqe);
3606 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_ISSI);
3607 	in->cmd_op_mod = htobe16(0);
3608 
3609 	CTASSERT(sizeof(*mb) <= MCX_CMDQ_MAILBOX_DATASIZE);
3610 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
3611 	    &cqe->cq_output_ptr, token) != 0) {
3612 		printf(", unable to allocate query issi mailbox\n");
3613 		return (-1);
3614 	}
3615 	mcx_cmdq_mboxes_sign(&mxm, 1);
3616 
3617 	mcx_cmdq_post(sc, cqe, 0);
3618 	error = mcx_cmdq_poll(sc, cqe, 1000);
3619 	if (error != 0) {
3620 		printf(", query issi timeout\n");
3621 		goto free;
3622 	}
3623 	error = mcx_cmdq_verify(cqe);
3624 	if (error != 0) {
3625 		printf(", query issi reply corrupt\n");
3626 		goto free;
3627 	}
3628 
3629 	status = cqe->cq_output_data[0];
3630 	switch (status) {
3631 	case MCX_CQ_STATUS_OK:
3632 		break;
3633 	case MCX_CQ_STATUS_BAD_OPCODE:
3634 		/* use ISSI 0 */
3635 		goto free;
3636 	default:
3637 		printf(", query issi failed (%x)\n", status);
3638 		error = -1;
3639 		goto free;
3640 	}
3641 
3642 	out = mcx_cmdq_out(cqe);
3643 	if (out->cmd_current_issi == htobe16(MCX_ISSI)) {
3644 		/* use ISSI 1 */
3645 		goto free;
3646 	}
3647 
3648 	/* don't need to read cqe anymore, can be used for SET ISSI */
3649 
3650 	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3651 	CTASSERT(MCX_ISSI < NBBY);
3652 	/* XXX math is hard: ISSIs 0-7 live in the last byte of the mask */
3653 	if (!ISSET(mb->cmd_supported_issi[79], 1 << MCX_ISSI)) {
3654 		/* use ISSI 0 */
3655 		goto free;
3656 	}
3657 
3658 	if (mcx_set_issi(sc, cqe, 0) != 0) {
3659 		/* ignore the error, just use ISSI 0 */
3660 	} else {
3661 		/* use ISSI 1 */
3662 	}
3663 
3664 free:
3665 	mcx_cq_mboxes_free(sc, &mxm);
3666 	return (error);
3667 }
3668 
3669 static int
3670 mcx_query_pages(struct mcx_softc *sc, uint16_t type,
3671     int32_t *npages, uint16_t *func_id)
3672 {
3673 	struct mcx_cmdq_entry *cqe;
3674 	struct mcx_cmd_query_pages_in *in;
3675 	struct mcx_cmd_query_pages_out *out;
3676 
3677 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3678 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3679 
3680 	in = mcx_cmdq_in(cqe);
3681 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_PAGES);
3682 	in->cmd_op_mod = type;
3683 
3684 	mcx_cmdq_post(sc, cqe, 0);
3685 	if (mcx_cmdq_poll(sc, cqe, 1000) != 0) {
3686 		printf(", query pages timeout\n");
3687 		return (-1);
3688 	}
3689 	if (mcx_cmdq_verify(cqe) != 0) {
3690 		printf(", query pages reply corrupt\n");
3691 		return (-1);
3692 	}
3693 
3694 	out = mcx_cmdq_out(cqe);
3695 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3696 		printf(", query pages failed (%x)\n", out->cmd_status);
3697 		return (-1);
3698 	}
3699 
3700 	*func_id = out->cmd_func_id;
3701 	*npages = bemtoh32(&out->cmd_num_pages);
3702 
3703 	return (0);
3704 }
3705 
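/*
 * bus_dma_iter walks the segments of a loaded DMA map in arbitrary
 * byte steps, yielding the device-visible address at the current
 * position.  mcx_add_pages below uses it to emit one address per
 * MCX_PAGE_SIZE page of hardware memory.
 */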
3706 struct bus_dma_iter {
3707 	bus_dmamap_t		i_map;
3708 	bus_size_t		i_offset;
3709 	unsigned int		i_index;
3710 };
3711 
3712 static void
3713 bus_dma_iter_init(struct bus_dma_iter *i, bus_dmamap_t map)
3714 {
3715 	i->i_map = map;
3716 	i->i_offset = 0;
3717 	i->i_index = 0;
3718 }
3719 
3720 static bus_addr_t
3721 bus_dma_iter_addr(struct bus_dma_iter *i)
3722 {
3723 	return (i->i_map->dm_segs[i->i_index].ds_addr + i->i_offset);
3724 }
3725 
3726 static void
3727 bus_dma_iter_add(struct bus_dma_iter *i, bus_size_t size)
3728 {
3729 	bus_dma_segment_t *seg = i->i_map->dm_segs + i->i_index;
3730 	bus_size_t diff;
3731 
3732 	do {
3733 		diff = seg->ds_len - i->i_offset;
3734 		if (size < diff)
3735 			break;
3736 
3737 		size -= diff;
3738 
3739 		seg++;
3740 
3741 		i->i_offset = 0;
3742 		i->i_index++;
3743 	} while (size > 0);
3744 
3745 	i->i_offset += size;
3746 }
3747 
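/*
 * mcx_add_pages hands the pages backing mhm to the firmware via a
 * MANAGE_PAGES command, packing big-endian physical addresses into as
 * many mailboxes as the page list needs.
 */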
3748 static int
3749 mcx_add_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t func_id)
3750 {
3751 	struct mcx_dmamem mxm;
3752 	struct mcx_cmdq_entry *cqe;
3753 	struct mcx_cmd_manage_pages_in *in;
3754 	struct mcx_cmd_manage_pages_out *out;
3755 	unsigned int paslen, nmb, i, j, npages;
3756 	struct bus_dma_iter iter;
3757 	uint64_t *pas;
3758 	uint8_t status;
3759 	uint8_t token = mcx_cmdq_token(sc);
3760 	int error;
3761 
3762 	npages = mhm->mhm_npages;
3763 
3764 	paslen = sizeof(*pas) * npages;
3765 	nmb = howmany(paslen, MCX_CMDQ_MAILBOX_DATASIZE);
3766 
3767 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3768 	mcx_cmdq_init(sc, cqe, sizeof(*in) + paslen, sizeof(*out), token);
3769 
3770 	in = mcx_cmdq_in(cqe);
3771 	in->cmd_opcode = htobe16(MCX_CMD_MANAGE_PAGES);
3772 	in->cmd_op_mod = htobe16(MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS);
3773 	in->cmd_func_id = func_id;
3774 	htobem32(&in->cmd_input_num_entries, npages);
3775 
3776 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
3777 	    &cqe->cq_input_ptr, token) != 0) {
3778 		printf(", unable to allocate manage pages mailboxen\n");
3779 		return (-1);
3780 	}
3781 
3782 	bus_dma_iter_init(&iter, mhm->mhm_map);
3783 	for (i = 0; i < nmb; i++) {
3784 		unsigned int lim;
3785 
3786 		pas = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, i));
3787 		lim = min(MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas), npages);
3788 
3789 		for (j = 0; j < lim; j++) {
3790 			htobem64(&pas[j], bus_dma_iter_addr(&iter));
3791 			bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
3792 		}
3793 
3794 		npages -= lim;
3795 	}
3796 
3797 	mcx_cmdq_mboxes_sign(&mxm, nmb);
3798 
3799 	mcx_cmdq_post(sc, cqe, 0);
3800 	error = mcx_cmdq_poll(sc, cqe, 1000);
3801 	if (error != 0) {
3802 		printf(", manage pages timeout\n");
3803 		goto free;
3804 	}
3805 	error = mcx_cmdq_verify(cqe);
3806 	if (error != 0) {
3807 		printf(", manage pages reply corrupt\n");
3808 		goto free;
3809 	}
3810 
3811 	status = cqe->cq_output_data[0];
3812 	if (status != MCX_CQ_STATUS_OK) {
3813 		printf(", manage pages failed (%x)\n", status);
3814 		error = -1;
3815 		goto free;
3816 	}
3817 
3818 free:
3819 	mcx_dmamem_free(sc, &mxm);
3820 
3821 	return (error);
3822 }
3823 
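/*
 * mcx_pages asks the firmware how many pages of the given type it
 * wants, allocates that much hardware memory, and hands the pages
 * over.  A non-positive count means nothing is needed.
 */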
3824 static int
3825 mcx_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t type)
3826 {
3827 	int32_t npages;
3828 	uint16_t func_id;
3829 
3830 	if (mcx_query_pages(sc, type, &npages, &func_id) != 0) {
3831 		/* error printed by mcx_query_pages */
3832 		return (-1);
3833 	}
3834 
3835 	if (npages < 1)
3836 		return (0);
3837 
3838 	if (mcx_hwmem_alloc(sc, mhm, npages) != 0) {
3839 		printf(", unable to allocate hwmem\n");
3840 		return (-1);
3841 	}
3842 
3843 	if (mcx_add_pages(sc, mhm, func_id) != 0) {
3844 		printf(", unable to add hwmem\n");
3845 		goto free;
3846 	}
3847 
3848 	return (0);
3849 
3850 free:
3851 	mcx_hwmem_free(sc, mhm);
3852 
3853 	return (-1);
3854 }
3855 
3856 static int
3857 mcx_hca_max_caps(struct mcx_softc *sc)
3858 {
3859 	struct mcx_dmamem mxm;
3860 	struct mcx_cmdq_entry *cqe;
3861 	struct mcx_cmd_query_hca_cap_in *in;
3862 	struct mcx_cmd_query_hca_cap_out *out;
3863 	struct mcx_cmdq_mailbox *mb;
3864 	struct mcx_cap_device *hca;
3865 	uint8_t status;
3866 	uint8_t token = mcx_cmdq_token(sc);
3867 	int error;
3868 
3869 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3870 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3871 	    token);
3872 
3873 	in = mcx_cmdq_in(cqe);
3874 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
3875 	in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_MAX |
3876 	    MCX_CMD_QUERY_HCA_CAP_DEVICE);
3877 
3878 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
3879 	    &cqe->cq_output_ptr, token) != 0) {
3880 		printf(", unable to allocate query hca caps mailboxen\n");
3881 		return (-1);
3882 	}
3883 	mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
3884 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3885 
3886 	mcx_cmdq_post(sc, cqe, 0);
3887 	error = mcx_cmdq_poll(sc, cqe, 1000);
3888 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3889 
3890 	if (error != 0) {
3891 		printf(", query hca caps timeout\n");
3892 		goto free;
3893 	}
3894 	error = mcx_cmdq_verify(cqe);
3895 	if (error != 0) {
3896 		printf(", query hca caps reply corrupt\n");
3897 		goto free;
3898 	}
3899 
3900 	status = cqe->cq_output_data[0];
3901 	if (status != MCX_CQ_STATUS_OK) {
3902 		printf(", query hca caps failed (%x)\n", status);
3903 		error = -1;
3904 		goto free;
3905 	}
3906 
3907 	mb = mcx_cq_mbox(&mxm, 0);
3908 	hca = mcx_cq_mbox_data(mb);
3909 
3910 	if ((hca->port_type & MCX_CAP_DEVICE_PORT_TYPE)
3911 	    != MCX_CAP_DEVICE_PORT_TYPE_ETH) {
3912 		printf(", not in ethernet mode\n");
3913 		error = -1;
3914 		goto free;
3915 	}
3916 	if (hca->log_pg_sz > PAGE_SHIFT) {
3917 		printf(", minimum system page shift %u is too large\n",
3918 		    hca->log_pg_sz);
3919 		error = -1;
3920 		goto free;
3921 	}
3922 	/*
3923 	 * blueflame register is split into two buffers, and we must alternate
3924 	 * between the two of them.
3925 	 */
3926 	sc->sc_bf_size = (1 << hca->log_bf_reg_size) / 2;
3927 	sc->sc_max_rqt_size = (1 << hca->log_max_rqt_size);
3928 
3929 	if (hca->local_ca_ack_delay & MCX_CAP_DEVICE_MCAM_REG)
3930 		sc->sc_mcam_reg = 1;
3931 
3932 	sc->sc_mhz = bemtoh32(&hca->device_frequency_mhz);
3933 	sc->sc_khz = bemtoh32(&hca->device_frequency_khz);
3934 
3935 free:
3936 	mcx_dmamem_free(sc, &mxm);
3937 
3938 	return (error);
3939 }
3940 
3941 static int
3942 mcx_hca_set_caps(struct mcx_softc *sc)
3943 {
3944 	struct mcx_dmamem mxm;
3945 	struct mcx_cmdq_entry *cqe;
3946 	struct mcx_cmd_query_hca_cap_in *in;
3947 	struct mcx_cmd_query_hca_cap_out *out;
3948 	struct mcx_cmdq_mailbox *mb;
3949 	struct mcx_cap_device *hca;
3950 	uint8_t status;
3951 	uint8_t token = mcx_cmdq_token(sc);
3952 	int error;
3953 
3954 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3955 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3956 	    token);
3957 
3958 	in = mcx_cmdq_in(cqe);
3959 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
3960 	in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_CURRENT |
3961 	    MCX_CMD_QUERY_HCA_CAP_DEVICE);
3962 
3963 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
3964 	    &cqe->cq_output_ptr, token) != 0) {
3965 		printf(", unable to allocate query hca caps mailboxen\n");
3966 		return (-1);
3967 	}
3968 	mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
3969 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3970 
3971 	mcx_cmdq_post(sc, cqe, 0);
3972 	error = mcx_cmdq_poll(sc, cqe, 1000);
3973 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3974 
3975 	if (error != 0) {
3976 		printf(", query hca caps timeout\n");
3977 		goto free;
3978 	}
3979 	error = mcx_cmdq_verify(cqe);
3980 	if (error != 0) {
3981 		printf(", query hca caps reply corrupt\n");
3982 		goto free;
3983 	}
3984 
3985 	status = cqe->cq_output_data[0];
3986 	if (status != MCX_CQ_STATUS_OK) {
3987 		printf(", query hca caps failed (%x)\n", status);
3988 		error = -1;
3989 		goto free;
3990 	}
3991 
3992 	mb = mcx_cq_mbox(&mxm, 0);
3993 	hca = mcx_cq_mbox_data(mb);
3994 
3995 	hca->log_pg_sz = PAGE_SHIFT;
3996 
3997 free:
3998 	mcx_dmamem_free(sc, &mxm);
3999 
4000 	return (error);
4001 }
4002 
4003 
4004 static int
4005 mcx_init_hca(struct mcx_softc *sc)
4006 {
4007 	struct mcx_cmdq_entry *cqe;
4008 	struct mcx_cmd_init_hca_in *in;
4009 	struct mcx_cmd_init_hca_out *out;
4010 	int error;
4011 	uint8_t status;
4012 
4013 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4014 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4015 
4016 	in = mcx_cmdq_in(cqe);
4017 	in->cmd_opcode = htobe16(MCX_CMD_INIT_HCA);
4018 	in->cmd_op_mod = htobe16(0);
4019 
4020 	mcx_cmdq_post(sc, cqe, 0);
4021 
4022 	error = mcx_cmdq_poll(sc, cqe, 1000);
4023 	if (error != 0) {
4024 		printf(", hca init timeout\n");
4025 		return (-1);
4026 	}
4027 	if (mcx_cmdq_verify(cqe) != 0) {
4028 		printf(", hca init command corrupt\n");
4029 		return (-1);
4030 	}
4031 
4032 	status = cqe->cq_output_data[0];
4033 	if (status != MCX_CQ_STATUS_OK) {
4034 		printf(", hca init failed (%x)\n", status);
4035 		return (-1);
4036 	}
4037 
4038 	return (0);
4039 }
4040 
4041 static int
4042 mcx_set_driver_version(struct mcx_softc *sc)
4043 {
4044 	struct mcx_dmamem mxm;
4045 	struct mcx_cmdq_entry *cqe;
4046 	struct mcx_cmd_set_driver_version_in *in;
4047 	struct mcx_cmd_set_driver_version_out *out;
4048 	int error;
4049 	int token;
4050 	uint8_t status;
4051 
4052 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4053 	token = mcx_cmdq_token(sc);
4054 	mcx_cmdq_init(sc, cqe, sizeof(*in) +
4055 	    sizeof(struct mcx_cmd_set_driver_version), sizeof(*out), token);
4056 
4057 	in = mcx_cmdq_in(cqe);
4058 	in->cmd_opcode = htobe16(MCX_CMD_SET_DRIVER_VERSION);
4059 	in->cmd_op_mod = htobe16(0);
4060 
4061 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4062 	    &cqe->cq_input_ptr, token) != 0) {
4063 		printf(", unable to allocate set driver version mailboxen\n");
4064 		return (-1);
4065 	}
4066 	strlcpy(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)),
4067 	    "OpenBSD,mcx,1.000.000000", MCX_CMDQ_MAILBOX_DATASIZE);
4068 
4069 	mcx_cmdq_mboxes_sign(&mxm, 1);
4070 	mcx_cmdq_post(sc, cqe, 0);
4071 
4072 	error = mcx_cmdq_poll(sc, cqe, 1000);
4073 	if (error != 0) {
4074 		printf(", set driver version timeout\n");
4075 		goto free;
4076 	}
4077 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
4078 		printf(", set driver version command corrupt\n");
4079 		goto free;
4080 	}
4081 
4082 	status = cqe->cq_output_data[0];
4083 	if (status != MCX_CQ_STATUS_OK) {
4084 		printf(", set driver version failed (%x)\n", status);
4085 		error = -1;
4086 		goto free;
4087 	}
4088 
4089 free:
4090 	mcx_dmamem_free(sc, &mxm);
4091 
4092 	return (error);
4093 }
4094 
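/*
 * mcx_iff applies the interface flags: promisc and allmulti are
 * implemented as flow table entries pointing at the RSS flow table,
 * while the nic vport context update only sets the MTU and leaves
 * promisc-all enabled (see the comment below).
 */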
4095 static int
4096 mcx_iff(struct mcx_softc *sc)
4097 {
4098 	struct ifnet *ifp = &sc->sc_ac.ac_if;
4099 	struct mcx_dmamem mxm;
4100 	struct mcx_cmdq_entry *cqe;
4101 	struct mcx_cmd_modify_nic_vport_context_in *in;
4102 	struct mcx_cmd_modify_nic_vport_context_out *out;
4103 	struct mcx_nic_vport_ctx *ctx;
4104 	int error;
4105 	int token;
4106 	int insize;
4107 	uint32_t dest;
4108 
4109 	dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
4110 	    sc->sc_rss_flow_table_id;
4111 
4112 	/* enable or disable the promisc flow */
4113 	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
4114 		if (sc->sc_promisc_flow_enabled == 0) {
4115 			mcx_set_flow_table_entry_mac(sc,
4116 			    MCX_FLOW_GROUP_PROMISC, 0, NULL, dest);
4117 			sc->sc_promisc_flow_enabled = 1;
4118 		}
4119 	} else if (sc->sc_promisc_flow_enabled != 0) {
4120 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
4121 		sc->sc_promisc_flow_enabled = 0;
4122 	}
4123 
4124 	/* enable or disable the all-multicast flow */
4125 	if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
4126 		if (sc->sc_allmulti_flow_enabled == 0) {
4127 			uint8_t mcast[ETHER_ADDR_LEN];
4128 
4129 			memset(mcast, 0, sizeof(mcast));
4130 			mcast[0] = 0x01;
4131 			mcx_set_flow_table_entry_mac(sc,
4132 			    MCX_FLOW_GROUP_ALLMULTI, 0, mcast, dest);
4133 			sc->sc_allmulti_flow_enabled = 1;
4134 		}
4135 	} else if (sc->sc_allmulti_flow_enabled != 0) {
4136 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
4137 		sc->sc_allmulti_flow_enabled = 0;
4138 	}
4139 
4140 	insize = sizeof(struct mcx_nic_vport_ctx) + 240;
4141 
4142 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4143 	token = mcx_cmdq_token(sc);
4144 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4145 
4146 	in = mcx_cmdq_in(cqe);
4147 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_NIC_VPORT_CONTEXT);
4148 	in->cmd_op_mod = htobe16(0);
4149 	in->cmd_field_select = htobe32(
4150 	    MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC |
4151 	    MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU);
4152 
4153 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4154 		printf(", unable to allocate modify "
4155 		    "nic vport context mailboxen\n");
4156 		return (-1);
4157 	}
4158 	ctx = (struct mcx_nic_vport_ctx *)
4159 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 240);
4160 	ctx->vp_mtu = htobe32(sc->sc_hardmtu);
4161 	/*
4162 	 * always leave promisc-all enabled on the vport since we
4163 	 * can't give it a vlan list, and we're already doing multicast
4164 	 * filtering in the flow table.
4165 	 */
4166 	ctx->vp_flags = htobe16(MCX_NIC_VPORT_CTX_PROMISC_ALL);
4167 
4168 	mcx_cmdq_mboxes_sign(&mxm, 1);
4169 	mcx_cmdq_post(sc, cqe, 0);
4170 
4171 	error = mcx_cmdq_poll(sc, cqe, 1000);
4172 	if (error != 0) {
4173 		printf(", modify nic vport context timeout\n");
4174 		goto free;
4175 	}
4176 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
4177 		printf(", modify nic vport context command corrupt\n");
4178 		goto free;
4179 	}
4180 
4181 	out = mcx_cmdq_out(cqe);
4182 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4183 		printf(", modify nic vport context failed (%x, %x)\n",
4184 		    out->cmd_status, betoh32(out->cmd_syndrome));
4185 		error = -1;
4186 		goto free;
4187 	}
4188 
4189 free:
4190 	mcx_dmamem_free(sc, &mxm);
4191 
4192 	return (error);
4193 }
4194 
4195 static int
4196 mcx_alloc_uar(struct mcx_softc *sc, int *uar)
4197 {
4198 	struct mcx_cmdq_entry *cqe;
4199 	struct mcx_cmd_alloc_uar_in *in;
4200 	struct mcx_cmd_alloc_uar_out *out;
4201 	int error;
4202 
4203 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4204 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4205 
4206 	in = mcx_cmdq_in(cqe);
4207 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_UAR);
4208 	in->cmd_op_mod = htobe16(0);
4209 
4210 	mcx_cmdq_post(sc, cqe, 0);
4211 
4212 	error = mcx_cmdq_poll(sc, cqe, 1000);
4213 	if (error != 0) {
4214 		printf(", alloc uar timeout\n");
4215 		return (-1);
4216 	}
4217 	if (mcx_cmdq_verify(cqe) != 0) {
4218 		printf(", alloc uar command corrupt\n");
4219 		return (-1);
4220 	}
4221 
4222 	out = mcx_cmdq_out(cqe);
4223 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4224 		printf(", alloc uar failed (%x)\n", out->cmd_status);
4225 		return (-1);
4226 	}
4227 
4228 	*uar = mcx_get_id(out->cmd_uar);
4229 	return (0);
4230 }
4231 
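/*
 * mcx_create_eq allocates the pages backing an event queue, sets the
 * initial ownership bit in every entry, and passes the page addresses
 * to the firmware after the eq context in the CREATE_EQ mailbox data.
 * The queue is armed before returning.
 */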
4232 static int
4233 mcx_create_eq(struct mcx_softc *sc, struct mcx_eq *eq, int uar,
4234     uint64_t events, int vector)
4235 {
4236 	struct mcx_cmdq_entry *cqe;
4237 	struct mcx_dmamem mxm;
4238 	struct mcx_cmd_create_eq_in *in;
4239 	struct mcx_cmd_create_eq_mb_in *mbin;
4240 	struct mcx_cmd_create_eq_out *out;
4241 	struct mcx_eq_entry *eqe;
4242 	int error;
4243 	uint64_t *pas;
4244 	int insize, npages, paslen, i, token;
4245 
4246 	eq->eq_cons = 0;
4247 
4248 	npages = howmany((1 << MCX_LOG_EQ_SIZE) * sizeof(struct mcx_eq_entry),
4249 	    MCX_PAGE_SIZE);
4250 	paslen = npages * sizeof(*pas);
4251 	insize = sizeof(struct mcx_cmd_create_eq_mb_in) + paslen;
4252 
4253 	if (mcx_dmamem_alloc(sc, &eq->eq_mem, npages * MCX_PAGE_SIZE,
4254 	    MCX_PAGE_SIZE) != 0) {
4255 		printf(", unable to allocate event queue memory\n");
4256 		return (-1);
4257 	}
4258 
4259 	eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&eq->eq_mem);
4260 	for (i = 0; i < (1 << MCX_LOG_EQ_SIZE); i++) {
4261 		eqe[i].eq_owner = MCX_EQ_ENTRY_OWNER_INIT;
4262 	}
4263 
4264 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4265 	token = mcx_cmdq_token(sc);
4266 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4267 
4268 	in = mcx_cmdq_in(cqe);
4269 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_EQ);
4270 	in->cmd_op_mod = htobe16(0);
4271 
4272 	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4273 	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4274 	    &cqe->cq_input_ptr, token) != 0) {
4275 		printf(", unable to allocate create eq mailboxen\n");
4276 		goto free_eq;
4277 	}
4278 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4279 	mbin->cmd_eq_ctx.eq_uar_size = htobe32(
4280 	    (MCX_LOG_EQ_SIZE << MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT) | uar);
4281 	mbin->cmd_eq_ctx.eq_intr = vector;
4282 	mbin->cmd_event_bitmask = htobe64(events);
4283 
4284 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
4285 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
4286 
4287 	/* physical addresses follow the mailbox in data */
4288 	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &eq->eq_mem);
4289 	mcx_cmdq_mboxes_sign(&mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE));
4290 	mcx_cmdq_post(sc, cqe, 0);
4291 
4292 	error = mcx_cmdq_poll(sc, cqe, 1000);
4293 	if (error != 0) {
4294 		printf(", create eq timeout\n");
4295 		goto free_mxm;
4296 	}
4297 	if (mcx_cmdq_verify(cqe) != 0) {
4298 		printf(", create eq command corrupt\n");
4299 		goto free_mxm;
4300 	}
4301 
4302 	out = mcx_cmdq_out(cqe);
4303 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4304 		printf(", create eq failed (%x, %x)\n", out->cmd_status,
4305 		    betoh32(out->cmd_syndrome));
4306 		goto free_mxm;
4307 	}
4308 
4309 	eq->eq_n = mcx_get_id(out->cmd_eqn);
4310 
4311 	mcx_dmamem_free(sc, &mxm);
4312 
4313 	mcx_arm_eq(sc, eq, uar);
4314 
4315 	return (0);
4316 
4317 free_mxm:
4318 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
4319 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
4320 	mcx_dmamem_free(sc, &mxm);
4321 free_eq:
4322 	mcx_dmamem_free(sc, &eq->eq_mem);
4323 	return (-1);
4324 }
4325 
4326 static int
4327 mcx_alloc_pd(struct mcx_softc *sc)
4328 {
4329 	struct mcx_cmdq_entry *cqe;
4330 	struct mcx_cmd_alloc_pd_in *in;
4331 	struct mcx_cmd_alloc_pd_out *out;
4332 	int error;
4333 
4334 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4335 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4336 
4337 	in = mcx_cmdq_in(cqe);
4338 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_PD);
4339 	in->cmd_op_mod = htobe16(0);
4340 
4341 	mcx_cmdq_post(sc, cqe, 0);
4342 
4343 	error = mcx_cmdq_poll(sc, cqe, 1000);
4344 	if (error != 0) {
4345 		printf(", alloc pd timeout\n");
4346 		return (-1);
4347 	}
4348 	if (mcx_cmdq_verify(cqe) != 0) {
4349 		printf(", alloc pd command corrupt\n");
4350 		return (-1);
4351 	}
4352 
4353 	out = mcx_cmdq_out(cqe);
4354 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4355 		printf(", alloc pd failed (%x)\n", out->cmd_status);
4356 		return (-1);
4357 	}
4358 
4359 	sc->sc_pd = mcx_get_id(out->cmd_pd);
4360 	return (0);
4361 }
4362 
4363 static int
4364 mcx_alloc_tdomain(struct mcx_softc *sc)
4365 {
4366 	struct mcx_cmdq_entry *cqe;
4367 	struct mcx_cmd_alloc_td_in *in;
4368 	struct mcx_cmd_alloc_td_out *out;
4369 	int error;
4370 
4371 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4372 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4373 
4374 	in = mcx_cmdq_in(cqe);
4375 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_TRANSPORT_DOMAIN);
4376 	in->cmd_op_mod = htobe16(0);
4377 
4378 	mcx_cmdq_post(sc, cqe, 0);
4379 
4380 	error = mcx_cmdq_poll(sc, cqe, 1000);
4381 	if (error != 0) {
4382 		printf(", alloc transport domain timeout\n");
4383 		return (-1);
4384 	}
4385 	if (mcx_cmdq_verify(cqe) != 0) {
4386 		printf(", alloc transport domain command corrupt\n");
4387 		return (-1);
4388 	}
4389 
4390 	out = mcx_cmdq_out(cqe);
4391 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4392 		printf(", alloc transport domain failed (%x)\n",
4393 		    out->cmd_status);
4394 		return (-1);
4395 	}
4396 
4397 	sc->sc_tdomain = mcx_get_id(out->cmd_tdomain);
4398 	return (0);
4399 }
4400 
4401 static int
4402 mcx_query_nic_vport_context(struct mcx_softc *sc)
4403 {
4404 	struct mcx_dmamem mxm;
4405 	struct mcx_cmdq_entry *cqe;
4406 	struct mcx_cmd_query_nic_vport_context_in *in;
4407 	struct mcx_cmd_query_nic_vport_context_out *out;
4408 	struct mcx_nic_vport_ctx *ctx;
4409 	uint8_t *addr;
4410 	int error, token, i;
4411 
4412 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4413 	token = mcx_cmdq_token(sc);
4414 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx), token);
4415 
4416 	in = mcx_cmdq_in(cqe);
4417 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_NIC_VPORT_CONTEXT);
4418 	in->cmd_op_mod = htobe16(0);
4419 	in->cmd_allowed_list_type = 0;
4420 
4421 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4422 	    &cqe->cq_output_ptr, token) != 0) {
4423 		printf(", unable to allocate "
4424 		    "query nic vport context mailboxen\n");
4425 		return (-1);
4426 	}
4427 	mcx_cmdq_mboxes_sign(&mxm, 1);
4428 	mcx_cmdq_post(sc, cqe, 0);
4429 
4430 	error = mcx_cmdq_poll(sc, cqe, 1000);
4431 	if (error != 0) {
4432 		printf(", query nic vport context timeout\n");
4433 		goto free;
4434 	}
4435 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
4436 		printf(", query nic vport context command corrupt\n");
4437 		goto free;
4438 	}
4439 
4440 	out = mcx_cmdq_out(cqe);
4441 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4442 		printf(", query nic vport context failed (%x, %x)\n",
4443 		    out->cmd_status, betoh32(out->cmd_syndrome));
4444 		error = -1;
4445 		goto free;
4446 	}
4447 
4448 	ctx = (struct mcx_nic_vport_ctx *)
4449 	    mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4450 	addr = (uint8_t *)&ctx->vp_perm_addr;
4451 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
4452 		sc->sc_ac.ac_enaddr[i] = addr[i + 2];
4453 	}
4454 free:
4455 	mcx_dmamem_free(sc, &mxm);
4456 
4457 	return (error);
4458 }
4459 
4460 static int
4461 mcx_query_special_contexts(struct mcx_softc *sc)
4462 {
4463 	struct mcx_cmdq_entry *cqe;
4464 	struct mcx_cmd_query_special_ctx_in *in;
4465 	struct mcx_cmd_query_special_ctx_out *out;
4466 	int error;
4467 
4468 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4469 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4470 
4471 	in = mcx_cmdq_in(cqe);
4472 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_SPECIAL_CONTEXTS);
4473 	in->cmd_op_mod = htobe16(0);
4474 
4475 	mcx_cmdq_post(sc, cqe, 0);
4476 
4477 	error = mcx_cmdq_poll(sc, cqe, 1000);
4478 	if (error != 0) {
4479 		printf(", query special contexts timeout\n");
4480 		return (-1);
4481 	}
4482 	if (mcx_cmdq_verify(cqe) != 0) {
4483 		printf(", query special contexts command corrupt\n");
4484 		return (-1);
4485 	}
4486 
4487 	out = mcx_cmdq_out(cqe);
4488 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4489 		printf(", query special contexts failed (%x)\n",
4490 		    out->cmd_status);
4491 		return (-1);
4492 	}
4493 
4494 	sc->sc_lkey = betoh32(out->cmd_resd_lkey);
4495 	return (0);
4496 }
4497 
4498 static int
4499 mcx_set_port_mtu(struct mcx_softc *sc, int mtu)
4500 {
4501 	struct mcx_reg_pmtu pmtu;
4502 	int error;
4503 
4504 	/* read max mtu */
4505 	memset(&pmtu, 0, sizeof(pmtu));
4506 	pmtu.rp_local_port = 1;
4507 	error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_READ, &pmtu,
4508 	    sizeof(pmtu));
4509 	if (error != 0) {
4510 		printf(", unable to get port MTU\n");
4511 		return error;
4512 	}
4513 
4514 	mtu = min(mtu, betoh16(pmtu.rp_max_mtu));
4515 	pmtu.rp_admin_mtu = htobe16(mtu);
4516 	error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_WRITE, &pmtu,
4517 	    sizeof(pmtu));
4518 	if (error != 0) {
4519 		printf(", unable to set port MTU\n");
4520 		return error;
4521 	}
4522 
4523 	sc->sc_hardmtu = mtu;
4524 	sc->sc_rxbufsz = roundup(mtu + ETHER_ALIGN, sizeof(long));
4525 	return 0;
4526 }
4527 
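/*
 * mcx_create_cq follows the same pattern for a completion queue, and
 * additionally points the firmware at this queue's slot in the
 * doorbell memory so the consumer index can be tracked.
 */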
4528 static int
4529 mcx_create_cq(struct mcx_softc *sc, struct mcx_cq *cq, int uar, int db, int eqn)
4530 {
4531 	struct mcx_cmdq_entry *cmde;
4532 	struct mcx_cq_entry *cqe;
4533 	struct mcx_dmamem mxm;
4534 	struct mcx_cmd_create_cq_in *in;
4535 	struct mcx_cmd_create_cq_mb_in *mbin;
4536 	struct mcx_cmd_create_cq_out *out;
4537 	int error;
4538 	uint64_t *pas;
4539 	int insize, npages, paslen, i, token;
4540 
4541 	cq->cq_doorbell = MCX_CQ_DOORBELL_BASE + (MCX_CQ_DOORBELL_STRIDE * db);
4542 
4543 	npages = howmany((1 << MCX_LOG_CQ_SIZE) * sizeof(struct mcx_cq_entry),
4544 	    MCX_PAGE_SIZE);
4545 	paslen = npages * sizeof(*pas);
4546 	insize = sizeof(struct mcx_cmd_create_cq_mb_in) + paslen;
4547 
4548 	if (mcx_dmamem_alloc(sc, &cq->cq_mem, npages * MCX_PAGE_SIZE,
4549 	    MCX_PAGE_SIZE) != 0) {
4550 		printf("%s: unable to allocate completion queue memory\n",
4551 		    DEVNAME(sc));
4552 		return (-1);
4553 	}
4554 	cqe = MCX_DMA_KVA(&cq->cq_mem);
4555 	for (i = 0; i < (1 << MCX_LOG_CQ_SIZE); i++) {
4556 		cqe[i].cq_opcode_owner = MCX_CQ_ENTRY_FLAG_OWNER;
4557 	}
4558 
4559 	cmde = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4560 	token = mcx_cmdq_token(sc);
4561 	mcx_cmdq_init(sc, cmde, sizeof(*in) + insize, sizeof(*out), token);
4562 
4563 	in = mcx_cmdq_in(cmde);
4564 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_CQ);
4565 	in->cmd_op_mod = htobe16(0);
4566 
4567 	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4568 	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4569 	    &cmde->cq_input_ptr, token) != 0) {
4570 		printf("%s: unable to allocate create cq mailboxen\n",
4571 		    DEVNAME(sc));
4572 		goto free_cq;
4573 	}
4574 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4575 	mbin->cmd_cq_ctx.cq_uar_size = htobe32(
4576 	    (MCX_LOG_CQ_SIZE << MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT) | uar);
4577 	mbin->cmd_cq_ctx.cq_eqn = htobe32(eqn);
4578 	mbin->cmd_cq_ctx.cq_period_max_count = htobe32(
4579 	    (MCX_CQ_MOD_PERIOD << MCX_CQ_CTX_PERIOD_SHIFT) |
4580 	    MCX_CQ_MOD_COUNTER);
4581 	mbin->cmd_cq_ctx.cq_doorbell = htobe64(
4582 	    MCX_DMA_DVA(&sc->sc_doorbell_mem) + cq->cq_doorbell);
4583 
4584 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4585 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_PREREAD);
4586 
4587 	/* physical addresses follow the mailbox in data */
4588 	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &cq->cq_mem);
4589 	mcx_cmdq_post(sc, cmde, 0);
4590 
4591 	error = mcx_cmdq_poll(sc, cmde, 1000);
4592 	if (error != 0) {
4593 		printf("%s: create cq timeout\n", DEVNAME(sc));
4594 		goto free_mxm;
4595 	}
4596 	if (mcx_cmdq_verify(cmde) != 0) {
4597 		printf("%s: create cq command corrupt\n", DEVNAME(sc));
4598 		goto free_mxm;
4599 	}
4600 
4601 	out = mcx_cmdq_out(cmde);
4602 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4603 		printf("%s: create cq failed (%x, %x)\n", DEVNAME(sc),
4604 		    out->cmd_status, betoh32(out->cmd_syndrome));
4605 		goto free_mxm;
4606 	}
4607 
4608 	cq->cq_n = mcx_get_id(out->cmd_cqn);
4609 	cq->cq_cons = 0;
4610 	cq->cq_count = 0;
4611 
4612 	mcx_dmamem_free(sc, &mxm);
4613 
4614 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4615 	    cq->cq_doorbell, sizeof(struct mcx_cq_doorbell),
4616 	    BUS_DMASYNC_PREWRITE);
4617 
4618 	mcx_arm_cq(sc, cq, uar);
4619 
4620 	return (0);
4621 
4622 free_mxm:
4623 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4624 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
4625 	mcx_dmamem_free(sc, &mxm);
4626 free_cq:
4627 	mcx_dmamem_free(sc, &cq->cq_mem);
4628 	return (-1);
4629 }
4630 
4631 static int
4632 mcx_destroy_cq(struct mcx_softc *sc, struct mcx_cq *cq)
4633 {
4634 	struct mcx_cmdq_entry *cqe;
4635 	struct mcx_cmd_destroy_cq_in *in;
4636 	struct mcx_cmd_destroy_cq_out *out;
4637 	int error;
4638 	int token;
4639 
4640 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4641 	token = mcx_cmdq_token(sc);
4642 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4643 
4644 	in = mcx_cmdq_in(cqe);
4645 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_CQ);
4646 	in->cmd_op_mod = htobe16(0);
4647 	in->cmd_cqn = htobe32(cq->cq_n);
4648 
4649 	mcx_cmdq_post(sc, cqe, 0);
4650 	error = mcx_cmdq_poll(sc, cqe, 1000);
4651 	if (error != 0) {
4652 		printf("%s: destroy cq timeout\n", DEVNAME(sc));
4653 		return error;
4654 	}
4655 	if (mcx_cmdq_verify(cqe) != 0) {
4656 		printf("%s: destroy cq command corrupt\n", DEVNAME(sc));
4657 		return -1;
4658 	}
4659 
4660 	out = mcx_cmdq_out(cqe);
4661 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4662 		printf("%s: destroy cq failed (%x, %x)\n", DEVNAME(sc),
4663 		    out->cmd_status, betoh32(out->cmd_syndrome));
4664 		return -1;
4665 	}
4666 
4667 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4668 	    cq->cq_doorbell, sizeof(struct mcx_cq_doorbell),
4669 	    BUS_DMASYNC_POSTWRITE);
4670 
4671 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4672 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
4673 	mcx_dmamem_free(sc, &cq->cq_mem);
4674 
4675 	cq->cq_n = 0;
4676 	cq->cq_cons = 0;
4677 	cq->cq_count = 0;
4678 	return 0;
4679 }
4680 
4681 static int
4682 mcx_create_rq(struct mcx_softc *sc, struct mcx_rx *rx, int db, int cqn)
4683 {
4684 	struct mcx_cmdq_entry *cqe;
4685 	struct mcx_dmamem mxm;
4686 	struct mcx_cmd_create_rq_in *in;
4687 	struct mcx_cmd_create_rq_out *out;
4688 	struct mcx_rq_ctx *mbin;
4689 	int error;
4690 	uint64_t *pas;
4691 	uint32_t rq_flags;
4692 	int insize, npages, paslen, token;
4693 
4694 	rx->rx_doorbell = MCX_WQ_DOORBELL_BASE +
4695 	    (db * MCX_WQ_DOORBELL_STRIDE);
4696 
4697 	npages = howmany((1 << MCX_LOG_RQ_SIZE) * sizeof(struct mcx_rq_entry),
4698 	    MCX_PAGE_SIZE);
4699 	paslen = npages * sizeof(*pas);
4700 	insize = 0x10 + sizeof(struct mcx_rq_ctx) + paslen;
4701 
4702 	if (mcx_dmamem_alloc(sc, &rx->rx_rq_mem, npages * MCX_PAGE_SIZE,
4703 	    MCX_PAGE_SIZE) != 0) {
4704 		printf("%s: unable to allocate receive queue memory\n",
4705 		    DEVNAME(sc));
4706 		return (-1);
4707 	}
4708 
4709 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4710 	token = mcx_cmdq_token(sc);
4711 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4712 
4713 	in = mcx_cmdq_in(cqe);
4714 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQ);
4715 	in->cmd_op_mod = htobe16(0);
4716 
4717 	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4718 	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4719 	    &cqe->cq_input_ptr, token) != 0) {
4720 		printf("%s: unable to allocate create rq mailboxen\n",
4721 		    DEVNAME(sc));
4722 		goto free_rq;
4723 	}
4724 	mbin = (struct mcx_rq_ctx *)
4725 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4726 	rq_flags = MCX_RQ_CTX_RLKEY;
4727 #if NVLAN == 0
4728 	rq_flags |= MCX_RQ_CTX_VLAN_STRIP_DIS;
4729 #endif
4730 	mbin->rq_flags = htobe32(rq_flags);
4731 	mbin->rq_cqn = htobe32(cqn);
4732 	mbin->rq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4733 	mbin->rq_wq.wq_pd = htobe32(sc->sc_pd);
4734 	mbin->rq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4735 	    rx->rx_doorbell);
4736 	mbin->rq_wq.wq_log_stride = htobe16(4);
4737 	mbin->rq_wq.wq_log_size = MCX_LOG_RQ_SIZE;
4738 
4739 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
4740 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_PREWRITE);
4741 
4742 	/* physical addresses follow the mailbox in data */
4743 	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10, npages, &rx->rx_rq_mem);
4744 	mcx_cmdq_post(sc, cqe, 0);
4745 
4746 	error = mcx_cmdq_poll(sc, cqe, 1000);
4747 	if (error != 0) {
4748 		printf("%s: create rq timeout\n", DEVNAME(sc));
4749 		goto free_mxm;
4750 	}
4751 	if (mcx_cmdq_verify(cqe) != 0) {
4752 		printf("%s: create rq command corrupt\n", DEVNAME(sc));
4753 		goto free_mxm;
4754 	}
4755 
4756 	out = mcx_cmdq_out(cqe);
4757 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4758 		printf("%s: create rq failed (%x, %x)\n", DEVNAME(sc),
4759 		    out->cmd_status, betoh32(out->cmd_syndrome));
4760 		goto free_mxm;
4761 	}
4762 
4763 	rx->rx_rqn = mcx_get_id(out->cmd_rqn);
4764 
4765 	mcx_dmamem_free(sc, &mxm);
4766 
4767 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4768 	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
4769 
4770 	return (0);
4771 
4772 free_mxm:
4773 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
4774 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
4775 	mcx_dmamem_free(sc, &mxm);
4776 free_rq:
4777 	mcx_dmamem_free(sc, &rx->rx_rq_mem);
4778 	return (-1);
4779 }
4780 
4781 static int
4782 mcx_ready_rq(struct mcx_softc *sc, struct mcx_rx *rx)
4783 {
4784 	struct mcx_cmdq_entry *cqe;
4785 	struct mcx_dmamem mxm;
4786 	struct mcx_cmd_modify_rq_in *in;
4787 	struct mcx_cmd_modify_rq_mb_in *mbin;
4788 	struct mcx_cmd_modify_rq_out *out;
4789 	int error;
4790 	int token;
4791 
4792 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4793 	token = mcx_cmdq_token(sc);
4794 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
4795 	    sizeof(*out), token);
4796 
4797 	in = mcx_cmdq_in(cqe);
4798 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_RQ);
4799 	in->cmd_op_mod = htobe16(0);
4800 	in->cmd_rq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | rx->rx_rqn);
4801 
4802 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4803 	    &cqe->cq_input_ptr, token) != 0) {
4804 		printf("%s: unable to allocate modify rq mailbox\n",
4805 		    DEVNAME(sc));
4806 		return (-1);
4807 	}
4808 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4809 	mbin->cmd_rq_ctx.rq_flags = htobe32(
4810 	    MCX_QUEUE_STATE_RDY << MCX_RQ_CTX_STATE_SHIFT);
4811 
4812 	mcx_cmdq_mboxes_sign(&mxm, 1);
4813 	mcx_cmdq_post(sc, cqe, 0);
4814 	error = mcx_cmdq_poll(sc, cqe, 1000);
4815 	if (error != 0) {
4816 		printf("%s: modify rq timeout\n", DEVNAME(sc));
4817 		goto free;
4818 	}
4819 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
4820 		printf("%s: modify rq command corrupt\n", DEVNAME(sc));
4821 		goto free;
4822 	}
4823 
4824 	out = mcx_cmdq_out(cqe);
4825 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4826 		printf("%s: modify rq failed (%x, %x)\n", DEVNAME(sc),
4827 		    out->cmd_status, betoh32(out->cmd_syndrome));
4828 		error = -1;
4829 		goto free;
4830 	}
4831 
4832 free:
4833 	mcx_dmamem_free(sc, &mxm);
4834 	return (error);
4835 }
4836 
4837 static int
4838 mcx_destroy_rq(struct mcx_softc *sc, struct mcx_rx *rx)
4839 {
4840 	struct mcx_cmdq_entry *cqe;
4841 	struct mcx_cmd_destroy_rq_in *in;
4842 	struct mcx_cmd_destroy_rq_out *out;
4843 	int error;
4844 	int token;
4845 
4846 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4847 	token = mcx_cmdq_token(sc);
4848 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4849 
4850 	in = mcx_cmdq_in(cqe);
4851 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQ);
4852 	in->cmd_op_mod = htobe16(0);
4853 	in->cmd_rqn = htobe32(rx->rx_rqn);
4854 
4855 	mcx_cmdq_post(sc, cqe, 0);
4856 	error = mcx_cmdq_poll(sc, cqe, 1000);
4857 	if (error != 0) {
4858 		printf("%s: destroy rq timeout\n", DEVNAME(sc));
4859 		return error;
4860 	}
4861 	if (mcx_cmdq_verify(cqe) != 0) {
4862 		printf("%s: destroy rq command corrupt\n", DEVNAME(sc));
4863 		return -1;
4864 	}
4865 
4866 	out = mcx_cmdq_out(cqe);
4867 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4868 		printf("%s: destroy rq failed (%x, %x)\n", DEVNAME(sc),
4869 		    out->cmd_status, betoh32(out->cmd_syndrome));
4870 		return -1;
4871 	}
4872 
4873 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4874 	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
4875 
4876 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
4877 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
4878 	mcx_dmamem_free(sc, &rx->rx_rq_mem);
4879 
4880 	rx->rx_rqn = 0;
4881 	return 0;
4882 }
4883 
4884 static int
4885 mcx_create_tir_direct(struct mcx_softc *sc, struct mcx_rx *rx, int *tirn)
4886 {
4887 	struct mcx_cmdq_entry *cqe;
4888 	struct mcx_dmamem mxm;
4889 	struct mcx_cmd_create_tir_in *in;
4890 	struct mcx_cmd_create_tir_mb_in *mbin;
4891 	struct mcx_cmd_create_tir_out *out;
4892 	int error;
4893 	int token;
4894 
4895 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4896 	token = mcx_cmdq_token(sc);
4897 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
4898 	    sizeof(*out), token);
4899 
4900 	in = mcx_cmdq_in(cqe);
4901 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
4902 	in->cmd_op_mod = htobe16(0);
4903 
4904 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4905 	    &cqe->cq_input_ptr, token) != 0) {
4906 		printf("%s: unable to allocate create tir mailbox\n",
4907 		    DEVNAME(sc));
4908 		return (-1);
4909 	}
4910 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4911 	/* leave disp_type = 0, so packets get sent to the inline rqn */
4912 	mbin->cmd_inline_rqn = htobe32(rx->rx_rqn);
4913 	mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
4914 
4915 	mcx_cmdq_post(sc, cqe, 0);
4916 	error = mcx_cmdq_poll(sc, cqe, 1000);
4917 	if (error != 0) {
4918 		printf("%s: create tir timeout\n", DEVNAME(sc));
4919 		goto free;
4920 	}
4921 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
4922 		printf("%s: create tir command corrupt\n", DEVNAME(sc));
4923 		goto free;
4924 	}
4925 
4926 	out = mcx_cmdq_out(cqe);
4927 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4928 		printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
4929 		    out->cmd_status, betoh32(out->cmd_syndrome));
4930 		error = -1;
4931 		goto free;
4932 	}
4933 
4934 	*tirn = mcx_get_id(out->cmd_tirn);
4935 free:
4936 	mcx_dmamem_free(sc, &mxm);
4937 	return (error);
4938 }
4939 
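/*
 * mcx_create_tir_indirect creates a TIR that spreads received packets
 * across the queues in an RQT, hashing the header fields selected by
 * hash_sel with the Toeplitz key the stack provides via
 * stoeplitz_to_key().
 */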
4940 static int
4941 mcx_create_tir_indirect(struct mcx_softc *sc, int rqtn, uint32_t hash_sel,
4942     int *tirn)
4943 {
4944 	struct mcx_cmdq_entry *cqe;
4945 	struct mcx_dmamem mxm;
4946 	struct mcx_cmd_create_tir_in *in;
4947 	struct mcx_cmd_create_tir_mb_in *mbin;
4948 	struct mcx_cmd_create_tir_out *out;
4949 	int error;
4950 	int token;
4951 
4952 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4953 	token = mcx_cmdq_token(sc);
4954 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
4955 	    sizeof(*out), token);
4956 
4957 	in = mcx_cmdq_in(cqe);
4958 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
4959 	in->cmd_op_mod = htobe16(0);
4960 
4961 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4962 	    &cqe->cq_input_ptr, token) != 0) {
4963 		printf("%s: unable to allocate create tir mailbox\n",
4964 		    DEVNAME(sc));
4965 		return (-1);
4966 	}
4967 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4968 	mbin->cmd_disp_type = htobe32(MCX_TIR_CTX_DISP_TYPE_INDIRECT
4969 	    << MCX_TIR_CTX_DISP_TYPE_SHIFT);
4970 	mbin->cmd_indir_table = htobe32(rqtn);
4971 	mbin->cmd_tdomain = htobe32(sc->sc_tdomain |
4972 	    MCX_TIR_CTX_HASH_TOEPLITZ << MCX_TIR_CTX_HASH_SHIFT);
4973 	mbin->cmd_rx_hash_sel_outer = htobe32(hash_sel);
4974 	stoeplitz_to_key(&mbin->cmd_rx_hash_key,
4975 	    sizeof(mbin->cmd_rx_hash_key));
4976 
4977 	mcx_cmdq_post(sc, cqe, 0);
4978 	error = mcx_cmdq_poll(sc, cqe, 1000);
4979 	if (error != 0) {
4980 		printf("%s: create tir timeout\n", DEVNAME(sc));
4981 		goto free;
4982 	}
4983 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
4984 		printf("%s: create tir command corrupt\n", DEVNAME(sc));
4985 		goto free;
4986 	}
4987 
4988 	out = mcx_cmdq_out(cqe);
4989 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4990 		printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
4991 		    out->cmd_status, betoh32(out->cmd_syndrome));
4992 		error = -1;
4993 		goto free;
4994 	}
4995 
4996 	*tirn = mcx_get_id(out->cmd_tirn);
4997 free:
4998 	mcx_dmamem_free(sc, &mxm);
4999 	return (error);
5000 }
5001 
5002 static int
5003 mcx_destroy_tir(struct mcx_softc *sc, int tirn)
5004 {
5005 	struct mcx_cmdq_entry *cqe;
5006 	struct mcx_cmd_destroy_tir_in *in;
5007 	struct mcx_cmd_destroy_tir_out *out;
5008 	int error;
5009 	int token;
5010 
5011 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5012 	token = mcx_cmdq_token(sc);
5013 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5014 
5015 	in = mcx_cmdq_in(cqe);
5016 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIR);
5017 	in->cmd_op_mod = htobe16(0);
5018 	in->cmd_tirn = htobe32(tirn);
5019 
5020 	mcx_cmdq_post(sc, cqe, 0);
5021 	error = mcx_cmdq_poll(sc, cqe, 1000);
5022 	if (error != 0) {
5023 		printf("%s: destroy tir timeout\n", DEVNAME(sc));
5024 		return error;
5025 	}
5026 	if (mcx_cmdq_verify(cqe) != 0) {
5027 		printf("%s: destroy tir command corrupt\n", DEVNAME(sc));
5028 		return -1;
5029 	}
5030 
5031 	out = mcx_cmdq_out(cqe);
5032 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5033 		printf("%s: destroy tir failed (%x, %x)\n", DEVNAME(sc),
5034 		    out->cmd_status, betoh32(out->cmd_syndrome));
5035 		return -1;
5036 	}
5037 
5038 	return (0);
5039 }
5040 
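/*
 * mcx_create_sq is the send side counterpart of mcx_create_rq; its
 * doorbell sits 4 bytes past the matching receive queue doorbell.
 */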
5041 static int
5042 mcx_create_sq(struct mcx_softc *sc, struct mcx_tx *tx, int uar, int db,
5043     int cqn)
5044 {
5045 	struct mcx_cmdq_entry *cqe;
5046 	struct mcx_dmamem mxm;
5047 	struct mcx_cmd_create_sq_in *in;
5048 	struct mcx_sq_ctx *mbin;
5049 	struct mcx_cmd_create_sq_out *out;
5050 	int error;
5051 	uint64_t *pas;
5052 	int insize, npages, paslen, token;
5053 
5054 	tx->tx_doorbell = MCX_WQ_DOORBELL_BASE +
5055 	    (db * MCX_WQ_DOORBELL_STRIDE) + 4;
5056 
5057 	npages = howmany((1 << MCX_LOG_SQ_SIZE) * sizeof(struct mcx_sq_entry),
5058 	    MCX_PAGE_SIZE);
5059 	paslen = npages * sizeof(*pas);
5060 	insize = sizeof(struct mcx_sq_ctx) + paslen;
5061 
5062 	if (mcx_dmamem_alloc(sc, &tx->tx_sq_mem, npages * MCX_PAGE_SIZE,
5063 	    MCX_PAGE_SIZE) != 0) {
5064 		printf("%s: unable to allocate send queue memory\n",
5065 		    DEVNAME(sc));
5066 		return (-1);
5067 	}
5068 
5069 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5070 	token = mcx_cmdq_token(sc);
5071 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize + paslen, sizeof(*out),
5072 	    token);
5073 
5074 	in = mcx_cmdq_in(cqe);
5075 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_SQ);
5076 	in->cmd_op_mod = htobe16(0);
5077 
5078 	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
5079 	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
5080 	    &cqe->cq_input_ptr, token) != 0) {
5081 		printf("%s: unable to allocate create sq mailboxen\n",
5082 		    DEVNAME(sc));
5083 		goto free_sq;
5084 	}
5085 	mbin = (struct mcx_sq_ctx *)
5086 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
5087 	mbin->sq_flags = htobe32(MCX_SQ_CTX_RLKEY |
5088 	    (1 << MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT));
5089 	mbin->sq_cqn = htobe32(cqn);
5090 	mbin->sq_tis_lst_sz = htobe32(1 << MCX_SQ_CTX_TIS_LST_SZ_SHIFT);
5091 	mbin->sq_tis_num = htobe32(sc->sc_tis);
5092 	mbin->sq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
5093 	mbin->sq_wq.wq_pd = htobe32(sc->sc_pd);
5094 	mbin->sq_wq.wq_uar_page = htobe32(uar);
5095 	mbin->sq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
5096 	    tx->tx_doorbell);
5097 	mbin->sq_wq.wq_log_stride = htobe16(MCX_LOG_SQ_ENTRY_SIZE);
5098 	mbin->sq_wq.wq_log_size = MCX_LOG_SQ_SIZE;
5099 
5100 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5101 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_PREWRITE);
5102 
5103 	/* physical addresses follow the mailbox in data */
5104 	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10,
5105 	    npages, &tx->tx_sq_mem);
5106 	mcx_cmdq_post(sc, cqe, 0);
5107 
5108 	error = mcx_cmdq_poll(sc, cqe, 1000);
5109 	if (error != 0) {
5110 		printf("%s: create sq timeout\n", DEVNAME(sc));
5111 		goto free_mxm;
5112 	}
5113 	if (mcx_cmdq_verify(cqe) != 0) {
5114 		printf("%s: create sq command corrupt\n", DEVNAME(sc));
5115 		goto free_mxm;
5116 	}
5117 
5118 	out = mcx_cmdq_out(cqe);
5119 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5120 		printf("%s: create sq failed (%x, %x)\n", DEVNAME(sc),
5121 		    out->cmd_status, betoh32(out->cmd_syndrome));
5122 		goto free_mxm;
5123 	}
5124 
5125 	tx->tx_uar = uar;
5126 	tx->tx_sqn = mcx_get_id(out->cmd_sqn);
5127 
5128 	mcx_dmamem_free(sc, &mxm);
5129 
5130 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
5131 	    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
5132 
5133 	return (0);
5134 
5135 free_mxm:
5136 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5137 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
5138 	mcx_dmamem_free(sc, &mxm);
5139 free_sq:
5140 	mcx_dmamem_free(sc, &tx->tx_sq_mem);
5141 	return (-1);
5142 }
5143 
5144 static int
5145 mcx_destroy_sq(struct mcx_softc *sc, struct mcx_tx *tx)
5146 {
5147 	struct mcx_cmdq_entry *cqe;
5148 	struct mcx_cmd_destroy_sq_in *in;
5149 	struct mcx_cmd_destroy_sq_out *out;
5150 	int error;
5151 	int token;
5152 
5153 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5154 	token = mcx_cmdq_token(sc);
5155 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5156 
5157 	in = mcx_cmdq_in(cqe);
5158 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_SQ);
5159 	in->cmd_op_mod = htobe16(0);
5160 	in->cmd_sqn = htobe32(tx->tx_sqn);
5161 
5162 	mcx_cmdq_post(sc, cqe, 0);
5163 	error = mcx_cmdq_poll(sc, cqe, 1000);
5164 	if (error != 0) {
5165 		printf("%s: destroy sq timeout\n", DEVNAME(sc));
5166 		return error;
5167 	}
5168 	if (mcx_cmdq_verify(cqe) != 0) {
5169 		printf("%s: destroy sq command corrupt\n", DEVNAME(sc));
5170 		return -1;
5171 	}
5172 
5173 	out = mcx_cmdq_out(cqe);
5174 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5175 		printf("%s: destroy sq failed (%x, %x)\n", DEVNAME(sc),
5176 		    out->cmd_status, betoh32(out->cmd_syndrome));
5177 		return -1;
5178 	}
5179 
5180 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
5181 	    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
5182 
5183 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5184 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
5185 	mcx_dmamem_free(sc, &tx->tx_sq_mem);
5186 
5187 	tx->tx_sqn = 0;
5188 	return 0;
5189 }
5190 
5191 static int
5192 mcx_ready_sq(struct mcx_softc *sc, struct mcx_tx *tx)
5193 {
5194 	struct mcx_cmdq_entry *cqe;
5195 	struct mcx_dmamem mxm;
5196 	struct mcx_cmd_modify_sq_in *in;
5197 	struct mcx_cmd_modify_sq_mb_in *mbin;
5198 	struct mcx_cmd_modify_sq_out *out;
5199 	int error;
5200 	int token;
5201 
5202 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5203 	token = mcx_cmdq_token(sc);
5204 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5205 	    sizeof(*out), token);
5206 
5207 	in = mcx_cmdq_in(cqe);
5208 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_SQ);
5209 	in->cmd_op_mod = htobe16(0);
5210 	in->cmd_sq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | tx->tx_sqn);
5211 
5212 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5213 	    &cqe->cq_input_ptr, token) != 0) {
5214 		printf("%s: unable to allocate modify sq mailbox\n",
5215 		    DEVNAME(sc));
5216 		return (-1);
5217 	}
5218 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5219 	mbin->cmd_sq_ctx.sq_flags = htobe32(
5220 	    MCX_QUEUE_STATE_RDY << MCX_SQ_CTX_STATE_SHIFT);
5221 
5222 	mcx_cmdq_mboxes_sign(&mxm, 1);
5223 	mcx_cmdq_post(sc, cqe, 0);
5224 	error = mcx_cmdq_poll(sc, cqe, 1000);
5225 	if (error != 0) {
5226 		printf("%s: modify sq timeout\n", DEVNAME(sc));
5227 		goto free;
5228 	}
5229 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
5230 		printf("%s: modify sq command corrupt\n", DEVNAME(sc));
5231 		goto free;
5232 	}
5233 
5234 	out = mcx_cmdq_out(cqe);
5235 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5236 		printf("%s: modify sq failed (%x, %x)\n", DEVNAME(sc),
5237 		    out->cmd_status, betoh32(out->cmd_syndrome));
5238 		error = -1;
5239 		goto free;
5240 	}
5241 
5242 free:
5243 	mcx_dmamem_free(sc, &mxm);
5244 	return (error);
5245 }
5246 
5247 static int
5248 mcx_create_tis(struct mcx_softc *sc, int *tis)
5249 {
5250 	struct mcx_cmdq_entry *cqe;
5251 	struct mcx_dmamem mxm;
5252 	struct mcx_cmd_create_tis_in *in;
5253 	struct mcx_cmd_create_tis_mb_in *mbin;
5254 	struct mcx_cmd_create_tis_out *out;
5255 	int error;
5256 	int token;
5257 
5258 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5259 	token = mcx_cmdq_token(sc);
5260 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5261 	    sizeof(*out), token);
5262 
5263 	in = mcx_cmdq_in(cqe);
5264 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIS);
5265 	in->cmd_op_mod = htobe16(0);
5266 
5267 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5268 	    &cqe->cq_input_ptr, token) != 0) {
5269 		printf("%s: unable to allocate create tis mailbox\n",
5270 		    DEVNAME(sc));
5271 		return (-1);
5272 	}
5273 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5274 	mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
5275 
5276 	mcx_cmdq_mboxes_sign(&mxm, 1);
5277 	mcx_cmdq_post(sc, cqe, 0);
5278 	error = mcx_cmdq_poll(sc, cqe, 1000);
5279 	if (error != 0) {
5280 		printf("%s: create tis timeout\n", DEVNAME(sc));
5281 		goto free;
5282 	}
5283 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
5284 		printf("%s: create tis command corrupt\n", DEVNAME(sc));
5285 		goto free;
5286 	}
5287 
5288 	out = mcx_cmdq_out(cqe);
5289 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5290 		printf("%s: create tis failed (%x, %x)\n", DEVNAME(sc),
5291 		    out->cmd_status, betoh32(out->cmd_syndrome));
5292 		error = -1;
5293 		goto free;
5294 	}
5295 
5296 	*tis = mcx_get_id(out->cmd_tisn);
5297 free:
5298 	mcx_dmamem_free(sc, &mxm);
5299 	return (error);
5300 }
5301 
5302 static int
5303 mcx_destroy_tis(struct mcx_softc *sc, int tis)
5304 {
5305 	struct mcx_cmdq_entry *cqe;
5306 	struct mcx_cmd_destroy_tis_in *in;
5307 	struct mcx_cmd_destroy_tis_out *out;
5308 	int error;
5309 	int token;
5310 
5311 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5312 	token = mcx_cmdq_token(sc);
5313 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5314 
5315 	in = mcx_cmdq_in(cqe);
5316 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIS);
5317 	in->cmd_op_mod = htobe16(0);
5318 	in->cmd_tisn = htobe32(tis);
5319 
5320 	mcx_cmdq_post(sc, cqe, 0);
5321 	error = mcx_cmdq_poll(sc, cqe, 1000);
5322 	if (error != 0) {
5323 		printf("%s: destroy tis timeout\n", DEVNAME(sc));
5324 		return error;
5325 	}
5326 	if (mcx_cmdq_verify(cqe) != 0) {
5327 		printf("%s: destroy tis command corrupt\n", DEVNAME(sc));
5328 		return -1;
5329 	}
5330 
5331 	out = mcx_cmdq_out(cqe);
5332 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5333 		printf("%s: destroy tis failed (%x, %x)\n", DEVNAME(sc),
5334 		    out->cmd_status, betoh32(out->cmd_syndrome));
5335 		return -1;
5336 	}
5337 
5338 	return 0;
5339 }
5340 
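/*
 * mcx_create_rqt builds the RSS indirection table: the list of
 * receive queue numbers follows the rqt context in the mailbox.
 */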
5341 static int
5342 mcx_create_rqt(struct mcx_softc *sc, int size, int *rqns, int *rqt)
5343 {
5344 	struct mcx_cmdq_entry *cqe;
5345 	struct mcx_dmamem mxm;
5346 	struct mcx_cmd_create_rqt_in *in;
5347 	struct mcx_cmd_create_rqt_mb_in *mbin;
5348 	struct mcx_cmd_create_rqt_out *out;
5349 	struct mcx_rqt_ctx *rqt_ctx;
5350 	int *rqtn;
5351 	int error;
5352 	int token;
5353 	int i;
5354 
5355 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5356 	token = mcx_cmdq_token(sc);
5357 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) +
5358 	    (size * sizeof(int)), sizeof(*out), token);
5359 
5360 	in = mcx_cmdq_in(cqe);
5361 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQT);
5362 	in->cmd_op_mod = htobe16(0);
5363 
5364 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5365 	    &cqe->cq_input_ptr, token) != 0) {
5366 		printf("%s: unable to allocate create rqt mailbox\n",
5367 		    DEVNAME(sc));
5368 		return (-1);
5369 	}
5370 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5371 	rqt_ctx = &mbin->cmd_rqt;
5372 	rqt_ctx->cmd_rqt_max_size = htobe16(sc->sc_max_rqt_size);
5373 	rqt_ctx->cmd_rqt_actual_size = htobe16(size);
5374 
5375 	/* rqt list follows the rqt context */
5376 	rqtn = (int *)(rqt_ctx + 1);
5377 	for (i = 0; i < size; i++) {
5378 		rqtn[i] = htobe32(rqns[i]);
5379 	}
5380 
5381 	mcx_cmdq_mboxes_sign(&mxm, 1);
5382 	mcx_cmdq_post(sc, cqe, 0);
5383 	error = mcx_cmdq_poll(sc, cqe, 1000);
5384 	if (error != 0) {
5385 		printf("%s: create rqt timeout\n", DEVNAME(sc));
5386 		goto free;
5387 	}
5388 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
5389 		printf("%s: create rqt command corrupt\n", DEVNAME(sc));
5390 		goto free;
5391 	}
5392 
5393 	out = mcx_cmdq_out(cqe);
5394 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5395 		printf("%s: create rqt failed (%x, %x)\n", DEVNAME(sc),
5396 		    out->cmd_status, betoh32(out->cmd_syndrome));
5397 		error = -1;
5398 		goto free;
5399 	}
5400 
5401 	*rqt = mcx_get_id(out->cmd_rqtn);
5402 	/* fall through so the mailboxes are freed on success too */
5403 free:
5404 	mcx_dmamem_free(sc, &mxm);
5405 	return (error);
5406 }
5407 
5408 static int
5409 mcx_destroy_rqt(struct mcx_softc *sc, int rqt)
5410 {
5411 	struct mcx_cmdq_entry *cqe;
5412 	struct mcx_cmd_destroy_rqt_in *in;
5413 	struct mcx_cmd_destroy_rqt_out *out;
5414 	int error;
5415 	int token;
5416 
5417 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5418 	token = mcx_cmdq_token(sc);
5419 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5420 
5421 	in = mcx_cmdq_in(cqe);
5422 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQT);
5423 	in->cmd_op_mod = htobe16(0);
5424 	in->cmd_rqtn = htobe32(rqt);
5425 
5426 	mcx_cmdq_post(sc, cqe, 0);
5427 	error = mcx_cmdq_poll(sc, cqe, 1000);
5428 	if (error != 0) {
5429 		printf("%s: destroy rqt timeout\n", DEVNAME(sc));
5430 		return error;
5431 	}
5432 	if (mcx_cmdq_verify(cqe) != 0) {
5433 		printf("%s: destroy rqt command corrupt\n", DEVNAME(sc));
5434 		return -1;
5435 	}
5436 
5437 	out = mcx_cmdq_out(cqe);
5438 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5439 		printf("%s: destroy rqt failed (%x, %x)\n", DEVNAME(sc),
5440 		    out->cmd_status, betoh32(out->cmd_syndrome));
5441 		return -1;
5442 	}
5443 
5444 	return 0;
5445 }
5446 
5447 #if 0
5448 static int
5449 mcx_alloc_flow_counter(struct mcx_softc *sc, int i)
5450 {
5451 	struct mcx_cmdq_entry *cqe;
5452 	struct mcx_cmd_alloc_flow_counter_in *in;
5453 	struct mcx_cmd_alloc_flow_counter_out *out;
5454 	int error;
5455 
5456 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5457 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
5458 
5459 	in = mcx_cmdq_in(cqe);
5460 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_FLOW_COUNTER);
5461 	in->cmd_op_mod = htobe16(0);
5462 
5463 	mcx_cmdq_post(sc, cqe, 0);
5464 
5465 	error = mcx_cmdq_poll(sc, cqe, 1000);
5466 	if (error != 0) {
5467 		printf("%s: alloc flow counter timeout\n", DEVNAME(sc));
5468 		return (-1);
5469 	}
5470 	if (mcx_cmdq_verify(cqe) != 0) {
5471 		printf("%s: alloc flow counter command corrupt\n", DEVNAME(sc));
5472 		return (-1);
5473 	}
5474 
5475 	out = (struct mcx_cmd_alloc_flow_counter_out *)cqe->cq_output_data;
5476 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5477 		printf("%s: alloc flow counter failed (%x)\n", DEVNAME(sc),
5478 		    out->cmd_status);
5479 		return (-1);
5480 	}
5481 
5482 	sc->sc_flow_counter_id[i] = betoh16(out->cmd_flow_counter_id);
5483 	printf("flow counter id %d = %d\n", i, sc->sc_flow_counter_id[i]);
5484 
5485 	return (0);
5486 }
5487 #endif
5488 
5489 static int
5490 mcx_create_flow_table(struct mcx_softc *sc, int log_size, int level,
5491     int *flow_table_id)
5492 {
5493 	struct mcx_cmdq_entry *cqe;
5494 	struct mcx_dmamem mxm;
5495 	struct mcx_cmd_create_flow_table_in *in;
5496 	struct mcx_cmd_create_flow_table_mb_in *mbin;
5497 	struct mcx_cmd_create_flow_table_out *out;
5498 	int error;
5499 	int token;
5500 
5501 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5502 	token = mcx_cmdq_token(sc);
5503 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5504 	    sizeof(*out), token);
5505 
5506 	in = mcx_cmdq_in(cqe);
5507 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_TABLE);
5508 	in->cmd_op_mod = htobe16(0);
5509 
5510 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5511 	    &cqe->cq_input_ptr, token) != 0) {
5512 		printf("%s: unable to allocate create flow table mailbox\n",
5513 		    DEVNAME(sc));
5514 		return (-1);
5515 	}
5516 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5517 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5518 	mbin->cmd_ctx.ft_log_size = log_size;
5519 	mbin->cmd_ctx.ft_level = level;
5520 
5521 	mcx_cmdq_mboxes_sign(&mxm, 1);
5522 	mcx_cmdq_post(sc, cqe, 0);
5523 	error = mcx_cmdq_poll(sc, cqe, 1000);
5524 	if (error != 0) {
5525 		printf("%s: create flow table timeout\n", DEVNAME(sc));
5526 		goto free;
5527 	}
5528 	if (mcx_cmdq_verify(cqe) != 0) {
5529 		printf("%s: create flow table command corrupt\n", DEVNAME(sc));
		error = -1;
5530 		goto free;
5531 	}
5532 
5533 	out = mcx_cmdq_out(cqe);
5534 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5535 		printf("%s: create flow table failed (%x, %x)\n", DEVNAME(sc),
5536 		    out->cmd_status, betoh32(out->cmd_syndrome));
5537 		error = -1;
5538 		goto free;
5539 	}
5540 
5541 	*flow_table_id = mcx_get_id(out->cmd_table_id);
5542 free:
5543 	mcx_dmamem_free(sc, &mxm);
5544 	return (error);
5545 }
5546 
5547 static int
5548 mcx_set_flow_table_root(struct mcx_softc *sc, int flow_table_id)
5549 {
5550 	struct mcx_cmdq_entry *cqe;
5551 	struct mcx_dmamem mxm;
5552 	struct mcx_cmd_set_flow_table_root_in *in;
5553 	struct mcx_cmd_set_flow_table_root_mb_in *mbin;
5554 	struct mcx_cmd_set_flow_table_root_out *out;
5555 	int error;
5556 	int token;
5557 
5558 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5559 	token = mcx_cmdq_token(sc);
5560 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5561 	    sizeof(*out), token);
5562 
5563 	in = mcx_cmdq_in(cqe);
5564 	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ROOT);
5565 	in->cmd_op_mod = htobe16(0);
5566 
5567 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5568 	    &cqe->cq_input_ptr, token) != 0) {
5569 		printf("%s: unable to allocate set flow table root mailbox\n",
5570 		    DEVNAME(sc));
5571 		return (-1);
5572 	}
5573 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5574 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5575 	mbin->cmd_table_id = htobe32(flow_table_id);
5576 
5577 	mcx_cmdq_mboxes_sign(&mxm, 1);
5578 	mcx_cmdq_post(sc, cqe, 0);
5579 	error = mcx_cmdq_poll(sc, cqe, 1000);
5580 	if (error != 0) {
5581 		printf("%s: set flow table root timeout\n", DEVNAME(sc));
5582 		goto free;
5583 	}
5584 	if (mcx_cmdq_verify(cqe) != 0) {
5585 		printf("%s: set flow table root command corrupt\n",
5586 		    DEVNAME(sc));
		error = -1;
5587 		goto free;
5588 	}
5589 
5590 	out = mcx_cmdq_out(cqe);
5591 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5592 		printf("%s: set flow table root failed (%x, %x)\n",
5593 		    DEVNAME(sc), out->cmd_status, betoh32(out->cmd_syndrome));
5594 		error = -1;
5595 		goto free;
5596 	}
5597 
5598 free:
5599 	mcx_dmamem_free(sc, &mxm);
5600 	return (error);
5601 }
5602 
5603 static int
5604 mcx_destroy_flow_table(struct mcx_softc *sc, int flow_table_id)
5605 {
5606 	struct mcx_cmdq_entry *cqe;
5607 	struct mcx_dmamem mxm;
5608 	struct mcx_cmd_destroy_flow_table_in *in;
5609 	struct mcx_cmd_destroy_flow_table_mb_in *mb;
5610 	struct mcx_cmd_destroy_flow_table_out *out;
5611 	int error;
5612 	int token;
5613 
5614 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5615 	token = mcx_cmdq_token(sc);
5616 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
5617 
5618 	in = mcx_cmdq_in(cqe);
5619 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_TABLE);
5620 	in->cmd_op_mod = htobe16(0);
5621 
5622 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5623 	    &cqe->cq_input_ptr, token) != 0) {
5624 		printf("%s: unable to allocate destroy flow table mailbox\n",
5625 		    DEVNAME(sc));
5626 		return (-1);
5627 	}
5628 	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5629 	mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5630 	mb->cmd_table_id = htobe32(flow_table_id);
5631 
5632 	mcx_cmdq_mboxes_sign(&mxm, 1);
5633 	mcx_cmdq_post(sc, cqe, 0);
5634 	error = mcx_cmdq_poll(sc, cqe, 1000);
5635 	if (error != 0) {
5636 		printf("%s: destroy flow table timeout\n", DEVNAME(sc));
5637 		goto free;
5638 	}
5639 	if (mcx_cmdq_verify(cqe) != 0) {
5640 		printf("%s: destroy flow table command corrupt\n",
5641 		    DEVNAME(sc));
		error = -1;
5642 		goto free;
5643 	}
5644 
5645 	out = mcx_cmdq_out(cqe);
5646 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5647 		printf("%s: destroy flow table failed (%x, %x)\n", DEVNAME(sc),
5648 		    out->cmd_status, betoh32(out->cmd_syndrome));
5649 		error = -1;
5650 		goto free;
5651 	}
5652 
5653 free:
5654 	mcx_dmamem_free(sc, &mxm);
5655 	return (error);
5656 }
5657 
5659 static int
5660 mcx_create_flow_group(struct mcx_softc *sc, int flow_table_id, int group,
5661     int start, int size, int match_enable, struct mcx_flow_match *match)
5662 {
5663 	struct mcx_cmdq_entry *cqe;
5664 	struct mcx_dmamem mxm;
5665 	struct mcx_cmd_create_flow_group_in *in;
5666 	struct mcx_cmd_create_flow_group_mb_in *mbin;
5667 	struct mcx_cmd_create_flow_group_out *out;
5668 	struct mcx_flow_group *mfg;
5669 	int error;
5670 	int token;
5671 
5672 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5673 	token = mcx_cmdq_token(sc);
5674 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
5675 	    token);
5676 
5677 	in = mcx_cmdq_in(cqe);
5678 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_GROUP);
5679 	in->cmd_op_mod = htobe16(0);
5680 
5681 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
5682 	    != 0) {
5683 		printf("%s: unable to allocate create flow group mailbox\n",
5684 		    DEVNAME(sc));
5685 		return (-1);
5686 	}
5687 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5688 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5689 	mbin->cmd_table_id = htobe32(flow_table_id);
5690 	mbin->cmd_start_flow_index = htobe32(start);
5691 	mbin->cmd_end_flow_index = htobe32(start + (size - 1));
5692 
5693 	mbin->cmd_match_criteria_enable = match_enable;
5694 	memcpy(&mbin->cmd_match_criteria, match, sizeof(*match));
5695 
5696 	mcx_cmdq_mboxes_sign(&mxm, 2);
5697 	mcx_cmdq_post(sc, cqe, 0);
5698 	error = mcx_cmdq_poll(sc, cqe, 1000);
5699 	if (error != 0) {
5700 		printf("%s: create flow group timeout\n", DEVNAME(sc));
5701 		goto free;
5702 	}
5703 	if (mcx_cmdq_verify(cqe) != 0) {
5704 		printf("%s: create flow group command corrupt\n", DEVNAME(sc));
		error = -1;
5705 		goto free;
5706 	}
5707 
5708 	out = mcx_cmdq_out(cqe);
5709 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5710 		printf("%s: create flow group failed (%x, %x)\n", DEVNAME(sc),
5711 		    out->cmd_status, betoh32(out->cmd_syndrome));
5712 		error = -1;
5713 		goto free;
5714 	}
5715 
5716 	mfg = &sc->sc_flow_group[group];
5717 	mfg->g_id = mcx_get_id(out->cmd_group_id);
5718 	mfg->g_table = flow_table_id;
5719 	mfg->g_start = start;
5720 	mfg->g_size = size;
5721 
5722 free:
5723 	mcx_dmamem_free(sc, &mxm);
5724 	return (error);
5725 }
5726 
5727 static int
5728 mcx_destroy_flow_group(struct mcx_softc *sc, int group)
5729 {
5730 	struct mcx_cmdq_entry *cqe;
5731 	struct mcx_dmamem mxm;
5732 	struct mcx_cmd_destroy_flow_group_in *in;
5733 	struct mcx_cmd_destroy_flow_group_mb_in *mb;
5734 	struct mcx_cmd_destroy_flow_group_out *out;
5735 	struct mcx_flow_group *mfg;
5736 	int error;
5737 	int token;
5738 
5739 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5740 	token = mcx_cmdq_token(sc);
5741 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
5742 
5743 	in = mcx_cmdq_in(cqe);
5744 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_GROUP);
5745 	in->cmd_op_mod = htobe16(0);
5746 
5747 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5748 	    &cqe->cq_input_ptr, token) != 0) {
5749 		printf("%s: unable to allocate destroy flow group mailbox\n",
5750 		    DEVNAME(sc));
5751 		return (-1);
5752 	}
5753 	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5754 	mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5755 	mfg = &sc->sc_flow_group[group];
5756 	mb->cmd_table_id = htobe32(mfg->g_table);
5757 	mb->cmd_group_id = htobe32(mfg->g_id);
5758 
5759 	mcx_cmdq_mboxes_sign(&mxm, 2);
5760 	mcx_cmdq_post(sc, cqe, 0);
5761 	error = mcx_cmdq_poll(sc, cqe, 1000);
5762 	if (error != 0) {
5763 		printf("%s: destroy flow group timeout\n", DEVNAME(sc));
5764 		goto free;
5765 	}
5766 	if (mcx_cmdq_verify(cqe) != 0) {
5767 		printf("%s: destroy flow group command corrupt\n", DEVNAME(sc));
		error = -1;
5768 		goto free;
5769 	}
5770 
5771 	out = mcx_cmdq_out(cqe);
5772 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5773 		printf("%s: destroy flow group failed (%x, %x)\n", DEVNAME(sc),
5774 		    out->cmd_status, betoh32(out->cmd_syndrome));
5775 		error = -1;
5776 		goto free;
5777 	}
5778 
5779 	mfg->g_id = -1;
5780 	mfg->g_table = -1;
5781 	mfg->g_size = 0;
5782 	mfg->g_start = 0;
5783 free:
5784 	mcx_dmamem_free(sc, &mxm);
5785 	return (error);
5786 }
5787 
5788 static int
5789 mcx_set_flow_table_entry_mac(struct mcx_softc *sc, int group, int index,
5790     uint8_t *macaddr, uint32_t dest)
5791 {
5792 	struct mcx_cmdq_entry *cqe;
5793 	struct mcx_dmamem mxm;
5794 	struct mcx_cmd_set_flow_table_entry_in *in;
5795 	struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
5796 	struct mcx_cmd_set_flow_table_entry_out *out;
5797 	struct mcx_flow_group *mfg;
5798 	uint32_t *pdest;
5799 	int error;
5800 	int token;
5801 
5802 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5803 	token = mcx_cmdq_token(sc);
5804 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*pdest),
5805 	    sizeof(*out), token);
5806 
5807 	in = mcx_cmdq_in(cqe);
5808 	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
5809 	in->cmd_op_mod = htobe16(0);
5810 
5811 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
5812 	    != 0) {
5813 		printf("%s: unable to allocate set flow table entry mailbox\n",
5814 		    DEVNAME(sc));
5815 		return (-1);
5816 	}
5817 
5818 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5819 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5820 
5821 	mfg = &sc->sc_flow_group[group];
5822 	mbin->cmd_table_id = htobe32(mfg->g_table);
5823 	mbin->cmd_flow_index = htobe32(mfg->g_start + index);
5824 	mbin->cmd_flow_ctx.fc_group_id = htobe32(mfg->g_id);
5825 
5826 	/*
	 * the fixed part of the flow context ends at offset 0x330,
	 * 0x130 into the second mbox, where the destination list goes
	 */
5827 	pdest = (uint32_t *)
5828 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
5829 	mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
5830 	mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
5831 	*pdest = htobe32(dest);
5832 
5833 	/* the only thing we match on at the moment is the dest mac address */
5834 	if (macaddr != NULL) {
5835 		memcpy(mbin->cmd_flow_ctx.fc_match_value.mc_dest_mac, macaddr,
5836 		    ETHER_ADDR_LEN);
5837 	}
5838 
5839 	mcx_cmdq_mboxes_sign(&mxm, 2);
5840 	mcx_cmdq_post(sc, cqe, 0);
5841 	error = mcx_cmdq_poll(sc, cqe, 1000);
5842 	if (error != 0) {
5843 		printf("%s: set flow table entry timeout\n", DEVNAME(sc));
5844 		goto free;
5845 	}
5846 	if (mcx_cmdq_verify(cqe) != 0) {
5847 		printf("%s: set flow table entry command corrupt\n",
5848 		    DEVNAME(sc));
		error = -1;
5849 		goto free;
5850 	}
5851 
5852 	out = mcx_cmdq_out(cqe);
5853 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5854 		printf("%s: set flow table entry failed (%x, %x)\n",
5855 		    DEVNAME(sc), out->cmd_status, betoh32(out->cmd_syndrome));
5856 		error = -1;
5857 		goto free;
5858 	}
5859 
5860 free:
5861 	mcx_dmamem_free(sc, &mxm);
5862 	return (error);
5863 }
5864 
5865 static int
5866 mcx_set_flow_table_entry_proto(struct mcx_softc *sc, int group, int index,
5867     int ethertype, int ip_proto, uint32_t dest)
5868 {
5869 	struct mcx_cmdq_entry *cqe;
5870 	struct mcx_dmamem mxm;
5871 	struct mcx_cmd_set_flow_table_entry_in *in;
5872 	struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
5873 	struct mcx_cmd_set_flow_table_entry_out *out;
5874 	struct mcx_flow_group *mfg;
5875 	uint32_t *pdest;
5876 	int error;
5877 	int token;
5878 
5879 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5880 	token = mcx_cmdq_token(sc);
5881 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*pdest),
5882 	    sizeof(*out), token);
5883 
5884 	in = mcx_cmdq_in(cqe);
5885 	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
5886 	in->cmd_op_mod = htobe16(0);
5887 
5888 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
5889 	    != 0) {
5890 		printf("%s: unable to allocate set flow table entry mailbox\n",
5891 		    DEVNAME(sc));
5892 		return (-1);
5893 	}
5894 
5895 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5896 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5897 
5898 	mfg = &sc->sc_flow_group[group];
5899 	mbin->cmd_table_id = htobe32(mfg->g_table);
5900 	mbin->cmd_flow_index = htobe32(mfg->g_start + index);
5901 	mbin->cmd_flow_ctx.fc_group_id = htobe32(mfg->g_id);
5902 
5903 	/*
	 * the fixed part of the flow context ends at offset 0x330,
	 * 0x130 into the second mbox, where the destination list goes
	 */
5904 	pdest = (uint32_t *)
5905 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
5906 	mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
5907 	mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
5908 	*pdest = htobe32(dest);
5909 
5910 	mbin->cmd_flow_ctx.fc_match_value.mc_ethertype = htobe16(ethertype);
5911 	mbin->cmd_flow_ctx.fc_match_value.mc_ip_proto = ip_proto;
5912 
5913 	mcx_cmdq_mboxes_sign(&mxm, 2);
5914 	mcx_cmdq_post(sc, cqe, 0);
5915 	error = mcx_cmdq_poll(sc, cqe, 1000);
5916 	if (error != 0) {
5917 		printf("%s: set flow table entry timeout\n", DEVNAME(sc));
5918 		goto free;
5919 	}
5920 	if (mcx_cmdq_verify(cqe) != 0) {
5921 		printf("%s: set flow table entry command corrupt\n",
5922 		    DEVNAME(sc));
		error = -1;
5923 		goto free;
5924 	}
5925 
5926 	out = mcx_cmdq_out(cqe);
5927 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5928 		printf("%s: set flow table entry failed (%x, %x)\n",
5929 		    DEVNAME(sc), out->cmd_status, betoh32(out->cmd_syndrome));
5930 		error = -1;
5931 		goto free;
5932 	}
5933 
5934 free:
5935 	mcx_dmamem_free(sc, &mxm);
5936 	return (error);
5937 }
5938 
5939 static int
5940 mcx_delete_flow_table_entry(struct mcx_softc *sc, int group, int index)
5941 {
5942 	struct mcx_cmdq_entry *cqe;
5943 	struct mcx_dmamem mxm;
5944 	struct mcx_cmd_delete_flow_table_entry_in *in;
5945 	struct mcx_cmd_delete_flow_table_entry_mb_in *mbin;
5946 	struct mcx_cmd_delete_flow_table_entry_out *out;
5947 	struct mcx_flow_group *mfg;
5948 	int error;
5949 	int token;
5950 
5951 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5952 	token = mcx_cmdq_token(sc);
5953 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
5954 	    token);
5955 
5956 	in = mcx_cmdq_in(cqe);
5957 	in->cmd_opcode = htobe16(MCX_CMD_DELETE_FLOW_TABLE_ENTRY);
5958 	in->cmd_op_mod = htobe16(0);
5959 
5960 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5961 	    &cqe->cq_input_ptr, token) != 0) {
5962 		printf("%s: unable to allocate "
5963 		    "delete flow table entry mailbox\n", DEVNAME(sc));
5964 		return (-1);
5965 	}
5966 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5967 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5968 
5969 	mfg = &sc->sc_flow_group[group];
5970 	mbin->cmd_table_id = htobe32(mfg->g_table);
5971 	mbin->cmd_flow_index = htobe32(mfg->g_start + index);
5972 
5973 	mcx_cmdq_mboxes_sign(&mxm, 2);
5974 	mcx_cmdq_post(sc, cqe, 0);
5975 	error = mcx_cmdq_poll(sc, cqe, 1000);
5976 	if (error != 0) {
5977 		printf("%s: delete flow table entry timeout\n", DEVNAME(sc));
5978 		goto free;
5979 	}
5980 	if (mcx_cmdq_verify(cqe) != 0) {
5981 		printf("%s: delete flow table entry command corrupt\n",
5982 		    DEVNAME(sc));
		error = -1;
5983 		goto free;
5984 	}
5985 
5986 	out = mcx_cmdq_out(cqe);
5987 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5988 		printf("%s: delete flow table entry %d:%d failed (%x, %x)\n",
5989 		    DEVNAME(sc), group, index, out->cmd_status,
5990 		    betoh32(out->cmd_syndrome));
5991 		error = -1;
5992 		goto free;
5993 	}
5994 
5995 free:
5996 	mcx_dmamem_free(sc, &mxm);
5997 	return (error);
5998 }
5999 
6000 #if 0
6001 int
6002 mcx_dump_flow_table(struct mcx_softc *sc, int flow_table_id)
6003 {
6004 	struct mcx_dmamem mxm;
6005 	struct mcx_cmdq_entry *cqe;
6006 	struct mcx_cmd_query_flow_table_in *in;
6007 	struct mcx_cmd_query_flow_table_mb_in *mbin;
6008 	struct mcx_cmd_query_flow_table_out *out;
6009 	struct mcx_cmd_query_flow_table_mb_out *mbout;
6010 	uint8_t token = mcx_cmdq_token(sc);
6011 	int error;
6012 	int i;
6013 	uint8_t *dump;
6014 
6015 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6016 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6017 	    sizeof(*out) + sizeof(*mbout) + 16, token);
6018 
6019 	in = mcx_cmdq_in(cqe);
6020 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE);
6021 	in->cmd_op_mod = htobe16(0);
6022 
6023 	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
6024 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE);
6025 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6026 	    &cqe->cq_output_ptr, token) != 0) {
6027 		printf(", unable to allocate query flow table mailboxes\n");
6028 		return (-1);
6029 	}
6030 	cqe->cq_input_ptr = cqe->cq_output_ptr;
6031 
6032 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6033 	mbin->cmd_table_type = 0;
6034 	mbin->cmd_table_id = htobe32(flow_table_id);
6035 
6036 	mcx_cmdq_mboxes_sign(&mxm, 1);
6037 
6038 	mcx_cmdq_post(sc, cqe, 0);
6039 	error = mcx_cmdq_poll(sc, cqe, 1000);
6040 	if (error != 0) {
6041 		printf("%s: query flow table timeout\n", DEVNAME(sc));
6042 		goto free;
6043 	}
6044 	error = mcx_cmdq_verify(cqe);
6045 	if (error != 0) {
6046 		printf("%s: query flow table reply corrupt\n", DEVNAME(sc));
6047 		goto free;
6048 	}
6049 
6050 	out = mcx_cmdq_out(cqe);
6051 	switch (out->cmd_status) {
6052 	case MCX_CQ_STATUS_OK:
6053 		break;
6054 	default:
6055 		printf("%s: query flow table failed (%x/%x)\n", DEVNAME(sc),
6056 		    out->cmd_status, betoh32(out->cmd_syndrome));
6057 		error = -1;
6058 		goto free;
6059 	}
6060 
6061 	mbout = (struct mcx_cmd_query_flow_table_mb_out *)
6062 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6063 	dump = (uint8_t *)mbout + 8;
6064 	for (i = 0; i < sizeof(struct mcx_flow_table_ctx); i++) {
6065 		printf("%.2x ", dump[i]);
6066 		if (i % 16 == 15)
6067 			printf("\n");
6068 	}
6069 free:
6070 	mcx_cq_mboxes_free(sc, &mxm);
6071 	return (error);
6072 }

6073 int
6074 mcx_dump_flow_table_entry(struct mcx_softc *sc, int flow_table_id, int index)
6075 {
6076 	struct mcx_dmamem mxm;
6077 	struct mcx_cmdq_entry *cqe;
6078 	struct mcx_cmd_query_flow_table_entry_in *in;
6079 	struct mcx_cmd_query_flow_table_entry_mb_in *mbin;
6080 	struct mcx_cmd_query_flow_table_entry_out *out;
6081 	struct mcx_cmd_query_flow_table_entry_mb_out *mbout;
6082 	uint8_t token = mcx_cmdq_token(sc);
6083 	int error;
6084 	int i;
6085 	uint8_t *dump;
6086 
6087 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6088 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6089 	    sizeof(*out) + sizeof(*mbout) + 16, token);
6090 
6091 	in = mcx_cmdq_in(cqe);
6092 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE_ENTRY);
6093 	in->cmd_op_mod = htobe16(0);
6094 
6095 	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
6096 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6097 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6098 	    &cqe->cq_output_ptr, token) != 0) {
6099 		printf(", unable to allocate "
6100 		    "query flow table entry mailboxes\n");
6101 		return (-1);
6102 	}
6103 	cqe->cq_input_ptr = cqe->cq_output_ptr;
6104 
6105 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6106 	mbin->cmd_table_type = 0;
6107 	mbin->cmd_table_id = htobe32(flow_table_id);
6108 	mbin->cmd_flow_index = htobe32(index);
6109 
6110 	mcx_cmdq_mboxes_sign(&mxm, 1);
6111 
6112 	mcx_cmdq_post(sc, cqe, 0);
6113 	error = mcx_cmdq_poll(sc, cqe, 1000);
6114 	if (error != 0) {
6115 		printf("%s: query flow table entry timeout\n", DEVNAME(sc));
6116 		goto free;
6117 	}
6118 	error = mcx_cmdq_verify(cqe);
6119 	if (error != 0) {
6120 		printf("%s: query flow table entry reply corrupt\n",
6121 		    DEVNAME(sc));
6122 		goto free;
6123 	}
6124 
6125 	out = mcx_cmdq_out(cqe);
6126 	switch (out->cmd_status) {
6127 	case MCX_CQ_STATUS_OK:
6128 		break;
6129 	default:
6130 		printf("%s: query flow table entry failed (%x/%x)\n",
6131 		    DEVNAME(sc), out->cmd_status, betoh32(out->cmd_syndrome));
6132 		error = -1;
6133 		goto free;
6134 	}
6135 
6136 	mbout = (struct mcx_cmd_query_flow_table_entry_mb_out *)
6137 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6138 	dump = (uint8_t *)mbout;
6139 	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
6140 		printf("%.2x ", dump[i]);
6141 		if (i % 16 == 15)
6142 			printf("\n");
6143 	}
6144 
6145 free:
6146 	mcx_cq_mboxes_free(sc, &mxm);
6147 	return (error);
6148 }
6149 
6150 int
6151 mcx_dump_flow_group(struct mcx_softc *sc, int flow_table_id)
6152 {
6153 	struct mcx_dmamem mxm;
6154 	struct mcx_cmdq_entry *cqe;
6155 	struct mcx_cmd_query_flow_group_in *in;
6156 	struct mcx_cmd_query_flow_group_mb_in *mbin;
6157 	struct mcx_cmd_query_flow_group_out *out;
6158 	struct mcx_cmd_query_flow_group_mb_out *mbout;
6159 	uint8_t token = mcx_cmdq_token(sc);
6160 	int error;
6161 	int i;
6162 	uint8_t *dump;
6163 
6164 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6165 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6166 	    sizeof(*out) + sizeof(*mbout) + 16, token);
6167 
6168 	in = mcx_cmdq_in(cqe);
6169 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_GROUP);
6170 	in->cmd_op_mod = htobe16(0);
6171 
6172 	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
6173 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6174 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6175 	    &cqe->cq_output_ptr, token) != 0) {
6176 		printf(", unable to allocate query flow group mailboxes\n");
6177 		return (-1);
6178 	}
6179 	cqe->cq_input_ptr = cqe->cq_output_ptr;
6180 
6181 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6182 	mbin->cmd_table_type = 0;
6183 	mbin->cmd_table_id = htobe32(flow_table_id);
6184 	mbin->cmd_group_id = htobe32(sc->sc_flow_group_id);
6185 
6186 	mcx_cmdq_mboxes_sign(&mxm, 1);
6187 
6188 	mcx_cmdq_post(sc, cqe, 0);
6189 	error = mcx_cmdq_poll(sc, cqe, 1000);
6190 	if (error != 0) {
6191 		printf("%s: query flow group timeout\n", DEVNAME(sc));
6192 		goto free;
6193 	}
6194 	error = mcx_cmdq_verify(cqe);
6195 	if (error != 0) {
6196 		printf("%s: query flow group reply corrupt\n", DEVNAME(sc));
6197 		goto free;
6198 	}
6199 
6200 	out = mcx_cmdq_out(cqe);
6201 	switch (out->cmd_status) {
6202 	case MCX_CQ_STATUS_OK:
6203 		break;
6204 	default:
6205 		printf("%s: query flow group failed (%x/%x)\n", DEVNAME(sc),
6206 		    out->cmd_status, betoh32(out->cmd_syndrome));
6207 		error = -1;
6208 		goto free;
6209 	}
6210 
6211 	mbout = (struct mcx_cmd_query_flow_group_mb_out *)
6212 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6213 	dump = (uint8_t *)mbout;
6214 	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
6215 		printf("%.2x ", dump[i]);
6216 		if (i % 16 == 15)
6217 			printf("\n");
6218 	}
6219 	dump = (uint8_t *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1)));
6220 	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
6221 		printf("%.2x ", dump[i]);
6222 		if (i % 16 == 15)
6223 			printf("\n");
6224 	}
6225 
6226 free:
6227 	mcx_cq_mboxes_free(sc, &mxm);
6228 	return (error);
6229 }
6230 
6231 static int
6232 mcx_dump_counters(struct mcx_softc *sc)
6233 {
6234 	struct mcx_dmamem mxm;
6235 	struct mcx_cmdq_entry *cqe;
6236 	struct mcx_cmd_query_vport_counters_in *in;
6237 	struct mcx_cmd_query_vport_counters_mb_in *mbin;
6238 	struct mcx_cmd_query_vport_counters_out *out;
6239 	struct mcx_nic_vport_counters *counters;
6240 	int error, token;
6241 
6242 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6243 	token = mcx_cmdq_token(sc);
6244 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6245 	    sizeof(*out) + sizeof(*counters), token);
6246 
6247 	in = mcx_cmdq_in(cqe);
6248 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_VPORT_COUNTERS);
6249 	in->cmd_op_mod = htobe16(0);
6250 
6251 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
6252 	    &cqe->cq_output_ptr, token) != 0) {
6253 		printf(", unable to allocate "
6254 		    "query nic vport counters mailboxen\n");
6255 		return (-1);
6256 	}
6257 	cqe->cq_input_ptr = cqe->cq_output_ptr;
6258 
6259 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6260 	mbin->cmd_clear = 0x80;
6261 
6262 	mcx_cmdq_mboxes_sign(&mxm, 1);
6263 	mcx_cmdq_post(sc, cqe, 0);
6264 
6265 	error = mcx_cmdq_poll(sc, cqe, 1000);
6266 	if (error != 0) {
6267 		printf("%s: query nic vport counters timeout\n", DEVNAME(sc));
6268 		goto free;
6269 	}
6270 	if (mcx_cmdq_verify(cqe) != 0) {
6271 		printf("%s: query nic vport counters command corrupt\n",
6272 		    DEVNAME(sc));
		error = -1;
6273 		goto free;
6274 	}
6275 
6276 	out = mcx_cmdq_out(cqe);
6277 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
6278 		printf("%s: query nic vport counters failed (%x, %x)\n",
6279 		    DEVNAME(sc), out->cmd_status, betoh32(out->cmd_syndrome));
6280 		error = -1;
6281 		goto free;
6282 	}
6283 
6284 	counters = (struct mcx_nic_vport_counters *)
6285 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6286 	if (counters->rx_bcast.packets + counters->tx_bcast.packets +
6287 	    counters->rx_ucast.packets + counters->tx_ucast.packets +
6288 	    counters->rx_err.packets + counters->tx_err.packets)
6289 		printf("%s: err %llx/%llx uc %llx/%llx bc %llx/%llx\n",
6290 		    DEVNAME(sc),
6291 		    betoh64(counters->tx_err.packets),
6292 		    betoh64(counters->rx_err.packets),
6293 		    betoh64(counters->tx_ucast.packets),
6294 		    betoh64(counters->rx_ucast.packets),
6295 		    betoh64(counters->tx_bcast.packets),
6296 		    betoh64(counters->rx_bcast.packets));
6297 free:
6298 	mcx_dmamem_free(sc, &mxm);
6299 
6300 	return (error);
6301 }
6302 
6303 static int
6304 mcx_dump_flow_counter(struct mcx_softc *sc, int index, const char *what)
6305 {
6306 	struct mcx_dmamem mxm;
6307 	struct mcx_cmdq_entry *cqe;
6308 	struct mcx_cmd_query_flow_counter_in *in;
6309 	struct mcx_cmd_query_flow_counter_mb_in *mbin;
6310 	struct mcx_cmd_query_flow_counter_out *out;
6311 	struct mcx_counter *counters;
6312 	int error, token;
6313 
6314 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6315 	token = mcx_cmdq_token(sc);
6316 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out) +
6317 	    sizeof(*counters), token);
6318 
6319 	in = mcx_cmdq_in(cqe);
6320 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_COUNTER);
6321 	in->cmd_op_mod = htobe16(0);
6322 
6323 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
6324 	    &cqe->cq_output_ptr, token) != 0) {
6325 		printf(", unable to allocate query flow counter mailboxen\n");
6326 		return (-1);
6327 	}
6328 	cqe->cq_input_ptr = cqe->cq_output_ptr;
6329 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6330 	mbin->cmd_flow_counter_id = htobe16(sc->sc_flow_counter_id[index]);
6331 	mbin->cmd_clear = 0x80;
6332 
6333 	mcx_cmdq_mboxes_sign(&mxm, 1);
6334 	mcx_cmdq_post(sc, cqe, 0);
6335 
6336 	error = mcx_cmdq_poll(sc, cqe, 1000);
6337 	if (error != 0) {
6338 		printf("%s: query flow counter timeout\n", DEVNAME(sc));
6339 		goto free;
6340 	}
6341 	if (mcx_cmdq_verify(cqe) != 0) {
6342 		printf("%s: query flow counter command corrupt\n", DEVNAME(sc));
		error = -1;
6343 		goto free;
6344 	}
6345 
6346 	out = mcx_cmdq_out(cqe);
6347 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
6348 		printf("%s: query flow counter failed (%x, %x)\n", DEVNAME(sc),
6349 		    out->cmd_status, betoh32(out->cmd_syndrome));
6350 		error = -1;
6351 		goto free;
6352 	}
6353 
6354 	counters = (struct mcx_counter *)
6355 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6356 	if (counters->packets)
6357 		printf("%s: %s inflow %llx\n", DEVNAME(sc), what,
6358 		    betoh64(counters->packets));
6359 free:
6360 	mcx_dmamem_free(sc, &mxm);
6361 
6362 	return (error);
6363 }
6364 
6365 #endif
6366 
6367 #if NKSTAT > 0
6368 
6369 int
6370 mcx_query_rq(struct mcx_softc *sc, struct mcx_rx *rx, struct mcx_rq_ctx *rq_ctx)
6371 {
6372 	struct mcx_dmamem mxm;
6373 	struct mcx_cmdq_entry *cqe;
6374 	struct mcx_cmd_query_rq_in *in;
6375 	struct mcx_cmd_query_rq_out *out;
6376 	struct mcx_cmd_query_rq_mb_out *mbout;
6377 	uint8_t token = mcx_cmdq_token(sc);
6378 	int error;
6379 
6380 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6381 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
6382 	    token);
6383 
6384 	in = mcx_cmdq_in(cqe);
6385 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_RQ);
6386 	in->cmd_op_mod = htobe16(0);
6387 	in->cmd_rqn = htobe32(rx->rx_rqn);
6388 
6389 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6390 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6391 	    &cqe->cq_output_ptr, token) != 0) {
6392 		printf("%s: unable to allocate query rq mailboxes\n",
		    DEVNAME(sc));
6393 		return (-1);
6394 	}
6395 
6396 	mcx_cmdq_mboxes_sign(&mxm, 1);
6397 
6398 	mcx_cmdq_post(sc, cqe, 0);
6399 	error = mcx_cmdq_poll(sc, cqe, 1000);
6400 	if (error != 0) {
6401 		printf("%s: query rq timeout\n", DEVNAME(sc));
6402 		goto free;
6403 	}
6404 	error = mcx_cmdq_verify(cqe);
6405 	if (error != 0) {
6406 		printf("%s: query rq reply corrupt\n", DEVNAME(sc));
6407 		goto free;
6408 	}
6409 
6410 	out = mcx_cmdq_out(cqe);
6411 	switch (out->cmd_status) {
6412 	case MCX_CQ_STATUS_OK:
6413 		break;
6414 	default:
6415 		printf("%s: query rq failed (%x/%x)\n", DEVNAME(sc),
6416 		    out->cmd_status, betoh32(out->cmd_syndrome));
6417 		error = -1;
6418 		goto free;
6419 	}
6420 
6421 	mbout = (struct mcx_cmd_query_rq_mb_out *)
6422 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6423 	memcpy(rq_ctx, &mbout->cmd_ctx, sizeof(*rq_ctx));
6424 
6425 free:
6426 	mcx_cq_mboxes_free(sc, &mxm);
6427 	return (error);
6428 }
6429 
6430 int
6431 mcx_query_sq(struct mcx_softc *sc, struct mcx_tx *tx, struct mcx_sq_ctx *sq_ctx)
6432 {
6433 	struct mcx_dmamem mxm;
6434 	struct mcx_cmdq_entry *cqe;
6435 	struct mcx_cmd_query_sq_in *in;
6436 	struct mcx_cmd_query_sq_out *out;
6437 	struct mcx_cmd_query_sq_mb_out *mbout;
6438 	uint8_t token = mcx_cmdq_token(sc);
6439 	int error;
6440 
6441 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6442 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
6443 	    token);
6444 
6445 	in = mcx_cmdq_in(cqe);
6446 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_SQ);
6447 	in->cmd_op_mod = htobe16(0);
6448 	in->cmd_sqn = htobe32(tx->tx_sqn);
6449 
6450 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6451 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6452 	    &cqe->cq_output_ptr, token) != 0) {
6453 		printf("%s: unable to allocate query sq mailboxes\n",
		    DEVNAME(sc));
6454 		return (-1);
6455 	}
6456 
6457 	mcx_cmdq_mboxes_sign(&mxm, 1);
6458 
6459 	mcx_cmdq_post(sc, cqe, 0);
6460 	error = mcx_cmdq_poll(sc, cqe, 1000);
6461 	if (error != 0) {
6462 		printf("%s: query sq timeout\n", DEVNAME(sc));
6463 		goto free;
6464 	}
6465 	error = mcx_cmdq_verify(cqe);
6466 	if (error != 0) {
6467 		printf("%s: query sq reply corrupt\n", DEVNAME(sc));
6468 		goto free;
6469 	}
6470 
6471 	out = mcx_cmdq_out(cqe);
6472 	switch (out->cmd_status) {
6473 	case MCX_CQ_STATUS_OK:
6474 		break;
6475 	default:
6476 		printf("%s: query sq failed (%x/%x)\n", DEVNAME(sc),
6477 		    out->cmd_status, betoh32(out->cmd_syndrome));
6478 		error = -1;
6479 		goto free;
6480 	}
6481 
6482 	mbout = (struct mcx_cmd_query_sq_mb_out *)
6483 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6484 	memcpy(sq_ctx, &mbout->cmd_ctx, sizeof(*sq_ctx));
6485 
6486 free:
6487 	mcx_cq_mboxes_free(sc, &mxm);
6488 	return (error);
6489 }
6490 
6491 int
6492 mcx_query_cq(struct mcx_softc *sc, struct mcx_cq *cq, struct mcx_cq_ctx *cq_ctx)
6493 {
6494 	struct mcx_dmamem mxm;
6495 	struct mcx_cmdq_entry *cqe;
6496 	struct mcx_cmd_query_cq_in *in;
6497 	struct mcx_cmd_query_cq_out *out;
6498 	struct mcx_cq_ctx *ctx;
6499 	uint8_t token = mcx_cmdq_token(sc);
6500 	int error;
6501 
6502 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6503 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx) + 16,
6504 	    token);
6505 
6506 	in = mcx_cmdq_in(cqe);
6507 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_CQ);
6508 	in->cmd_op_mod = htobe16(0);
6509 	in->cmd_cqn = htobe32(cq->cq_n);
6510 
6511 	CTASSERT(sizeof(*ctx) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6512 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6513 	    &cqe->cq_output_ptr, token) != 0) {
6514 		printf("%s: unable to allocate query cq mailboxes\n",
		    DEVNAME(sc));
6515 		return (-1);
6516 	}
6517 
6518 	mcx_cmdq_mboxes_sign(&mxm, 1);
6519 
6520 	mcx_cmdq_post(sc, cqe, 0);
6521 	error = mcx_cmdq_poll(sc, cqe, 1000);
6522 	if (error != 0) {
6523 		printf("%s: query cq timeout\n", DEVNAME(sc));
6524 		goto free;
6525 	}
6526 	error = mcx_cmdq_verify(cqe);
6527 	if (error != 0) {
6528 		printf("%s: query cq reply corrupt\n", DEVNAME(sc));
6529 		goto free;
6530 	}
6531 
6532 	out = mcx_cmdq_out(cqe);
6533 	switch (out->cmd_status) {
6534 	case MCX_CQ_STATUS_OK:
6535 		break;
6536 	default:
6537 		printf("%s: query cq failed (%x/%x)\n", DEVNAME(sc),
6538 		    out->cmd_status, betoh32(out->cmd_syndrome));
6539 		error = -1;
6540 		goto free;
6541 	}
6542 
6543 	ctx = (struct mcx_cq_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6544 	memcpy(cq_ctx, ctx, sizeof(*cq_ctx));
6545 free:
6546 	mcx_cq_mboxes_free(sc, &mxm);
6547 	return (error);
6548 }
6549 
6550 int
6551 mcx_query_eq(struct mcx_softc *sc, struct mcx_eq *eq, struct mcx_eq_ctx *eq_ctx)
6552 {
6553 	struct mcx_dmamem mxm;
6554 	struct mcx_cmdq_entry *cqe;
6555 	struct mcx_cmd_query_eq_in *in;
6556 	struct mcx_cmd_query_eq_out *out;
6557 	struct mcx_eq_ctx *ctx;
6558 	uint8_t token = mcx_cmdq_token(sc);
6559 	int error;
6560 
6561 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6562 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx) + 16,
6563 	    token);
6564 
6565 	in = mcx_cmdq_in(cqe);
6566 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_EQ);
6567 	in->cmd_op_mod = htobe16(0);
6568 	in->cmd_eqn = htobe32(eq->eq_n);
6569 
6570 	CTASSERT(sizeof(*ctx) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6571 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6572 	    &cqe->cq_output_ptr, token) != 0) {
6573 		printf("%s: unable to allocate query eq mailboxes\n",
		    DEVNAME(sc));
6574 		return (-1);
6575 	}
6576 
6577 	mcx_cmdq_mboxes_sign(&mxm, 1);
6578 
6579 	mcx_cmdq_post(sc, cqe, 0);
6580 	error = mcx_cmdq_poll(sc, cqe, 1000);
6581 	if (error != 0) {
6582 		printf("%s: query eq timeout\n", DEVNAME(sc));
6583 		goto free;
6584 	}
6585 	error = mcx_cmdq_verify(cqe);
6586 	if (error != 0) {
6587 		printf("%s: query eq reply corrupt\n", DEVNAME(sc));
6588 		goto free;
6589 	}
6590 
6591 	out = mcx_cmdq_out(cqe);
6592 	switch (out->cmd_status) {
6593 	case MCX_CQ_STATUS_OK:
6594 		break;
6595 	default:
6596 		printf("%s: query eq failed (%x/%x)\n", DEVNAME(sc),
6597 		    out->cmd_status, betoh32(out->cmd_syndrome));
6598 		error = -1;
6599 		goto free;
6600 	}
6601 
6602 	ctx = (struct mcx_eq_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6603 	memcpy(eq_ctx, ctx, sizeof(*eq_ctx));
6604 free:
6605 	mcx_cq_mboxes_free(sc, &mxm);
6606 	return (error);
6607 }
6608 
6609 #endif /* NKSTAT > 0 */
6610 
6611 static inline unsigned int
6612 mcx_rx_fill_slots(struct mcx_softc *sc, struct mcx_rx *rx, uint nslots)
6613 {
6614 	struct mcx_rq_entry *ring, *rqe;
6615 	struct mcx_slot *ms;
6616 	struct mbuf *m;
6617 	uint slot, p, fills;
6618 
6619 	ring = MCX_DMA_KVA(&rx->rx_rq_mem);
6620 	p = rx->rx_prod;
6621 
6622 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
6623 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
6624 
6625 	for (fills = 0; fills < nslots; fills++) {
6626 		slot = p % (1 << MCX_LOG_RQ_SIZE);
6627 
6628 		ms = &rx->rx_slots[slot];
6629 		rqe = &ring[slot];
6630 
6631 		m = MCLGETL(NULL, M_DONTWAIT, sc->sc_rxbufsz);
6632 		if (m == NULL)
6633 			break;
6634 
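		/*
		 * leave the spare room at the front of the cluster and
		 * add ETHER_ALIGN so the IP header after the 14 byte
		 * ethernet header ends up 4-byte aligned.
		 */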
6635 		m->m_data += (m->m_ext.ext_size - sc->sc_rxbufsz);
6636 		m->m_data += ETHER_ALIGN;
6637 		m->m_len = m->m_pkthdr.len = sc->sc_hardmtu;
6638 
6639 		if (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
6640 		    BUS_DMA_NOWAIT) != 0) {
6641 			m_freem(m);
6642 			break;
6643 		}
6644 		ms->ms_m = m;
6645 
6646 		htobem32(&rqe->rqe_byte_count, ms->ms_map->dm_segs[0].ds_len);
6647 		htobem64(&rqe->rqe_addr, ms->ms_map->dm_segs[0].ds_addr);
6648 		htobem32(&rqe->rqe_lkey, sc->sc_lkey);
6649 
6650 		p++;
6651 	}
6652 
6653 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
6654 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_PREWRITE);
6655 
6656 	rx->rx_prod = p;
6657 
6658 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
6659 	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
6660 	htobem32(MCX_DMA_OFF(&sc->sc_doorbell_mem, rx->rx_doorbell),
6661 	    p & MCX_WQ_DOORBELL_MASK);
6662 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
6663 	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
6664 
6665 	return (nslots - fills);
6666 }
6667 
6668 int
6669 mcx_rx_fill(struct mcx_softc *sc, struct mcx_rx *rx)
6670 {
6671 	u_int slots;
6672 
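	/*
	 * reserve slots from the rxr accounting, then hand back
	 * whatever mcx_rx_fill_slots could not fill.
	 */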
6673 	slots = if_rxr_get(&rx->rx_rxr, (1 << MCX_LOG_RQ_SIZE));
6674 	if (slots == 0)
6675 		return (1);
6676 
6677 	slots = mcx_rx_fill_slots(sc, rx, slots);
6678 	if_rxr_put(&rx->rx_rxr, slots);
6679 	return (0);
6680 }
6681 
6682 void
6683 mcx_refill(void *xrx)
6684 {
6685 	struct mcx_rx *rx = xrx;
6686 	struct mcx_softc *sc = rx->rx_softc;
6687 
6688 	mcx_rx_fill(sc, rx);
6689 
6690 	if (if_rxr_inuse(&rx->rx_rxr) == 0)
6691 		timeout_add(&rx->rx_refill, 1);
6692 }
6693 
6694 static int
6695 mcx_process_txeof(struct mcx_softc *sc, struct mcx_tx *tx,
6696     struct mcx_cq_entry *cqe)
6697 {
6698 	struct mcx_slot *ms;
6699 	bus_dmamap_t map;
6700 	int slot, slots;
6701 
6702 	slot = betoh16(cqe->cq_wqe_count) % (1 << MCX_LOG_SQ_SIZE);
6703 
6704 	ms = &tx->tx_slots[slot];
6705 	map = ms->ms_map;
6706 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
6707 	    BUS_DMASYNC_POSTWRITE);
6708 
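	/*
	 * the first slot carries the ctrl and eth segments plus the
	 * first data segment; any further dma segments are packed
	 * MCX_SQ_SEGS_PER_SLOT to a slot, rounded up.
	 */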
6709 	slots = 1;
6710 	if (map->dm_nsegs > 1)
6711 		slots += (map->dm_nsegs + 2) / MCX_SQ_SEGS_PER_SLOT;
6712 
6713 	bus_dmamap_unload(sc->sc_dmat, map);
6714 	m_freem(ms->ms_m);
6715 	ms->ms_m = NULL;
6716 
6717 	return (slots);
6718 }
6719 
6720 static uint64_t
6721 mcx_uptime(void)
6722 {
6723 	struct timespec ts;
6724 
6725 	nanouptime(&ts);
6726 
6727 	return ((uint64_t)ts.tv_sec * 1000000000 + (uint64_t)ts.tv_nsec);
6728 }
6729 
6730 static void
6731 mcx_calibrate_first(struct mcx_softc *sc)
6732 {
6733 	struct mcx_calibration *c = &sc->sc_calibration[0];
6734 	int s;
6735 
6736 	sc->sc_calibration_gen = 0;
6737 
6738 	s = splhigh(); /* crit_enter? */
6739 	c->c_ubase = mcx_uptime();
6740 	c->c_tbase = mcx_timer(sc);
6741 	splx(s);
6742 	c->c_ratio = 0;
6743 
6744 #ifdef notyet
6745 	timeout_add_sec(&sc->sc_calibrate, MCX_CALIBRATE_FIRST);
6746 #endif
6747 }
6748 
6749 #define MCX_TIMESTAMP_SHIFT 24
6750 
6751 static void
6752 mcx_calibrate(void *arg)
6753 {
6754 	struct mcx_softc *sc = arg;
6755 	struct mcx_calibration *nc, *pc;
6756 	uint64_t udiff, tdiff;
6757 	unsigned int gen;
6758 	int s;
6759 
6760 	if (!ISSET(sc->sc_ac.ac_if.if_flags, IFF_RUNNING))
6761 		return;
6762 
6763 	timeout_add_sec(&sc->sc_calibrate, MCX_CALIBRATE_NORMAL);
6764 
6765 	gen = sc->sc_calibration_gen;
6766 	pc = &sc->sc_calibration[gen % nitems(sc->sc_calibration)];
6767 	gen++;
6768 	nc = &sc->sc_calibration[gen % nitems(sc->sc_calibration)];
6769 
6770 	nc->c_uptime = pc->c_ubase;
6771 	nc->c_timestamp = pc->c_tbase;
6772 
6773 	s = splhigh(); /* crit_enter? */
6774 	nc->c_ubase = mcx_uptime();
6775 	nc->c_tbase = mcx_timer(sc);
6776 	splx(s);
6777 
6778 	udiff = nc->c_ubase - nc->c_uptime;
6779 	tdiff = nc->c_tbase - nc->c_timestamp;
6780 
6781 	/*
6782 	 * udiff is the wall clock time between calibration ticks,
6783 	 * which should be 32 seconds or 32 billion nanoseconds. if
6784 	 * we squint, 1 billion nanoseconds is kind of like a 32 bit
6785 	 * number, so 32 billion should still have a lot of high bits
6786 	 * spare. we use this space by shifting the nanoseconds up
6787 	 * 24 bits so we have a nice big number to divide by the
6788 	 * number of mcx timer ticks.
6789 	 */
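	/*
	 * for example, 32 billion is roughly 2^35, so shifting it up
	 * 24 bits gives roughly 2^59, which still fits comfortably in
	 * the uint64_t before the divide.
	 */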
6790 	nc->c_ratio = (udiff << MCX_TIMESTAMP_SHIFT) / tdiff;
6791 
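	/* make the new calibration visible before bumping the gen */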
6792 	membar_producer();
6793 	sc->sc_calibration_gen = gen;
6794 }
6795 
6796 static int
6797 mcx_process_rx(struct mcx_softc *sc, struct mcx_rx *rx,
6798     struct mcx_cq_entry *cqe, struct mbuf_list *ml,
6799     const struct mcx_calibration *c)
6800 {
6801 	struct mcx_slot *ms;
6802 	struct mbuf *m;
6803 	uint32_t flags;
6804 	int slot;
6805 
6806 	slot = betoh16(cqe->cq_wqe_count) % (1 << MCX_LOG_RQ_SIZE);
6807 
6808 	ms = &rx->rx_slots[slot];
6809 	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, ms->ms_map->dm_mapsize,
6810 	    BUS_DMASYNC_POSTREAD);
6811 	bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
6812 
6813 	m = ms->ms_m;
6814 	ms->ms_m = NULL;
6815 
6816 	m->m_pkthdr.len = m->m_len = bemtoh32(&cqe->cq_byte_cnt);
6817 
6818 	if (cqe->cq_rx_hash_type) {
6819 		m->m_pkthdr.ph_flowid = betoh32(cqe->cq_rx_hash);
6820 		m->m_pkthdr.csum_flags |= M_FLOWID;
6821 	}
6822 
6823 	flags = bemtoh32(&cqe->cq_flags);
6824 	if (flags & MCX_CQ_ENTRY_FLAGS_L3_OK)
6825 		m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
6826 	if (flags & MCX_CQ_ENTRY_FLAGS_L4_OK)
6827 		m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
6828 		    M_UDP_CSUM_IN_OK;
6829 #if NVLAN > 0
6830 	if (flags & MCX_CQ_ENTRY_FLAGS_CV) {
6831 		m->m_pkthdr.ether_vtag = (flags &
6832 		    MCX_CQ_ENTRY_FLAGS_VLAN_MASK);
6833 		m->m_flags |= M_VLANTAG;
6834 	}
6835 #endif
6836 
6837 #ifdef notyet
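	/*
	 * convert the hardware timestamp to nanoseconds: scale the tick
	 * delta since calibration by the fixed point ratio and add the
	 * uptime base from the same calibration pass.
	 */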
6838 	if (ISSET(sc->sc_ac.ac_if.if_flags, IFF_LINK0) && c->c_ratio) {
6839 		uint64_t t = bemtoh64(&cqe->cq_timestamp);
6840 		t -= c->c_timestamp;
6841 		t *= c->c_ratio;
6842 		t >>= MCX_TIMESTAMP_SHIFT;
6843 		t += c->c_uptime;
6844 
6845 		m->m_pkthdr.ph_timestamp = t;
6846 		SET(m->m_pkthdr.csum_flags, M_TIMESTAMP);
6847 	}
6848 #endif
6849 
6850 	ml_enqueue(ml, m);
6851 
6852 	return (1);
6853 }
6854 
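/*
 * a cq entry belongs to the driver when its owner bit matches the
 * parity of the number of times cq_cons has wrapped the ring.
 */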
6855 static struct mcx_cq_entry *
6856 mcx_next_cq_entry(struct mcx_softc *sc, struct mcx_cq *cq)
6857 {
6858 	struct mcx_cq_entry *cqe;
6859 	int next;
6860 
6861 	cqe = (struct mcx_cq_entry *)MCX_DMA_KVA(&cq->cq_mem);
6862 	next = cq->cq_cons % (1 << MCX_LOG_CQ_SIZE);
6863 
6864 	if ((cqe[next].cq_opcode_owner & MCX_CQ_ENTRY_FLAG_OWNER) ==
6865 	    ((cq->cq_cons >> MCX_LOG_CQ_SIZE) & 1)) {
6866 		return (&cqe[next]);
6867 	}
6868 
6869 	return (NULL);
6870 }
6871 
6872 static void
6873 mcx_arm_cq(struct mcx_softc *sc, struct mcx_cq *cq, int uar)
6874 {
6875 	struct mcx_cq_doorbell *db;
6876 	bus_size_t offset;
6877 	uint32_t val;
6878 	uint64_t uval;
6879 
6880 	val = ((cq->cq_count) & 3) << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT;
6881 	val |= (cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
6882 
6883 	db = MCX_DMA_OFF(&sc->sc_doorbell_mem, cq->cq_doorbell);
6884 
6885 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
6886 	    cq->cq_doorbell, sizeof(*db), BUS_DMASYNC_POSTWRITE);
6887 
6888 	htobem32(&db->db_update_ci, cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
6889 	htobem32(&db->db_arm_ci, val);
6890 
6891 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
6892 	    cq->cq_doorbell, sizeof(*db), BUS_DMASYNC_PREWRITE);
6893 
6894 	offset = (MCX_PAGE_SIZE * uar) + MCX_UAR_CQ_DOORBELL;
6895 
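	/*
	 * the UAR doorbell takes the arm command in the high 32 bits
	 * and the cq number in the low 32 bits, in a single 8 byte
	 * write.
	 */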
6896 	uval = (uint64_t)val << 32;
6897 	uval |= cq->cq_n;
6898 
6899 	bus_space_write_raw_8(sc->sc_memt, sc->sc_memh, offset, htobe64(uval));
6900 	mcx_bar(sc, offset, sizeof(uval), BUS_SPACE_BARRIER_WRITE);
6901 }
6902 
6903 void
6904 mcx_process_cq(struct mcx_softc *sc, struct mcx_queues *q, struct mcx_cq *cq)
6905 {
6906 	struct mcx_rx *rx = &q->q_rx;
6907 	struct mcx_tx *tx = &q->q_tx;
6908 	const struct mcx_calibration *c;
6909 	unsigned int gen;
6910 	struct mcx_cq_entry *cqe;
6911 	uint8_t *cqp;
6912 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
6913 	int rxfree, txfree;
6914 
6915 	gen = sc->sc_calibration_gen;
6916 	membar_consumer();
6917 	c = &sc->sc_calibration[gen % nitems(sc->sc_calibration)];
6918 
6919 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
6920 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
6921 
6922 	rxfree = 0;
6923 	txfree = 0;
6924 	while ((cqe = mcx_next_cq_entry(sc, cq))) {
6925 		uint8_t opcode;
6926 		opcode = (cqe->cq_opcode_owner >> MCX_CQ_ENTRY_OPCODE_SHIFT);
6927 		switch (opcode) {
6928 		case MCX_CQ_ENTRY_OPCODE_REQ:
6929 			txfree += mcx_process_txeof(sc, tx, cqe);
6930 			break;
6931 		case MCX_CQ_ENTRY_OPCODE_SEND:
6932 			rxfree += mcx_process_rx(sc, rx, cqe, &ml, c);
6933 			break;
6934 		case MCX_CQ_ENTRY_OPCODE_REQ_ERR:
6935 		case MCX_CQ_ENTRY_OPCODE_SEND_ERR:
6936 			cqp = (uint8_t *)cqe;
6937 			/* printf("%s: cq completion error: %x\n",
6938 			    DEVNAME(sc), cqp[0x37]); */
6939 			break;
6940 
6941 		default:
6942 			/* printf("%s: cq completion opcode %x??\n",
6943 			    DEVNAME(sc), opcode); */
6944 			break;
6945 		}
6946 
6947 		cq->cq_cons++;
6948 	}
6949 
6950 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
6951 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_PREREAD);
6952 
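	/*
	 * cq_count supplies the 2 bit command sequence number carried
	 * in the arm doorbell below.
	 */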
6953 	cq->cq_count++;
6954 	mcx_arm_cq(sc, cq, q->q_uar);
6955 
6956 	if (rxfree > 0) {
6957 		if_rxr_put(&rx->rx_rxr, rxfree);
6958 		if (ifiq_input(rx->rx_ifiq, &ml))
6959 			if_rxr_livelocked(&rx->rx_rxr);
6960 
6961 		mcx_rx_fill(sc, rx);
6962 		if (if_rxr_inuse(&rx->rx_rxr) == 0)
6963 			timeout_add(&rx->rx_refill, 1);
6964 	}
6965 	if (txfree > 0) {
6966 		tx->tx_cons += txfree;
6967 		if (ifq_is_oactive(tx->tx_ifq))
6968 			ifq_restart(tx->tx_ifq);
6969 	}
6970 }
6971 
6973 static void
6974 mcx_arm_eq(struct mcx_softc *sc, struct mcx_eq *eq, int uar)
6975 {
6976 	bus_size_t offset;
6977 	uint32_t val;
6978 
6979 	offset = (MCX_PAGE_SIZE * uar) + MCX_UAR_EQ_DOORBELL_ARM;
6980 	val = (eq->eq_n << 24) | (eq->eq_cons & 0xffffff);
6981 
6982 	mcx_wr(sc, offset, val);
6983 	mcx_bar(sc, offset, sizeof(val), BUS_SPACE_BARRIER_WRITE);
6984 }
6985 
6986 static struct mcx_eq_entry *
6987 mcx_next_eq_entry(struct mcx_softc *sc, struct mcx_eq *eq)
6988 {
6989 	struct mcx_eq_entry *eqe;
6990 	int next;
6991 
6992 	eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&eq->eq_mem);
6993 	next = eq->eq_cons % (1 << MCX_LOG_EQ_SIZE);
6994 	if ((eqe[next].eq_owner & 1) ==
6995 	    ((eq->eq_cons >> MCX_LOG_EQ_SIZE) & 1)) {
6996 		eq->eq_cons++;
6997 		return (&eqe[next]);
6998 	}
6999 	return (NULL);
7000 }
7001 
7002 int
7003 mcx_admin_intr(void *xsc)
7004 {
7005 	struct mcx_softc *sc = (struct mcx_softc *)xsc;
7006 	struct mcx_eq *eq = &sc->sc_admin_eq;
7007 	struct mcx_eq_entry *eqe;
7008 
7009 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7010 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
7011 
7012 	while ((eqe = mcx_next_eq_entry(sc, eq)) != NULL) {
7013 		switch (eqe->eq_event_type) {
7014 		case MCX_EVENT_TYPE_LAST_WQE:
7015 			/* printf("%s: last wqe reached?\n", DEVNAME(sc)); */
7016 			break;
7017 
7018 		case MCX_EVENT_TYPE_CQ_ERROR:
7019 			/* printf("%s: cq error\n", DEVNAME(sc)); */
7020 			break;
7021 
7022 		case MCX_EVENT_TYPE_CMD_COMPLETION:
7023 			/* wakeup probably */
7024 			break;
7025 
7026 		case MCX_EVENT_TYPE_PORT_CHANGE:
7027 			task_add(systq, &sc->sc_port_change);
7028 			break;
7029 
7030 		default:
7031 			/* printf("%s: something happened\n", DEVNAME(sc)); */
7032 			break;
7033 		}
7034 	}
7035 
7036 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7037 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
7038 
7039 	mcx_arm_eq(sc, eq, sc->sc_uar);
7040 
7041 	return (1);
7042 }
7043 
7044 int
7045 mcx_cq_intr(void *xq)
7046 {
7047 	struct mcx_queues *q = (struct mcx_queues *)xq;
7048 	struct mcx_softc *sc = q->q_sc;
7049 	struct mcx_eq *eq = &q->q_eq;
7050 	struct mcx_eq_entry *eqe;
7051 	int cqn;
7052 
7053 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7054 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
7055 
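	/*
	 * completion events carry the cq number in the event data, so
	 * only process events that match our cq.
	 */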
7056 	while ((eqe = mcx_next_eq_entry(sc, eq)) != NULL) {
7057 		switch (eqe->eq_event_type) {
7058 		case MCX_EVENT_TYPE_COMPLETION:
7059 			cqn = betoh32(eqe->eq_event_data[6]);
7060 			if (cqn == q->q_cq.cq_n)
7061 				mcx_process_cq(sc, q, &q->q_cq);
7062 			break;
7063 		}
7064 	}
7065 
7066 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7067 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
7068 
7069 	mcx_arm_eq(sc, eq, q->q_uar);
7070 
7071 	return (1);
7072 }
7073 
7074 static void
7075 mcx_free_slots(struct mcx_softc *sc, struct mcx_slot *slots, int allocated,
7076     int total)
7077 {
7078 	struct mcx_slot *ms;
7079 
7080 	int i = allocated;
7081 	while (i-- > 0) {
7082 		ms = &slots[i];
7083 		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
7084 		if (ms->ms_m != NULL)
7085 			m_freem(ms->ms_m);
7086 	}
7087 	free(slots, M_DEVBUF, total * sizeof(*ms));
7088 }
7089 
7090 static int
7091 mcx_queue_up(struct mcx_softc *sc, struct mcx_queues *q)
7092 {
7093 	struct mcx_rx *rx;
7094 	struct mcx_tx *tx;
7095 	struct mcx_slot *ms;
7096 	int i;
7097 
7098 	rx = &q->q_rx;
7099 	rx->rx_slots = mallocarray((1 << MCX_LOG_RQ_SIZE), sizeof(*ms),
7100 	    M_DEVBUF, M_WAITOK | M_ZERO);
7101 	if (rx->rx_slots == NULL) {
7102 		printf("%s: failed to allocate rx slots\n", DEVNAME(sc));
7103 		return ENOMEM;
7104 	}
7105 
7106 	for (i = 0; i < (1 << MCX_LOG_RQ_SIZE); i++) {
7107 		ms = &rx->rx_slots[i];
7108 		if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu, 1,
7109 		    sc->sc_hardmtu, 0,
7110 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
7111 		    &ms->ms_map) != 0) {
7112 			printf("%s: failed to allocate rx dma maps\n",
7113 			    DEVNAME(sc));
7114 			goto destroy_rx_slots;
7115 		}
7116 	}
7117 
7118 	tx = &q->q_tx;
7119 	tx->tx_slots = mallocarray((1 << MCX_LOG_SQ_SIZE), sizeof(*ms),
7120 	    M_DEVBUF, M_WAITOK | M_ZERO);
7121 	if (tx->tx_slots == NULL) {
7122 		printf("%s: failed to allocate tx slots\n", DEVNAME(sc));
7123 		goto destroy_rx_slots;
7124 	}
7125 
7126 	for (i = 0; i < (1 << MCX_LOG_SQ_SIZE); i++) {
7127 		ms = &tx->tx_slots[i];
7128 		if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu,
7129 		    MCX_SQ_MAX_SEGMENTS, sc->sc_hardmtu, 0,
7130 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
7131 		    &ms->ms_map) != 0) {
7132 			printf("%s: failed to allocate tx dma maps\n",
7133 			    DEVNAME(sc));
7134 			goto destroy_tx_slots;
7135 		}
7136 	}
7137 
7138 	if (mcx_create_cq(sc, &q->q_cq, q->q_uar, q->q_index,
7139 	    q->q_eq.eq_n) != 0)
7140 		goto destroy_tx_slots;
7141 
7142 	if (mcx_create_sq(sc, tx, q->q_uar, q->q_index, q->q_cq.cq_n)
7143 	    != 0)
7144 		goto destroy_cq;
7145 
7146 	if (mcx_create_rq(sc, rx, q->q_index, q->q_cq.cq_n) != 0)
7147 		goto destroy_sq;
7148 
7149 	return 0;
7150 
7151 destroy_sq:
7152 	mcx_destroy_sq(sc, tx);
7153 destroy_cq:
7154 	mcx_destroy_cq(sc, &q->q_cq);
7155 destroy_tx_slots:
7156 	mcx_free_slots(sc, tx->tx_slots, i, (1 << MCX_LOG_SQ_SIZE));
7157 	tx->tx_slots = NULL;
7158 
7159 	i = (1 << MCX_LOG_RQ_SIZE);
7160 destroy_rx_slots:
7161 	mcx_free_slots(sc, rx->rx_slots, i, (1 << MCX_LOG_RQ_SIZE));
7162 	rx->rx_slots = NULL;
7163 	return ENOMEM;
7164 }
7165 
7166 static int
7167 mcx_rss_group_entry_count(struct mcx_softc *sc, int group)
7168 {
7169 	int i;
7170 	int count;
7171 
7172 	count = 0;
7173 	for (i = 0; i < nitems(mcx_rss_config); i++) {
7174 		if (mcx_rss_config[i].flow_group == group)
7175 			count++;
7176 	}
7177 
7178 	return count;
7179 }
7180 
7181 static int
7182 mcx_up(struct mcx_softc *sc)
7183 {
7184 	struct ifnet *ifp = &sc->sc_ac.ac_if;
7185 	struct mcx_rx *rx;
7186 	struct mcx_tx *tx;
7187 	int i, start, count, flow_group, flow_index;
7188 	struct mcx_flow_match match_crit;
7189 	struct mcx_rss_rule *rss;
7190 	uint32_t dest;
7191 	int rqns[MCX_MAX_QUEUES];
7192 
7193 	if (mcx_create_tis(sc, &sc->sc_tis) != 0)
7194 		goto down;
7195 
7196 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
7197 		if (mcx_queue_up(sc, &sc->sc_queues[i]) != 0) {
7198 			goto down;
7199 		}
7200 	}
7201 
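	/*
	 * incoming packets hit the root (level 0) mac flow table first;
	 * its entries forward to the level 1 RSS table, whose entries
	 * pick a tir by ethertype and ip protocol.
	 */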
7202 	/* RSS flow table and flow groups */
7203 	if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE, 1,
7204 	    &sc->sc_rss_flow_table_id) != 0)
7205 		goto down;
7206 
7207 	dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
7208 	    sc->sc_rss_flow_table_id;
7209 
7210 	/* L4 RSS flow group (v4/v6 tcp/udp, no fragments) */
7211 	memset(&match_crit, 0, sizeof(match_crit));
7212 	match_crit.mc_ethertype = 0xffff;
7213 	match_crit.mc_ip_proto = 0xff;
7214 	match_crit.mc_vlan_flags = MCX_FLOW_MATCH_IP_FRAG;
7215 	start = 0;
7216 	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_L4);
7217 	if (count != 0) {
7218 		if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
7219 		    MCX_FLOW_GROUP_RSS_L4, start, count,
7220 		    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7221 			goto down;
7222 		start += count;
7223 	}
7224 
7225 	/* L3 RSS flow group (v4/v6, including fragments) */
7226 	memset(&match_crit, 0, sizeof(match_crit));
7227 	match_crit.mc_ethertype = 0xffff;
7228 	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_L3);
7229 	if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
7230 	    MCX_FLOW_GROUP_RSS_L3, start, count,
7231 	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7232 		goto down;
7233 	start += count;
7234 
7235 	/* non-RSS flow group */
7236 	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_NONE);
7237 	memset(&match_crit, 0, sizeof(match_crit));
7238 	if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
7239 	    MCX_FLOW_GROUP_RSS_NONE, start, count, 0, &match_crit) != 0)
7240 		goto down;
7241 
7242 	/* root flow table, matching packets based on mac address */
7243 	if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE, 0,
7244 	    &sc->sc_mac_flow_table_id) != 0)
7245 		goto down;
7246 
7247 	/* promisc flow group */
7248 	start = 0;
7249 	memset(&match_crit, 0, sizeof(match_crit));
7250 	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
7251 	    MCX_FLOW_GROUP_PROMISC, start, 1, 0, &match_crit) != 0)
7252 		goto down;
7253 	sc->sc_promisc_flow_enabled = 0;
7254 	start++;
7255 
7256 	/* all multicast flow group */
7257 	match_crit.mc_dest_mac[0] = 0x01;
7258 	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
7259 	    MCX_FLOW_GROUP_ALLMULTI, start, 1,
7260 	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7261 		goto down;
7262 	sc->sc_allmulti_flow_enabled = 0;
7263 	start++;
7264 
7265 	/* mac address matching flow group */
7266 	memset(&match_crit.mc_dest_mac, 0xff, sizeof(match_crit.mc_dest_mac));
7267 	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
7268 	    MCX_FLOW_GROUP_MAC, start, (1 << MCX_LOG_FLOW_TABLE_SIZE) - start,
7269 	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7270 		goto down;
7271 
7272 	/* flow table entries for unicast and broadcast */
7273 	start = 0;
7274 	if (mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC, start,
7275 	    sc->sc_ac.ac_enaddr, dest) != 0)
7276 		goto down;
7277 	start++;
7278 
7279 	if (mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC, start,
7280 	    etherbroadcastaddr, dest) != 0)
7281 		goto down;
7282 	start++;
7283 
7284 	/* multicast entries go after that */
7285 	sc->sc_mcast_flow_base = start;
7286 
7287 	/* re-add any existing multicast flows */
7288 	for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7289 		if (sc->sc_mcast_flows[i][0] != 0) {
7290 			mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC,
7291 			    sc->sc_mcast_flow_base + i,
7292 			    sc->sc_mcast_flows[i], dest);
7293 		}
7294 	}
7295 
7296 	if (mcx_set_flow_table_root(sc, sc->sc_mac_flow_table_id) != 0)
7297 		goto down;
7298 
7299 	/*
7300 	 * the RQT can be any size as long as it's a power of two.
7301 	 * since we also restrict the number of queues to a power of two,
7302 	 * we can just put each rx queue in once.
7303 	 */
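	/*
	 * e.g. with 4 rx queues the RQT is just { rqn0, rqn1, rqn2,
	 * rqn3 }, and the TIR hash indexes one of those entries
	 * directly.
	 */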
7304 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++)
7305 		rqns[i] = sc->sc_queues[i].q_rx.rx_rqn;
7306 
7307 	if (mcx_create_rqt(sc, intrmap_count(sc->sc_intrmap), rqns,
7308 	    &sc->sc_rqt) != 0)
7309 		goto down;
7310 
7311 	start = 0;
7312 	flow_index = 0;
7313 	flow_group = -1;
7314 	for (i = 0; i < nitems(mcx_rss_config); i++) {
7315 		rss = &mcx_rss_config[i];
7316 		if (rss->flow_group != flow_group) {
7317 			flow_group = rss->flow_group;
7318 			flow_index = 0;
7319 		}
7320 
7321 		if (rss->hash_sel == 0) {
7322 			if (mcx_create_tir_direct(sc, &sc->sc_queues[0].q_rx,
7323 			    &sc->sc_tir[i]) != 0)
7324 				goto down;
7325 		} else {
7326 			if (mcx_create_tir_indirect(sc, sc->sc_rqt,
7327 			    rss->hash_sel, &sc->sc_tir[i]) != 0)
7328 				goto down;
7329 		}
7330 
7331 		if (mcx_set_flow_table_entry_proto(sc, flow_group,
7332 		    flow_index, rss->ethertype, rss->ip_proto,
7333 		    MCX_FLOW_CONTEXT_DEST_TYPE_TIR | sc->sc_tir[i]) != 0)
7334 			goto down;
7335 		flow_index++;
7336 	}
7337 
7338 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
7339 		struct mcx_queues *q = &sc->sc_queues[i];
7340 		rx = &q->q_rx;
7341 		tx = &q->q_tx;
7342 
7343 		/* start the queues */
7344 		if (mcx_ready_sq(sc, tx) != 0)
7345 			goto down;
7346 
7347 		if (mcx_ready_rq(sc, rx) != 0)
7348 			goto down;
7349 
7350 		if_rxr_init(&rx->rx_rxr, 1, (1 << MCX_LOG_RQ_SIZE));
7351 		rx->rx_prod = 0;
7352 		mcx_rx_fill(sc, rx);
7353 
7354 		tx->tx_cons = 0;
7355 		tx->tx_prod = 0;
7356 		ifq_clr_oactive(tx->tx_ifq);
7357 	}
7358 
7359 	mcx_calibrate_first(sc);
7360 
7361 	SET(ifp->if_flags, IFF_RUNNING);
7362 
7363 	return ENETRESET;
7364 down:
7365 	mcx_down(sc);
7366 	return ENOMEM;
7367 }
7368 
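/*
 * Interface teardown, also used as the error path for mcx_up().
 * Flow table entries go first so the hardware stops delivering
 * packets, then the interrupt, ifq and timeout barriers drain
 * anything already in flight before queues and tables are destroyed.
 * Each resource is reset to its "not allocated" value (0 or -1) so a
 * partially completed mcx_up() can be unwound safely.
 */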
7369 static void
7370 mcx_down(struct mcx_softc *sc)
7371 {
7372 	struct ifnet *ifp = &sc->sc_ac.ac_if;
7373 	struct mcx_rss_rule *rss;
7374 	int group, i, flow_group, flow_index;
7375 
7376 	CLR(ifp->if_flags, IFF_RUNNING);
7377 
7378 	/*
7379 	 * delete the flow table entries first, so no new packets can
7380 	 * arrive once we've passed the barriers below
7381 	 */
7382 	if (sc->sc_promisc_flow_enabled)
7383 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
7384 	if (sc->sc_allmulti_flow_enabled)
7385 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
7386 	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 0);
7387 	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 1);
7388 	for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7389 		if (sc->sc_mcast_flows[i][0] != 0) {
7390 			mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
7391 			    sc->sc_mcast_flow_base + i);
7392 		}
7393 	}
7394 
7395 	flow_group = -1;
7396 	flow_index = 0;
7397 	for (i = 0; i < nitems(mcx_rss_config); i++) {
7398 		rss = &mcx_rss_config[i];
7399 		if (rss->flow_group != flow_group) {
7400 			flow_group = rss->flow_group;
7401 			flow_index = 0;
7402 		}
7403 
7404 		mcx_delete_flow_table_entry(sc, flow_group, flow_index);
7405 
7406 		mcx_destroy_tir(sc, sc->sc_tir[i]);
7407 		sc->sc_tir[i] = 0;
7408 
7409 		flow_index++;
7410 	}
7411 	intr_barrier(sc->sc_ihc);
7412 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
7413 		struct ifqueue *ifq = sc->sc_queues[i].q_tx.tx_ifq;
7414 		ifq_barrier(ifq);
7415 
7416 		timeout_del_barrier(&sc->sc_queues[i].q_rx.rx_refill);
7417 
7418 		intr_barrier(sc->sc_queues[i].q_ihc);
7419 	}
7420 
7421 	timeout_del_barrier(&sc->sc_calibrate);
7422 
7423 	for (group = 0; group < MCX_NUM_FLOW_GROUPS; group++) {
7424 		if (sc->sc_flow_group[group].g_id != -1)
7425 			mcx_destroy_flow_group(sc, group);
7426 	}
7427 
7428 	if (sc->sc_mac_flow_table_id != -1) {
7429 		mcx_destroy_flow_table(sc, sc->sc_mac_flow_table_id);
7430 		sc->sc_mac_flow_table_id = -1;
7431 	}
7432 	if (sc->sc_rss_flow_table_id != -1) {
7433 		mcx_destroy_flow_table(sc, sc->sc_rss_flow_table_id);
7434 		sc->sc_rss_flow_table_id = -1;
7435 	}
7436 	if (sc->sc_rqt != -1) {
7437 		mcx_destroy_rqt(sc, sc->sc_rqt);
7438 		sc->sc_rqt = -1;
7439 	}
7440 
7441 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
7442 		struct mcx_queues *q = &sc->sc_queues[i];
7443 		struct mcx_rx *rx = &q->q_rx;
7444 		struct mcx_tx *tx = &q->q_tx;
7445 		struct mcx_cq *cq = &q->q_cq;
7446 
7447 		if (rx->rx_rqn != 0)
7448 			mcx_destroy_rq(sc, rx);
7449 
7450 		if (tx->tx_sqn != 0)
7451 			mcx_destroy_sq(sc, tx);
7452 
7453 		if (tx->tx_slots != NULL) {
7454 			mcx_free_slots(sc, tx->tx_slots,
7455 			    (1 << MCX_LOG_SQ_SIZE), (1 << MCX_LOG_SQ_SIZE));
7456 			tx->tx_slots = NULL;
7457 		}
7458 		if (rx->rx_slots != NULL) {
7459 			mcx_free_slots(sc, rx->rx_slots,
7460 			    (1 << MCX_LOG_RQ_SIZE), (1 << MCX_LOG_RQ_SIZE));
7461 			rx->rx_slots = NULL;
7462 		}
7463 
7464 		if (cq->cq_n != 0)
7465 			mcx_destroy_cq(sc, cq);
7466 	}
7467 	if (sc->sc_tis != 0) {
7468 		mcx_destroy_tis(sc, sc->sc_tis);
7469 		sc->sc_tis = 0;
7470 	}
7471 }
7472 
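/*
 * the handlers below return ENETRESET where a change only requires
 * the rx filter to be reprogrammed; the common code at the bottom
 * turns that into a single mcx_iff() call if the interface is up
 * and running.
 */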
7473 static int
7474 mcx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
7475 {
7476 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
7477 	struct ifreq *ifr = (struct ifreq *)data;
7478 	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
7479 	int s, i, error = 0;
7480 	uint32_t dest;
7481 
7482 	s = splnet();
7483 	switch (cmd) {
7484 	case SIOCSIFADDR:
7485 		ifp->if_flags |= IFF_UP;
7486 		/* FALLTHROUGH */
7487 
7488 	case SIOCSIFFLAGS:
7489 		if (ISSET(ifp->if_flags, IFF_UP)) {
7490 			if (ISSET(ifp->if_flags, IFF_RUNNING))
7491 				error = ENETRESET;
7492 			else
7493 				error = mcx_up(sc);
7494 		} else {
7495 			if (ISSET(ifp->if_flags, IFF_RUNNING))
7496 				mcx_down(sc);
7497 		}
7498 		break;
7499 
7500 	case SIOCGIFMEDIA:
7501 	case SIOCSIFMEDIA:
7502 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
7503 		break;
7504 
7505 	case SIOCGIFSFFPAGE:
7506 		error = mcx_get_sffpage(ifp, (struct if_sffpage *)data);
7507 		break;
7508 
7509 	case SIOCGIFRXR:
7510 		error = mcx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
7511 		break;
7512 
7513 	case SIOCADDMULTI:
7514 		if (ether_addmulti(ifr, &sc->sc_ac) == ENETRESET) {
7515 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
7516 			if (error != 0)
7517 				return (error);
7518 
7519 			dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
7520 			    sc->sc_rss_flow_table_id;
7521 
7522 			for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7523 				if (sc->sc_mcast_flows[i][0] == 0) {
7524 					memcpy(sc->sc_mcast_flows[i], addrlo,
7525 					    ETHER_ADDR_LEN);
7526 					if (ISSET(ifp->if_flags, IFF_RUNNING)) {
7527 						mcx_set_flow_table_entry_mac(sc,
7528 						    MCX_FLOW_GROUP_MAC,
7529 						    sc->sc_mcast_flow_base + i,
7530 						    sc->sc_mcast_flows[i], dest);
7531 					}
7532 					break;
7533 				}
7534 			}
7535 
7536 			if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
7537 				if (i == MCX_NUM_MCAST_FLOWS) {
7538 					SET(ifp->if_flags, IFF_ALLMULTI);
7539 					sc->sc_extra_mcast++;
7540 					error = ENETRESET;
7541 				}
7542 
7543 				if (sc->sc_ac.ac_multirangecnt > 0) {
7544 					SET(ifp->if_flags, IFF_ALLMULTI);
7545 					error = ENETRESET;
7546 				}
7547 			}
7548 		}
7549 		break;
7550 
7551 	case SIOCDELMULTI:
7552 		if (ether_delmulti(ifr, &sc->sc_ac) == ENETRESET) {
7553 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
7554 			if (error != 0)
7555 				return (error);
7556 
7557 			for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7558 				if (memcmp(sc->sc_mcast_flows[i], addrlo,
7559 				    ETHER_ADDR_LEN) == 0) {
7560 					if (ISSET(ifp->if_flags, IFF_RUNNING)) {
7561 						mcx_delete_flow_table_entry(sc,
7562 						    MCX_FLOW_GROUP_MAC,
7563 						    sc->sc_mcast_flow_base + i);
7564 					}
7565 					sc->sc_mcast_flows[i][0] = 0;
7566 					break;
7567 				}
7568 			}
7569 
7570 			if (i == MCX_NUM_MCAST_FLOWS)
7571 				sc->sc_extra_mcast--;
7572 
7573 			if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
7574 			    (sc->sc_extra_mcast == 0) &&
7575 			    (sc->sc_ac.ac_multirangecnt == 0)) {
7576 				CLR(ifp->if_flags, IFF_ALLMULTI);
7577 				error = ENETRESET;
7578 			}
7579 		}
7580 		break;
7581 
7582 	default:
7583 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
7584 	}
7585 
7586 	if (error == ENETRESET) {
7587 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
7588 		    (IFF_UP | IFF_RUNNING))
7589 			mcx_iff(sc);
7590 		error = 0;
7591 	}
7592 	splx(s);
7593 
7594 	return (error);
7595 }
7596 
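/*
 * SFF module eeprom read, for ifconfig(8) transceiver output: PMLP
 * maps the port's lane 0 to a module number, then MCIA reads the
 * selected page in MCX_MCIA_EEPROM_BYTES sized chunks.
 */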
7597 static int
7598 mcx_get_sffpage(struct ifnet *ifp, struct if_sffpage *sff)
7599 {
7600 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
7601 	struct mcx_reg_mcia mcia;
7602 	struct mcx_reg_pmlp pmlp;
7603 	int offset, error;
7604 
7605 	/* get module number */
7606 	memset(&pmlp, 0, sizeof(pmlp));
7607 	pmlp.rp_local_port = 1;
7608 	error = mcx_access_hca_reg(sc, MCX_REG_PMLP, MCX_REG_OP_READ, &pmlp,
7609 	    sizeof(pmlp));
7610 	if (error != 0) {
7611 		printf("%s: unable to get eeprom module number\n",
7612 		    DEVNAME(sc));
7613 		return error;
7614 	}
7615 
7616 	for (offset = 0; offset < 256; offset += MCX_MCIA_EEPROM_BYTES) {
7617 		memset(&mcia, 0, sizeof(mcia));
7618 		mcia.rm_l = 0;
7619 		mcia.rm_module = betoh32(pmlp.rp_lane0_mapping) &
7620 		    MCX_PMLP_MODULE_NUM_MASK;
7621 		mcia.rm_i2c_addr = sff->sff_addr / 2;	/* mcia seems to take the 7-bit i2c address */
7622 		mcia.rm_page_num = sff->sff_page;
7623 		mcia.rm_dev_addr = htobe16(offset);
7624 		mcia.rm_size = htobe16(MCX_MCIA_EEPROM_BYTES);
7625 
7626 		error = mcx_access_hca_reg(sc, MCX_REG_MCIA, MCX_REG_OP_READ,
7627 		    &mcia, sizeof(mcia));
7628 		if (error != 0) {
7629 			printf("%s: unable to read eeprom at %x\n",
7630 			    DEVNAME(sc), offset);
7631 			return error;
7632 		}
7633 
7634 		memcpy(sff->sff_data + offset, mcia.rm_data,
7635 		    MCX_MCIA_EEPROM_BYTES);
7636 	}
7637 
7638 	return 0;
7639 }
7640 
7641 static int
7642 mcx_rxrinfo(struct mcx_softc *sc, struct if_rxrinfo *ifri)
7643 {
7644 	struct if_rxring_info *ifrs;
7645 	unsigned int i;
7646 	int error;
7647 
7648 	ifrs = mallocarray(intrmap_count(sc->sc_intrmap), sizeof(*ifrs),
7649 	    M_TEMP, M_WAITOK|M_ZERO|M_CANFAIL);
7650 	if (ifrs == NULL)
7651 		return (ENOMEM);
7652 
7653 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
7654 		struct mcx_rx *rx = &sc->sc_queues[i].q_rx;
7655 		struct if_rxring_info *ifr = &ifrs[i];
7656 
7657 		snprintf(ifr->ifr_name, sizeof(ifr->ifr_name), "%u", i);
7658 		ifr->ifr_size = sc->sc_hardmtu;
7659 		ifr->ifr_info = rx->rx_rxr;
7660 	}
7661 
7662 	error = if_rxr_info_ioctl(ifri, i, ifrs);
7663 	free(ifrs, M_TEMP, i * sizeof(*ifrs));
7664 
7665 	return (error);
7666 }
7667 
7668 int
7669 mcx_load_mbuf(struct mcx_softc *sc, struct mcx_slot *ms, struct mbuf *m)
7670 {
7671 	switch (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
7672 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
7673 	case 0:
7674 		break;
7675 
7676 	case EFBIG:
7677 		if (m_defrag(m, M_DONTWAIT) == 0 &&
7678 		    bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
7679 		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
7680 			break;
7681 
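		/* FALLTHROUGH */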
7682 	default:
7683 		return (1);
7684 	}
7685 
7686 	ms->ms_m = m;
7687 	return (0);
7688 }
7689 
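/*
 * Transmit. Each packet consumes at least one 64 byte SQ entry: a
 * ctrl segment, an eth segment carrying MCX_SQ_INLINE_SIZE bytes of
 * inline headers, and the first data segment. Any further DMA
 * segments spill into the following slots, MCX_SQ_SEGS_PER_SLOT at a
 * time, which is why a packet can occupy up to
 * MCX_SQ_ENTRY_MAX_SLOTS slots in the ring.
 */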
7690 static void
7691 mcx_start(struct ifqueue *ifq)
7692 {
7693 	struct mcx_tx *tx = ifq->ifq_softc;
7694 	struct ifnet *ifp = ifq->ifq_if;
7695 	struct mcx_softc *sc = ifp->if_softc;
7696 	struct mcx_sq_entry *sq, *sqe;
7697 	struct mcx_sq_entry_seg *sqs;
7698 	struct mcx_slot *ms;
7699 	bus_dmamap_t map;
7700 	struct mbuf *m;
7701 	u_int idx, free, used;
7702 	uint64_t *bf;
7703 	uint32_t csum;
7704 	size_t bf_base;
7705 	int i, seg, nseg;
7706 
7707 	bf_base = (tx->tx_uar * MCX_PAGE_SIZE) + MCX_UAR_BF;
7708 
7709 	idx = tx->tx_prod % (1 << MCX_LOG_SQ_SIZE);
7710 	free = (tx->tx_cons + (1 << MCX_LOG_SQ_SIZE)) - tx->tx_prod;
7711 
7712 	used = 0;
7713 	bf = NULL;
7714 
7715 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
7716 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
7717 
7718 	sq = (struct mcx_sq_entry *)MCX_DMA_KVA(&tx->tx_sq_mem);
7719 
7720 	for (;;) {
7721 		if (used + MCX_SQ_ENTRY_MAX_SLOTS >= free) {
7722 			ifq_set_oactive(ifq);
7723 			break;
7724 		}
7725 
7726 		m = ifq_dequeue(ifq);
7727 		if (m == NULL) {
7728 			break;
7729 		}
7730 
7731 		sqe = sq + idx;
7732 		ms = &tx->tx_slots[idx];
7733 		memset(sqe, 0, sizeof(*sqe));
7734 
7735 		/* ctrl segment */
7736 		sqe->sqe_opcode_index = htobe32(MCX_SQE_WQE_OPCODE_SEND |
7737 		    ((tx->tx_prod & 0xffff) << MCX_SQE_WQE_INDEX_SHIFT));
7738 		/* always generate a completion event */
7739 		sqe->sqe_signature = htobe32(MCX_SQE_CE_CQE_ALWAYS);
7740 
7741 		/* eth segment */
7742 		csum = 0;
7743 		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
7744 			csum |= MCX_SQE_L3_CSUM;
7745 		if (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
7746 			csum |= MCX_SQE_L4_CSUM;
7747 		sqe->sqe_mss_csum = htobe32(csum);
7748 		sqe->sqe_inline_header_size = htobe16(MCX_SQ_INLINE_SIZE);
7749 #if NVLAN > 0
7750 		if (m->m_flags & M_VLANTAG) {
7751 			struct ether_vlan_header *evh;
7752 			evh = (struct ether_vlan_header *)
7753 			    &sqe->sqe_inline_headers;
7754 
7755 			/* slightly cheaper vlan_inject() */
7756 			m_copydata(m, 0, ETHER_HDR_LEN, (caddr_t)evh);
7757 			evh->evl_proto = evh->evl_encap_proto;
7758 			evh->evl_encap_proto = htons(ETHERTYPE_VLAN);
7759 			evh->evl_tag = htons(m->m_pkthdr.ether_vtag);
7760 
7761 			m_adj(m, ETHER_HDR_LEN);
7762 		} else
7763 #endif
7764 		{
7765 			m_copydata(m, 0, MCX_SQ_INLINE_SIZE,
7766 			    (caddr_t)sqe->sqe_inline_headers);
7767 			m_adj(m, MCX_SQ_INLINE_SIZE);
7768 		}
7769 
7770 		if (mcx_load_mbuf(sc, ms, m) != 0) {
7771 			m_freem(m);
7772 			ifp->if_oerrors++;
7773 			continue;
7774 		}
7775 		bf = (uint64_t *)sqe;
7776 
7777 #if NBPFILTER > 0
7778 		if (ifp->if_bpf)
7779 			bpf_mtap_hdr(ifp->if_bpf,
7780 			    (caddr_t)sqe->sqe_inline_headers,
7781 			    MCX_SQ_INLINE_SIZE, m, BPF_DIRECTION_OUT);
7782 #endif
7783 		map = ms->ms_map;
7784 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
7785 		    BUS_DMASYNC_PREWRITE);
7786 
7787 		sqe->sqe_ds_sq_num =
7788 		    htobe32((tx->tx_sqn << MCX_SQE_SQ_NUM_SHIFT) |
7789 		    (map->dm_nsegs + 3));
7790 
7791 		/* data segment - first wqe has one segment */
7792 		sqs = sqe->sqe_segs;
7793 		seg = 0;
7794 		nseg = 1;
7795 		for (i = 0; i < map->dm_nsegs; i++) {
7796 			if (seg == nseg) {
7797 				/* next slot */
7798 				idx++;
7799 				if (idx == (1 << MCX_LOG_SQ_SIZE))
7800 					idx = 0;
7801 				tx->tx_prod++;
7802 				used++;
7803 
7804 				sqs = (struct mcx_sq_entry_seg *)(sq + idx);
7805 				seg = 0;
7806 				nseg = MCX_SQ_SEGS_PER_SLOT;
7807 			}
7808 			sqs[seg].sqs_byte_count =
7809 			    htobe32(map->dm_segs[i].ds_len);
7810 			sqs[seg].sqs_lkey = htobe32(sc->sc_lkey);
7811 			sqs[seg].sqs_addr = htobe64(map->dm_segs[i].ds_addr);
7812 			seg++;
7813 		}
7814 
7815 		idx++;
7816 		if (idx == (1 << MCX_LOG_SQ_SIZE))
7817 			idx = 0;
7818 		tx->tx_prod++;
7819 		used++;
7820 	}
7821 
7822 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
7823 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_PREWRITE);
7824 
7825 	if (used) {
7826 		bus_size_t blueflame;
7827 
7828 		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
7829 		    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
7830 		htobem32(MCX_DMA_OFF(&sc->sc_doorbell_mem, tx->tx_doorbell),
7831 		    tx->tx_prod & MCX_WQ_DOORBELL_MASK);
7832 		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
7833 		    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
7834 
7835 		/*
7836 		 * write the first 64 bits of the last sqe we produced
7837 		 * to the blue flame buffer
7838 		 */
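		/*
		 * the device can pick the start of the WQE up straight
		 * from this MMIO write, potentially saving it a DMA
		 * fetch of the descriptor.
		 */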
7839 
7840 		blueflame = bf_base + tx->tx_bf_offset;
7841 		bus_space_write_raw_8(sc->sc_memt, sc->sc_memh,
7842 		    blueflame, *bf);
7843 		mcx_bar(sc, blueflame, sizeof(*bf), BUS_SPACE_BARRIER_WRITE);
7844 
7845 		/* next write goes to the other buffer */
7846 		tx->tx_bf_offset ^= sc->sc_bf_size;
7847 	}
7848 }
7849 
7850 static void
7851 mcx_watchdog(struct ifnet *ifp)
7852 {
7853 }
7854 
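/*
 * Media handling: bit i of the PTYS eth_proto_cap/eth_proto_oper
 * words corresponds to mcx_eth_cap_map[i], which translates the
 * hardware capability bits into ifmedia types and baudrates.
 */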
7855 static void
7856 mcx_media_add_types(struct mcx_softc *sc)
7857 {
7858 	struct mcx_reg_ptys ptys;
7859 	int i;
7860 	uint32_t proto_cap;
7861 
7862 	memset(&ptys, 0, sizeof(ptys));
7863 	ptys.rp_local_port = 1;
7864 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
7865 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
7866 	    sizeof(ptys)) != 0) {
7867 		printf("%s: unable to read port type/speed\n", DEVNAME(sc));
7868 		return;
7869 	}
7870 
7871 	proto_cap = betoh32(ptys.rp_eth_proto_cap);
7872 	for (i = 0; i < nitems(mcx_eth_cap_map); i++) {
7873 		const struct mcx_eth_proto_capability *cap;
7874 		if (!ISSET(proto_cap, 1 << i))
7875 			continue;
7876 
7877 		cap = &mcx_eth_cap_map[i];
7878 		if (cap->cap_media == 0)
7879 			continue;
7880 
7881 		ifmedia_add(&sc->sc_media, IFM_ETHER | cap->cap_media, 0, NULL);
7882 	}
7883 }
7884 
7885 static void
7886 mcx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
7887 {
7888 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
7889 	struct mcx_reg_ptys ptys;
7890 	int i;
7891 	uint32_t proto_oper;
7892 	uint64_t media_oper;
7893 
7894 	memset(&ptys, 0, sizeof(ptys));
7895 	ptys.rp_local_port = 1;
7896 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
7897 
7898 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
7899 	    sizeof(ptys)) != 0) {
7900 		printf("%s: unable to read port type/speed\n", DEVNAME(sc));
7901 		return;
7902 	}
7903 
7904 	proto_oper = betoh32(ptys.rp_eth_proto_oper);
7905 
7906 	media_oper = 0;
7907 
7908 	for (i = 0; i < nitems(mcx_eth_cap_map); i++) {
7909 		const struct mcx_eth_proto_capability *cap;
7910 		if (!ISSET(proto_oper, 1 << i))
7911 			continue;
7912 
7913 		cap = &mcx_eth_cap_map[i];
7914 
7915 		if (cap->cap_media != 0)
7916 			media_oper = cap->cap_media;
7917 	}
7918 
7919 	ifmr->ifm_status = IFM_AVALID;
7920 	if (proto_oper != 0) {
7921 		ifmr->ifm_status |= IFM_ACTIVE;
7922 		ifmr->ifm_active = IFM_ETHER | IFM_AUTO | media_oper;
7923 		/* txpause, rxpause, duplex? */
7924 	}
7925 }
7926 
7927 static int
7928 mcx_media_change(struct ifnet *ifp)
7929 {
7930 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
7931 	struct mcx_reg_ptys ptys;
7932 	struct mcx_reg_paos paos;
7933 	uint32_t media;
7934 	int i, error;
7935 
7936 	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
7937 		return EINVAL;
7938 
7939 	error = 0;
7940 
7941 	if (IFM_SUBTYPE(sc->sc_media.ifm_media) == IFM_AUTO) {
7942 		/* read ptys to get supported media */
7943 		memset(&ptys, 0, sizeof(ptys));
7944 		ptys.rp_local_port = 1;
7945 		ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
7946 		if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ,
7947 		    &ptys, sizeof(ptys)) != 0) {
7948 			printf("%s: unable to read port type/speed\n",
7949 			    DEVNAME(sc));
7950 			return EIO;
7951 		}
7952 
7953 		media = betoh32(ptys.rp_eth_proto_cap);
7954 	} else {
7955 		/* map media type */
7956 		media = 0;
7957 		for (i = 0; i < nitems(mcx_eth_cap_map); i++) {
7958 			const struct mcx_eth_proto_capability *cap;
7959 
7960 			cap = &mcx_eth_cap_map[i];
7961 			if (cap->cap_media ==
7962 			    IFM_SUBTYPE(sc->sc_media.ifm_media)) {
7963 				media = (1 << i);
7964 				break;
7965 			}
7966 		}
7967 	}
7968 
7969 	/* disable the port */
7970 	memset(&paos, 0, sizeof(paos));
7971 	paos.rp_local_port = 1;
7972 	paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_DOWN;
7973 	paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
7974 	if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
7975 	    sizeof(paos)) != 0) {
7976 		printf("%s: unable to set port state to down\n", DEVNAME(sc));
7977 		return EIO;
7978 	}
7979 
7980 	memset(&ptys, 0, sizeof(ptys));
7981 	ptys.rp_local_port = 1;
7982 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
7983 	ptys.rp_eth_proto_admin = htobe32(media);
7984 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_WRITE, &ptys,
7985 	    sizeof(ptys)) != 0) {
7986 		printf("%s: unable to set port media type/speed\n",
7987 		    DEVNAME(sc));
7988 		error = EIO;
7989 	}
7990 
7991 	/* re-enable the port to start negotiation */
7992 	memset(&paos, 0, sizeof(paos));
7993 	paos.rp_local_port = 1;
7994 	paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_UP;
7995 	paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
7996 	if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
7997 	    sizeof(paos)) != 0) {
7998 		printf("%s: unable to set port state to up\n", DEVNAME(sc));
7999 		error = EIO;
8000 	}
8001 
8002 	return error;
8003 }
8004 
8005 static void
8006 mcx_port_change(void *xsc)
8007 {
8008 	struct mcx_softc *sc = xsc;
8009 	struct ifnet *ifp = &sc->sc_ac.ac_if;
8010 	struct mcx_reg_ptys ptys = {
8011 		.rp_local_port = 1,
8012 		.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH,
8013 	};
8014 	int link_state = LINK_STATE_DOWN;
8015 
8016 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
8017 	    sizeof(ptys)) == 0) {
8018 		uint32_t proto_oper = betoh32(ptys.rp_eth_proto_oper);
8019 		uint64_t baudrate = 0;
8020 		unsigned int i;
8021 
8022 		if (proto_oper != 0)
8023 			link_state = LINK_STATE_FULL_DUPLEX;
8024 
8025 		for (i = 0; i < nitems(mcx_eth_cap_map); i++) {
8026 			const struct mcx_eth_proto_capability *cap;
8027 			if (!ISSET(proto_oper, 1 << i))
8028 				continue;
8029 
8030 			cap = &mcx_eth_cap_map[i];
8031 			if (cap->cap_baudrate == 0)
8032 				continue;
8033 
8034 			baudrate = cap->cap_baudrate;
8035 			break;
8036 		}
8037 
8038 		ifp->if_baudrate = baudrate;
8039 	}
8040 
8041 	if (link_state != ifp->if_link_state) {
8042 		ifp->if_link_state = link_state;
8043 		if_link_state_change(ifp);
8044 	}
8045 }
8046 
8047 static inline uint32_t
8048 mcx_rd(struct mcx_softc *sc, bus_size_t r)
8049 {
8050 	uint32_t word;
8051 
8052 	word = bus_space_read_raw_4(sc->sc_memt, sc->sc_memh, r);
8053 
8054 	return (betoh32(word));
8055 }
8056 
8057 static inline void
8058 mcx_wr(struct mcx_softc *sc, bus_size_t r, uint32_t v)
8059 {
8060 	bus_space_write_raw_4(sc->sc_memt, sc->sc_memh, r, htobe32(v));
8061 }
8062 
8063 static inline void
8064 mcx_bar(struct mcx_softc *sc, bus_size_t r, bus_size_t l, int f)
8065 {
8066 	bus_space_barrier(sc->sc_memt, sc->sc_memh, r, l, f);
8067 }
8068 
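/*
 * read the free-running 64 bit internal timer. the high word is
 * re-read until it is stable so a carry between the two 32 bit
 * reads can't produce a torn value, e.g.:
 *
 *	hi=1 lo=0xffffffff ni=2	-> retry with hi=2
 *	hi=2 lo=0x00000003 ni=2	-> return 0x200000003
 */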
8069 static uint64_t
8070 mcx_timer(struct mcx_softc *sc)
8071 {
8072 	uint32_t hi, lo, ni;
8073 
8074 	hi = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
8075 	for (;;) {
8076 		lo = mcx_rd(sc, MCX_INTERNAL_TIMER_L);
8077 		mcx_bar(sc, MCX_INTERNAL_TIMER_L, 8, BUS_SPACE_BARRIER_READ);
8078 		ni = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
8079 
8080 		if (ni == hi)
8081 			break;
8082 
8083 		hi = ni;
8084 	}
8085 
8086 	return (((uint64_t)hi << 32) | (uint64_t)lo);
8087 }
8088 
8089 static int
8090 mcx_dmamem_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
8091     bus_size_t size, u_int align)
8092 {
8093 	mxm->mxm_size = size;
8094 
8095 	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
8096 	    mxm->mxm_size, 0,
8097 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
8098 	    &mxm->mxm_map) != 0)
8099 		return (1);
8100 	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
8101 	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
8102 	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
8103 		goto destroy;
8104 	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
8105 	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
8106 		goto free;
8107 	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
8108 	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
8109 		goto unmap;
8110 
8111 	return (0);
8112 unmap:
8113 	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
8114 free:
8115 	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
8116 destroy:
8117 	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
8118 	return (1);
8119 }
8120 
8121 static void
8122 mcx_dmamem_zero(struct mcx_dmamem *mxm)
8123 {
8124 	memset(MCX_DMA_KVA(mxm), 0, MCX_DMA_LEN(mxm));
8125 }
8126 
8127 static void
8128 mcx_dmamem_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
8129 {
8130 	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
8131 	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
8132 	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
8133 	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
8134 }
8135 
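/*
 * allocate pages of memory to hand to the firmware. these only need
 * to be MCX_PAGE_SIZE aligned, not contiguous, so the allocation may
 * come back as up to "pages" separate segments; the segment list is
 * then shrunk to fit the number of segments actually returned.
 */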
8136 static int
8137 mcx_hwmem_alloc(struct mcx_softc *sc, struct mcx_hwmem *mhm, unsigned int pages)
8138 {
8139 	bus_dma_segment_t *segs;
8140 	bus_size_t len = pages * MCX_PAGE_SIZE;
8141 	size_t seglen;
8142 
8143 	segs = mallocarray(pages, sizeof(*segs), M_DEVBUF, M_WAITOK|M_CANFAIL);
8144 	if (segs == NULL)
8145 		return (-1);
8146 
8147 	seglen = sizeof(*segs) * pages;
8148 
8149 	if (bus_dmamem_alloc(sc->sc_dmat, len, MCX_PAGE_SIZE, 0,
8150 	    segs, pages, &mhm->mhm_seg_count, BUS_DMA_NOWAIT) != 0)
8151 		goto free_segs;
8152 
8153 	if (mhm->mhm_seg_count < pages) {
8154 		size_t nseglen;
8155 
8156 		mhm->mhm_segs = mallocarray(mhm->mhm_seg_count,
8157 		    sizeof(*mhm->mhm_segs), M_DEVBUF, M_WAITOK|M_CANFAIL);
8158 		if (mhm->mhm_segs == NULL)
8159 			goto free_dmamem;
8160 
8161 		nseglen = sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count;
8162 
8163 		memcpy(mhm->mhm_segs, segs, nseglen);
8164 
8165 		free(segs, M_DEVBUF, seglen);
8166 
8167 		segs = mhm->mhm_segs;
8168 		seglen = nseglen;
8169 	} else
8170 		mhm->mhm_segs = segs;
8171 
8172 	if (bus_dmamap_create(sc->sc_dmat, len, pages, MCX_PAGE_SIZE,
8173 	    MCX_PAGE_SIZE, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW /*|BUS_DMA_64BIT*/,
8174 	    &mhm->mhm_map) != 0)
8175 		goto free_dmamem;
8176 
8177 	if (bus_dmamap_load_raw(sc->sc_dmat, mhm->mhm_map,
8178 	    mhm->mhm_segs, mhm->mhm_seg_count, len, BUS_DMA_NOWAIT) != 0)
8179 		goto destroy;
8180 
8181 	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
8182 	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_PRERW);
8183 
8184 	mhm->mhm_npages = pages;
8185 
8186 	return (0);
8187 
8188 destroy:
8189 	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
8190 free_dmamem:
8191 	bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
8192 free_segs:
8193 	free(segs, M_DEVBUF, seglen);
8194 	mhm->mhm_segs = NULL;
8195 
8196 	return (-1);
8197 }
8198 
8199 static void
8200 mcx_hwmem_free(struct mcx_softc *sc, struct mcx_hwmem *mhm)
8201 {
8202 	if (mhm->mhm_npages == 0)
8203 		return;
8204 
8205 	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
8206 	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_POSTRW);
8207 
8208 	bus_dmamap_unload(sc->sc_dmat, mhm->mhm_map);
8209 	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
8210 	bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
8211 	free(mhm->mhm_segs, M_DEVBUF,
8212 	    sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count);
8213 
8214 	mhm->mhm_npages = 0;
8215 }
8216 
8217 #if NKSTAT > 0
8218 struct mcx_ppcnt {
8219 	char			 name[KSTAT_KV_NAMELEN];
8220 	enum kstat_kv_unit	 unit;
8221 };
8222 
8223 static const struct mcx_ppcnt mcx_ppcnt_ieee8023_tpl[] = {
8224 	{ "Good Tx",		KSTAT_KV_U_PACKETS, },
8225 	{ "Good Rx",		KSTAT_KV_U_PACKETS, },
8226 	{ "FCS errs",		KSTAT_KV_U_PACKETS, },
8227 	{ "Alignment Errs",	KSTAT_KV_U_PACKETS, },
8228 	{ "Good Tx",		KSTAT_KV_U_BYTES, },
8229 	{ "Good Rx",		KSTAT_KV_U_BYTES, },
8230 	{ "Multicast Tx",	KSTAT_KV_U_PACKETS, },
8231 	{ "Broadcast Tx",	KSTAT_KV_U_PACKETS, },
8232 	{ "Multicast Rx",	KSTAT_KV_U_PACKETS, },
8233 	{ "Broadcast Rx",	KSTAT_KV_U_PACKETS, },
8234 	{ "In Range Len",	KSTAT_KV_U_PACKETS, },
8235 	{ "Out Of Range Len",	KSTAT_KV_U_PACKETS, },
8236 	{ "Frame Too Long",	KSTAT_KV_U_PACKETS, },
8237 	{ "Symbol Errs",	KSTAT_KV_U_PACKETS, },
8238 	{ "MAC Ctrl Tx",	KSTAT_KV_U_PACKETS, },
8239 	{ "MAC Ctrl Rx",	KSTAT_KV_U_PACKETS, },
8240 	{ "MAC Ctrl Unsup",	KSTAT_KV_U_PACKETS, },
8241 	{ "Pause Rx",		KSTAT_KV_U_PACKETS, },
8242 	{ "Pause Tx",		KSTAT_KV_U_PACKETS, },
8243 };
8244 CTASSERT(nitems(mcx_ppcnt_ieee8023_tpl) == mcx_ppcnt_ieee8023_count);
8245 
8246 static const struct mcx_ppcnt mcx_ppcnt_rfc2863_tpl[] = {
8247 	{ "Rx Bytes",		KSTAT_KV_U_BYTES, },
8248 	{ "Rx Unicast",		KSTAT_KV_U_PACKETS, },
8249 	{ "Rx Discards",	KSTAT_KV_U_PACKETS, },
8250 	{ "Rx Errors",		KSTAT_KV_U_PACKETS, },
8251 	{ "Rx Unknown Proto",	KSTAT_KV_U_PACKETS, },
8252 	{ "Tx Bytes",		KSTAT_KV_U_BYTES, },
8253 	{ "Tx Unicast",		KSTAT_KV_U_PACKETS, },
8254 	{ "Tx Discards",	KSTAT_KV_U_PACKETS, },
8255 	{ "Tx Errors",		KSTAT_KV_U_PACKETS, },
8256 	{ "Rx Multicast",	KSTAT_KV_U_PACKETS, },
8257 	{ "Rx Broadcast",	KSTAT_KV_U_PACKETS, },
8258 	{ "Tx Multicast",	KSTAT_KV_U_PACKETS, },
8259 	{ "Tx Broadcast",	KSTAT_KV_U_PACKETS, },
8260 };
8261 CTASSERT(nitems(mcx_ppcnt_rfc2863_tpl) == mcx_ppcnt_rfc2863_count);
8262 
8263 static const struct mcx_ppcnt mcx_ppcnt_rfc2819_tpl[] = {
8264 	{ "Drop Events",	KSTAT_KV_U_PACKETS, },
8265 	{ "Octets",		KSTAT_KV_U_BYTES, },
8266 	{ "Packets",		KSTAT_KV_U_PACKETS, },
8267 	{ "Broadcasts",		KSTAT_KV_U_PACKETS, },
8268 	{ "Multicasts",		KSTAT_KV_U_PACKETS, },
8269 	{ "CRC Align Errs",	KSTAT_KV_U_PACKETS, },
8270 	{ "Undersize",		KSTAT_KV_U_PACKETS, },
8271 	{ "Oversize",		KSTAT_KV_U_PACKETS, },
8272 	{ "Fragments",		KSTAT_KV_U_PACKETS, },
8273 	{ "Jabbers",		KSTAT_KV_U_PACKETS, },
8274 	{ "Collisions",		KSTAT_KV_U_NONE, },
8275 	{ "64B",		KSTAT_KV_U_PACKETS, },
8276 	{ "65-127B",		KSTAT_KV_U_PACKETS, },
8277 	{ "128-255B",		KSTAT_KV_U_PACKETS, },
8278 	{ "256-511B",		KSTAT_KV_U_PACKETS, },
8279 	{ "512-1023B",		KSTAT_KV_U_PACKETS, },
8280 	{ "1024-1518B",		KSTAT_KV_U_PACKETS, },
8281 	{ "1519-2047B",		KSTAT_KV_U_PACKETS, },
8282 	{ "2048-4095B",		KSTAT_KV_U_PACKETS, },
8283 	{ "4096-8191B",		KSTAT_KV_U_PACKETS, },
8284 	{ "8192-10239B",	KSTAT_KV_U_PACKETS, },
8285 };
8286 CTASSERT(nitems(mcx_ppcnt_rfc2819_tpl) == mcx_ppcnt_rfc2819_count);
8287 
8288 static const struct mcx_ppcnt mcx_ppcnt_rfc3635_tpl[] = {
8289 	{ "Alignment Errs",	KSTAT_KV_U_PACKETS, },
8290 	{ "FCS Errs",		KSTAT_KV_U_PACKETS, },
8291 	{ "Single Colls",	KSTAT_KV_U_PACKETS, },
8292 	{ "Multiple Colls",	KSTAT_KV_U_PACKETS, },
8293 	{ "SQE Test Errs",	KSTAT_KV_U_NONE, },
8294 	{ "Deferred Tx",	KSTAT_KV_U_PACKETS, },
8295 	{ "Late Colls",		KSTAT_KV_U_NONE, },
8296 	{ "Excess Colls",	KSTAT_KV_U_NONE, },
8297 	{ "Int MAC Tx Errs",	KSTAT_KV_U_PACKETS, },
8298 	{ "Carrier Sense Errs",	KSTAT_KV_U_NONE, },
8299 	{ "Too Long",		KSTAT_KV_U_PACKETS, },
8300 	{ "Int MAC Rx Errs",	KSTAT_KV_U_PACKETS, },
8301 	{ "Symbol Errs",	KSTAT_KV_U_NONE, },
8302 	{ "Unknown Control",	KSTAT_KV_U_PACKETS, },
8303 	{ "Pause Rx",		KSTAT_KV_U_PACKETS, },
8304 	{ "Pause Tx",		KSTAT_KV_U_PACKETS, },
8305 };
8306 CTASSERT(nitems(mcx_ppcnt_rfc3635_tpl) == mcx_ppcnt_rfc3635_count);
8307 
8308 struct mcx_kstat_ppcnt {
8309 	const char		*ksp_name;
8310 	const struct mcx_ppcnt	*ksp_tpl;
8311 	unsigned int		 ksp_n;
8312 	uint8_t			 ksp_grp;
8313 };
8314 
8315 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_ieee8023 = {
8316 	.ksp_name =		"ieee802.3",
8317 	.ksp_tpl =		mcx_ppcnt_ieee8023_tpl,
8318 	.ksp_n =		nitems(mcx_ppcnt_ieee8023_tpl),
8319 	.ksp_grp =		MCX_REG_PPCNT_GRP_IEEE8023,
8320 };
8321 
8322 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc2863 = {
8323 	.ksp_name =		"rfc2863",
8324 	.ksp_tpl =		mcx_ppcnt_rfc2863_tpl,
8325 	.ksp_n =		nitems(mcx_ppcnt_rfc2863_tpl),
8326 	.ksp_grp =		MCX_REG_PPCNT_GRP_RFC2863,
8327 };
8328 
8329 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc2819 = {
8330 	.ksp_name =		"rfc2819",
8331 	.ksp_tpl =		mcx_ppcnt_rfc2819_tpl,
8332 	.ksp_n =		nitems(mcx_ppcnt_rfc2819_tpl),
8333 	.ksp_grp =		MCX_REG_PPCNT_GRP_RFC2819,
8334 };
8335 
8336 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc3635 = {
8337 	.ksp_name =		"rfc3635",
8338 	.ksp_tpl =		mcx_ppcnt_rfc3635_tpl,
8339 	.ksp_n =		nitems(mcx_ppcnt_rfc3635_tpl),
8340 	.ksp_grp =		MCX_REG_PPCNT_GRP_RFC3635,
8341 };
8342 
8343 static int	mcx_kstat_ppcnt_read(struct kstat *);
8344 
8345 static void	mcx_kstat_attach_tmps(struct mcx_softc *sc);
8346 static void	mcx_kstat_attach_queues(struct mcx_softc *sc);
8347 
8348 static struct kstat *
8349 mcx_kstat_attach_ppcnt(struct mcx_softc *sc,
8350     const struct mcx_kstat_ppcnt *ksp)
8351 {
8352 	struct kstat *ks;
8353 	struct kstat_kv *kvs;
8354 	unsigned int i;
8355 
8356 	ks = kstat_create(DEVNAME(sc), 0, ksp->ksp_name, 0, KSTAT_T_KV, 0);
8357 	if (ks == NULL)
8358 		return (NULL);
8359 
8360 	kvs = mallocarray(ksp->ksp_n, sizeof(*kvs),
8361 	    M_DEVBUF, M_WAITOK);
8362 
8363 	for (i = 0; i < ksp->ksp_n; i++) {
8364 		const struct mcx_ppcnt *tpl = &ksp->ksp_tpl[i];
8365 
8366 		kstat_kv_unit_init(&kvs[i], tpl->name,
8367 		    KSTAT_KV_T_COUNTER64, tpl->unit);
8368 	}
8369 
8370 	ks->ks_softc = sc;
8371 	ks->ks_ptr = (void *)ksp;
8372 	ks->ks_data = kvs;
8373 	ks->ks_datalen = ksp->ksp_n * sizeof(*kvs);
8374 	ks->ks_read = mcx_kstat_ppcnt_read;
8375 
8376 	kstat_install(ks);
8377 
8378 	return (ks);
8379 }
8380 
8381 static void
8382 mcx_kstat_attach(struct mcx_softc *sc)
8383 {
8384 	sc->sc_kstat_ieee8023 = mcx_kstat_attach_ppcnt(sc,
8385 	    &mcx_kstat_ppcnt_ieee8023);
8386 	sc->sc_kstat_rfc2863 = mcx_kstat_attach_ppcnt(sc,
8387 	    &mcx_kstat_ppcnt_rfc2863);
8388 	sc->sc_kstat_rfc2819 = mcx_kstat_attach_ppcnt(sc,
8389 	    &mcx_kstat_ppcnt_rfc2819);
8390 	sc->sc_kstat_rfc3635 = mcx_kstat_attach_ppcnt(sc,
8391 	    &mcx_kstat_ppcnt_rfc3635);
8392 
8393 	mcx_kstat_attach_tmps(sc);
8394 	mcx_kstat_attach_queues(sc);
8395 }
8396 
8397 static int
8398 mcx_kstat_ppcnt_read(struct kstat *ks)
8399 {
8400 	struct mcx_softc *sc = ks->ks_softc;
8401 	const struct mcx_kstat_ppcnt *ksp = ks->ks_ptr;
8402 	struct mcx_reg_ppcnt ppcnt = {
8403 		.ppcnt_grp = ksp->ksp_grp,
8404 		.ppcnt_local_port = 1,
8405 	};
8406 	struct kstat_kv *kvs = ks->ks_data;
8407 	uint64_t *vs = (uint64_t *)&ppcnt.ppcnt_counter_set;
8408 	unsigned int i;
8409 	int rv;
8410 
8411 	KERNEL_LOCK(); /* XXX */
8412 	rv = mcx_access_hca_reg(sc, MCX_REG_PPCNT, MCX_REG_OP_READ,
8413 	    &ppcnt, sizeof(ppcnt));
8414 	KERNEL_UNLOCK();
8415 	if (rv != 0)
8416 		return (EIO);
8417 
8418 	nanouptime(&ks->ks_updated);
8419 
8420 	for (i = 0; i < ksp->ksp_n; i++)
8421 		kstat_kv_u64(&kvs[i]) = bemtoh64(&vs[i]);
8422 
8423 	return (0);
8424 }
8425 
8426 struct mcx_kstat_mtmp {
8427 	struct kstat_kv		ktmp_name;
8428 	struct kstat_kv		ktmp_temperature;
8429 	struct kstat_kv		ktmp_threshold_lo;
8430 	struct kstat_kv		ktmp_threshold_hi;
8431 };
8432 
8433 static const struct mcx_kstat_mtmp mcx_kstat_mtmp_tpl = {
8434 	KSTAT_KV_INITIALIZER("name",		KSTAT_KV_T_ISTR),
8435 	KSTAT_KV_INITIALIZER("temperature",	KSTAT_KV_T_TEMP),
8436 	KSTAT_KV_INITIALIZER("lo threshold",	KSTAT_KV_T_TEMP),
8437 	KSTAT_KV_INITIALIZER("hi threshold",	KSTAT_KV_T_TEMP),
8438 };
8439 
8440 static const struct timeval mcx_kstat_mtmp_rate = { 1, 0 };
8441 
8442 static int mcx_kstat_mtmp_read(struct kstat *);
8443 
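/*
 * temperature sensors are discovered via MCAM (does the device
 * provide a sensor map at all?) and MTCAP (which sensors exist);
 * one kstat is attached per bit set in the 64 bit sensor map.
 */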
8444 static void
8445 mcx_kstat_attach_tmps(struct mcx_softc *sc)
8446 {
8447 	struct kstat *ks;
8448 	struct mcx_reg_mcam mcam;
8449 	struct mcx_reg_mtcap mtcap;
8450 	struct mcx_kstat_mtmp *ktmp;
8451 	uint64_t map;
8452 	unsigned int i, n;
8453 
8454 	memset(&mtcap, 0, sizeof(mtcap));
8455 	memset(&mcam, 0, sizeof(mcam));
8456 
8457 	if (sc->sc_mcam_reg == 0) {
8458 		/* no management capabilities */
8459 		return;
8460 	}
8461 
8462 	if (mcx_access_hca_reg(sc, MCX_REG_MCAM, MCX_REG_OP_READ,
8463 	    &mcam, sizeof(mcam)) != 0) {
8464 		/* unable to check management capabilities? */
8465 		return;
8466 	}
8467 
8468 	if (MCX_BITFIELD_BIT(mcam.mcam_feature_cap_mask,
8469 	    MCX_MCAM_FEATURE_CAP_SENSOR_MAP) == 0) {
8470 		/* no sensor map */
8471 		return;
8472 	}
8473 
8474 	if (mcx_access_hca_reg(sc, MCX_REG_MTCAP, MCX_REG_OP_READ,
8475 	    &mtcap, sizeof(mtcap)) != 0) {
8476 		/* unable to find temperature sensors */
8477 		return;
8478 	}
8479 
8480 	sc->sc_kstat_mtmp_count = mtcap.mtcap_sensor_count;
8481 	sc->sc_kstat_mtmp = mallocarray(sc->sc_kstat_mtmp_count,
8482 	    sizeof(*sc->sc_kstat_mtmp), M_DEVBUF, M_WAITOK);
8483 
8484 	n = 0;
8485 	map = bemtoh64(&mtcap.mtcap_sensor_map);
8486 	for (i = 0; i < sizeof(map) * NBBY; i++) {
8487 		if (!ISSET(map, (1ULL << i)))
8488 			continue;
8489 
8490 		ks = kstat_create(DEVNAME(sc), 0, "temperature", i,
8491 		    KSTAT_T_KV, 0);
8492 		if (ks == NULL) {
8493 			/* unable to attach temperature sensor %u, i */
8494 			continue;
8495 		}
8496 
8497 		ktmp = malloc(sizeof(*ktmp), M_DEVBUF, M_WAITOK|M_ZERO);
8498 		*ktmp = mcx_kstat_mtmp_tpl;
8499 
8500 		ks->ks_data = ktmp;
8501 		ks->ks_datalen = sizeof(*ktmp);
8502 		TIMEVAL_TO_TIMESPEC(&mcx_kstat_mtmp_rate, &ks->ks_interval);
8503 		ks->ks_read = mcx_kstat_mtmp_read;
8504 
8505 		ks->ks_softc = sc;
8506 		kstat_install(ks);
8507 
8508 		sc->sc_kstat_mtmp[n++] = ks;
8509 		if (n >= sc->sc_kstat_mtmp_count)
8510 			break;
8511 	}
8512 }
8513 
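/*
 * MTMP reports temperatures in signed 0.125 degC units; kstats want
 * microkelvin. e.g. a raw reading of 400 is 400 * 125000 = 50000000
 * uC, plus 273150000 gives 323150000 uK (50 degC).
 */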
8514 static uint64_t
8515 mcx_tmp_to_uK(uint16_t *t)
8516 {
8517 	int64_t mt = (int16_t)bemtoh16(t); /* 0.125 C units */
8518 	mt *= 1000000 / 8; /* convert to uC */
8519 	mt += 273150000; /* convert to uK */
8520 
8521 	return (mt);
8522 }
8523 
8524 static int
8525 mcx_kstat_mtmp_read(struct kstat *ks)
8526 {
8527 	struct mcx_softc *sc = ks->ks_softc;
8528 	struct mcx_kstat_mtmp *ktmp = ks->ks_data;
8529 	struct mcx_reg_mtmp mtmp;
8530 	int rv;
8531 	struct timeval updated;
8532 
8533 	TIMESPEC_TO_TIMEVAL(&updated, &ks->ks_updated);
8534 
8535 	if (!ratecheck(&updated, &mcx_kstat_mtmp_rate))
8536 		return (0);
8537 
8538 	memset(&mtmp, 0, sizeof(mtmp));
8539 	htobem16(&mtmp.mtmp_sensor_index, ks->ks_unit);
8540 
8541 	KERNEL_LOCK(); /* XXX */
8542 	rv = mcx_access_hca_reg(sc, MCX_REG_MTMP, MCX_REG_OP_READ,
8543 	    &mtmp, sizeof(mtmp));
8544 	KERNEL_UNLOCK();
8545 	if (rv != 0)
8546 		return (EIO);
8547 
8548 	memset(kstat_kv_istr(&ktmp->ktmp_name), 0,
8549 	    sizeof(kstat_kv_istr(&ktmp->ktmp_name)));
8550 	memcpy(kstat_kv_istr(&ktmp->ktmp_name),
8551 	    mtmp.mtmp_sensor_name, sizeof(mtmp.mtmp_sensor_name));
8552 	kstat_kv_temp(&ktmp->ktmp_temperature) =
8553 	    mcx_tmp_to_uK(&mtmp.mtmp_temperature);
8554 	kstat_kv_temp(&ktmp->ktmp_threshold_lo) =
8555 	    mcx_tmp_to_uK(&mtmp.mtmp_temperature_threshold_lo);
8556 	kstat_kv_temp(&ktmp->ktmp_threshold_hi) =
8557 	    mcx_tmp_to_uK(&mtmp.mtmp_temperature_threshold_hi);
8558 
8559 	TIMEVAL_TO_TIMESPEC(&updated, &ks->ks_updated);
8560 
8561 	return (0);
8562 }
8563 
8564 struct mcx_queuestat {
8565 	char			 name[KSTAT_KV_NAMELEN];
8566 	enum kstat_kv_type	 type;
8567 };
8568 
8569 static const struct mcx_queuestat mcx_queue_kstat_tpl[] = {
8570 	{ "RQ SW prod",		KSTAT_KV_T_COUNTER64 },
8571 	{ "RQ HW prod",		KSTAT_KV_T_COUNTER64 },
8572 	{ "RQ HW cons",		KSTAT_KV_T_COUNTER64 },
8573 	{ "RQ HW state",	KSTAT_KV_T_ISTR },
8574 
8575 	{ "SQ SW prod",		KSTAT_KV_T_COUNTER64 },
8576 	{ "SQ SW cons",		KSTAT_KV_T_COUNTER64 },
8577 	{ "SQ HW prod",		KSTAT_KV_T_COUNTER64 },
8578 	{ "SQ HW cons",		KSTAT_KV_T_COUNTER64 },
8579 	{ "SQ HW state",	KSTAT_KV_T_ISTR },
8580 
8581 	{ "CQ SW cons",		KSTAT_KV_T_COUNTER64 },
8582 	{ "CQ HW prod",		KSTAT_KV_T_COUNTER64 },
8583 	{ "CQ HW cons",		KSTAT_KV_T_COUNTER64 },
8584 	{ "CQ HW notify",	KSTAT_KV_T_COUNTER64 },
8585 	{ "CQ HW solicit",	KSTAT_KV_T_COUNTER64 },
8586 	{ "CQ HW status",	KSTAT_KV_T_ISTR },
8587 	{ "CQ HW state",	KSTAT_KV_T_ISTR },
8588 
8589 	{ "EQ SW cons",		KSTAT_KV_T_COUNTER64 },
8590 	{ "EQ HW prod",		KSTAT_KV_T_COUNTER64 },
8591 	{ "EQ HW cons",		KSTAT_KV_T_COUNTER64 },
8592 	{ "EQ HW status",	KSTAT_KV_T_ISTR },
8593 	{ "EQ HW state",	KSTAT_KV_T_ISTR },
8594 };
8595 
8596 static int	mcx_kstat_queue_read(struct kstat *);
8597 
8598 static void
8599 mcx_kstat_attach_queues(struct mcx_softc *sc)
8600 {
8601 	struct kstat *ks;
8602 	struct kstat_kv *kvs;
8603 	int q, i;
8604 
8605 	for (q = 0; q < intrmap_count(sc->sc_intrmap); q++) {
8606 		ks = kstat_create(DEVNAME(sc), 0, "mcx-queues", q,
8607 		    KSTAT_T_KV, 0);
8608 		if (ks == NULL) {
8609 			/* unable to attach queue stats %u, q */
8610 			continue;
8611 		}
8612 
8613 		kvs = mallocarray(nitems(mcx_queue_kstat_tpl),
8614 		    sizeof(*kvs), M_DEVBUF, M_WAITOK);
8615 
8616 		for (i = 0; i < nitems(mcx_queue_kstat_tpl); i++) {
8617 			const struct mcx_queuestat *tpl =
8618 			    &mcx_queue_kstat_tpl[i];
8619 
8620 			kstat_kv_init(&kvs[i], tpl->name, tpl->type);
8621 		}
8622 
8623 		ks->ks_softc = &sc->sc_queues[q];
8624 		ks->ks_data = kvs;
8625 		ks->ks_datalen = nitems(mcx_queue_kstat_tpl) * sizeof(*kvs);
8626 		ks->ks_read = mcx_kstat_queue_read;
8627 
8628 		sc->sc_queues[q].q_kstat = ks;
8629 		kstat_install(ks);
8630 	}
8631 }
8632 
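/*
 * per-queue kstat read: the kvs pointer is advanced in exactly the
 * order of mcx_queue_kstat_tpl above, so the template and this
 * function have to be kept in sync.
 */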
8633 static int
8634 mcx_kstat_queue_read(struct kstat *ks)
8635 {
8636 	struct mcx_queues *q = ks->ks_softc;
8637 	struct mcx_softc *sc = q->q_sc;
8638 	struct kstat_kv *kvs = ks->ks_data;
8639 	union {
8640 		struct mcx_rq_ctx rq;
8641 		struct mcx_sq_ctx sq;
8642 		struct mcx_cq_ctx cq;
8643 		struct mcx_eq_ctx eq;
8644 	} u;
8645 	const char *text;
8646 	int error = 0;
8647 
8648 	KERNEL_LOCK();
8649 
8650 	if (mcx_query_rq(sc, &q->q_rx, &u.rq) != 0) {
8651 		error = EIO;
8652 		goto out;
8653 	}
8654 
8655 	kstat_kv_u64(kvs++) = q->q_rx.rx_prod;
8656 	kstat_kv_u64(kvs++) = bemtoh32(&u.rq.rq_wq.wq_sw_counter);
8657 	kstat_kv_u64(kvs++) = bemtoh32(&u.rq.rq_wq.wq_hw_counter);
8658 	switch ((bemtoh32(&u.rq.rq_flags) & MCX_RQ_CTX_STATE_MASK) >>
8659 	    MCX_RQ_CTX_STATE_SHIFT) {
8660 	case MCX_RQ_CTX_STATE_RST:
8661 		text = "RST";
8662 		break;
8663 	case MCX_RQ_CTX_STATE_RDY:
8664 		text = "RDY";
8665 		break;
8666 	case MCX_RQ_CTX_STATE_ERR:
8667 		text = "ERR";
8668 		break;
8669 	default:
8670 		text = "unknown";
8671 		break;
8672 	}
8673 	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8674 	kvs++;
8675 
8676 	if (mcx_query_sq(sc, &q->q_tx, &u.sq) != 0) {
8677 		error = EIO;
8678 		goto out;
8679 	}
8680 
8681 	kstat_kv_u64(kvs++) = q->q_tx.tx_prod;
8682 	kstat_kv_u64(kvs++) = q->q_tx.tx_cons;
8683 	kstat_kv_u64(kvs++) = bemtoh32(&u.sq.sq_wq.wq_sw_counter);
8684 	kstat_kv_u64(kvs++) = bemtoh32(&u.sq.sq_wq.wq_hw_counter);
8685 	switch ((bemtoh32(&u.sq.sq_flags) & MCX_SQ_CTX_STATE_MASK) >>
8686 	    MCX_SQ_CTX_STATE_SHIFT) {
8687 	case MCX_SQ_CTX_STATE_RST:
8688 		text = "RST";
8689 		break;
8690 	case MCX_SQ_CTX_STATE_RDY:
8691 		text = "RDY";
8692 		break;
8693 	case MCX_SQ_CTX_STATE_ERR:
8694 		text = "ERR";
8695 		break;
8696 	default:
8697 		text = "unknown";
8698 		break;
8699 	}
8700 	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8701 	kvs++;
8702 
8703 	if (mcx_query_cq(sc, &q->q_cq, &u.cq) != 0) {
8704 		error = EIO;
8705 		goto out;
8706 	}
8707 
8708 	kstat_kv_u64(kvs++) = q->q_cq.cq_cons;
8709 	kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_producer_counter);
8710 	kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_consumer_counter);
8711 	kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_last_notified);
8712 	kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_last_solicit);
8713 
8714 	switch ((bemtoh32(&u.cq.cq_status) & MCX_CQ_CTX_STATUS_MASK) >>
8715 	    MCX_CQ_CTX_STATUS_SHIFT) {
8716 	case MCX_CQ_CTX_STATUS_OK:
8717 		text = "OK";
8718 		break;
8719 	case MCX_CQ_CTX_STATUS_OVERFLOW:
8720 		text = "overflow";
8721 		break;
8722 	case MCX_CQ_CTX_STATUS_WRITE_FAIL:
8723 		text = "write fail";
8724 		break;
8725 	default:
8726 		text = "unknown";
8727 		break;
8728 	}
8729 	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8730 	kvs++;
8731 
8732 	switch ((bemtoh32(&u.cq.cq_status) & MCX_CQ_CTX_STATE_MASK) >>
8733 	    MCX_CQ_CTX_STATE_SHIFT) {
8734 	case MCX_CQ_CTX_STATE_SOLICITED:
8735 		text = "solicited";
8736 		break;
8737 	case MCX_CQ_CTX_STATE_ARMED:
8738 		text = "armed";
8739 		break;
8740 	case MCX_CQ_CTX_STATE_FIRED:
8741 		text = "fired";
8742 		break;
8743 	default:
8744 		text = "unknown";
8745 		break;
8746 	}
8747 	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8748 	kvs++;
8749 
8750 	if (mcx_query_eq(sc, &q->q_eq, &u.eq) != 0) {
8751 		error = EIO;
8752 		goto out;
8753 	}
8754 
8755 	kstat_kv_u64(kvs++) = q->q_eq.eq_cons;
8756 	kstat_kv_u64(kvs++) = bemtoh32(&u.eq.eq_producer_counter);
8757 	kstat_kv_u64(kvs++) = bemtoh32(&u.eq.eq_consumer_counter);
8758 
8759 	switch ((bemtoh32(&u.eq.eq_status) & MCX_EQ_CTX_STATUS_MASK) >>
8760 	    MCX_EQ_CTX_STATUS_SHIFT) {
8761 	case MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE:
8762 		text = "write fail";
8763 		break;
8764 	case MCX_EQ_CTX_STATUS_OK:
8765 		text = "OK";
8766 		break;
8767 	default:
8768 		text = "unknown";
8769 		break;
8770 	}
8771 	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8772 	kvs++;
8773 
8774 	switch ((bemtoh32(&u.eq.eq_status) & MCX_EQ_CTX_STATE_MASK) >>
8775 	    MCX_EQ_CTX_STATE_SHIFT) {
8776 	case MCX_EQ_CTX_STATE_ARMED:
8777 		text = "armed";
8778 		break;
8779 	case MCX_EQ_CTX_STATE_FIRED:
8780 		text = "fired";
8781 		break;
8782 	default:
8783 		text = "unknown";
8784 		break;
8785 	}
8786 	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8787 	kvs++;
8788 
8789 	nanouptime(&ks->ks_updated);
8790 out:
8791 	KERNEL_UNLOCK();
8792 	return (error);
8793 }
8794 
8795 #endif /* NKSTAT > 0 */
8796 
8797 static unsigned int
8798 mcx_timecounter_read(struct timecounter *tc)
8799 {
8800 	struct mcx_softc *sc = tc->tc_priv;
8801 
8802 	return (mcx_rd(sc, MCX_INTERNAL_TIMER_L));
8803 }
8804 
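/*
 * expose the device's internal timer as a timecounter. the negative
 * quality keeps it from ever being selected automatically; it has to
 * be chosen by hand via the kern.timecounter.hardware sysctl.
 */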
8805 static void
8806 mcx_timecounter_attach(struct mcx_softc *sc)
8807 {
8808 	struct timecounter *tc = &sc->sc_timecounter;
8809 
8810 	tc->tc_get_timecount = mcx_timecounter_read;
8811 	tc->tc_counter_mask = ~0U;
8812 	tc->tc_frequency = sc->sc_khz * 1000;
8813 	tc->tc_name = sc->sc_dev.dv_xname;
8814 	tc->tc_quality = -100;
8815 	tc->tc_priv = sc;
8816 
8817 	tc_init(tc);
8818 }
8819