xref: /netbsd-src/sys/dev/pci/if_mcx.c (revision 7330f729ccf0bd976a06f95fad452fe774fc7fd1)
/*	$NetBSD: if_mcx.c,v 1.5 2019/10/17 15:57:56 msaitoh Exp $ */
/*	$OpenBSD: if_mcx.c,v 1.33 2019/09/12 04:23:59 jmatthew Exp $ */

/*
 * Copyright (c) 2017 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2019 Jonathan Matthew <jmatthew@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/bus.h>

#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <netinet/in.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#ifdef NET_MPSAFE
#define	MCX_MPSAFE	1
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif

#define	MCX_MAX_NINTR	1

#define BUS_DMASYNC_PRERW	(BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define BUS_DMASYNC_POSTRW	(BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)

#define MCX_HCA_BAR	PCI_MAPREG_START /* BAR 0 */

#define MCX_FW_VER		  0x0000
#define  MCX_FW_VER_MAJOR(_v)		((_v) & 0xffff)
#define  MCX_FW_VER_MINOR(_v)		((_v) >> 16)
#define MCX_CMDIF_FW_SUBVER	  0x0004
#define  MCX_FW_VER_SUBMINOR(_v)	((_v) & 0xffff)
#define  MCX_CMDIF(_v)			((_v) >> 16)
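/*
 * For example (with made-up version numbers), a card running firmware
 * 14.23.8038 reads back MCX_FW_VER_MAJOR() == 14 and
 * MCX_FW_VER_MINOR() == 23 from MCX_FW_VER, and
 * MCX_FW_VER_SUBMINOR() == 8038 from MCX_CMDIF_FW_SUBVER; only the
 * field layout is fixed, not the values.
 */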

#define MCX_ISSI		 1 /* as per the PRM */
#define MCX_CMD_IF_SUPPORTED	 5

#define MCX_HARDMTU		 9500

#define MCX_MAX_CQS		 2		/* rq, sq */

/* queue sizes */
#define MCX_LOG_EQ_SIZE		 6		/* one page */
#define MCX_LOG_CQ_SIZE		 11
#define MCX_LOG_RQ_SIZE		 10
#define MCX_LOG_SQ_SIZE		 11

/* completion event moderation - about 10kHz, or 90% of the cq */
#define MCX_CQ_MOD_PERIOD	50
#define MCX_CQ_MOD_COUNTER	(((1 << (MCX_LOG_CQ_SIZE - 1)) * 9) / 10)
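/*
 * With MCX_LOG_CQ_SIZE at 11 the completion queue holds 2048 entries,
 * so MCX_CQ_MOD_COUNTER evaluates to ((1 << 10) * 9) / 10 = 921
 * completions before an event is generated.
 */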

#define MCX_LOG_SQ_ENTRY_SIZE	 6
#define MCX_SQ_ENTRY_MAX_SLOTS	 4
#define MCX_SQ_SEGS_PER_SLOT	 \
	(sizeof(struct mcx_sq_entry) / sizeof(struct mcx_sq_entry_seg))
#define MCX_SQ_MAX_SEGMENTS	 \
	1 + ((MCX_SQ_ENTRY_MAX_SLOTS-1) * MCX_SQ_SEGS_PER_SLOT)
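/*
 * A 64-byte mcx_sq_entry holds four 16-byte mcx_sq_entry_segs, so
 * MCX_SQ_SEGS_PER_SLOT is 4 and MCX_SQ_MAX_SEGMENTS works out to
 * 1 + 3 * 4 = 13: one data segment in the first slot (after the
 * control and ethernet segments) plus four in each of the remaining
 * three slots.
 */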

#define MCX_LOG_FLOW_TABLE_SIZE	 5
#define MCX_NUM_STATIC_FLOWS	 4	/* promisc, allmulti, ucast, bcast */
#define MCX_NUM_MCAST_FLOWS	\
	((1 << MCX_LOG_FLOW_TABLE_SIZE) - MCX_NUM_STATIC_FLOWS)

#define MCX_SQ_INLINE_SIZE	 18

/* doorbell offsets */
#define MCX_CQ_DOORBELL_OFFSET	 0
#define MCX_CQ_DOORBELL_SIZE	 16
#define MCX_RQ_DOORBELL_OFFSET	 64
#define MCX_SQ_DOORBELL_OFFSET	 64

#define MCX_WQ_DOORBELL_MASK	 0xffff

/* uar registers */
#define MCX_UAR_CQ_DOORBELL	 0x20
#define MCX_UAR_EQ_DOORBELL_ARM	 0x40
#define MCX_UAR_EQ_DOORBELL	 0x48
#define MCX_UAR_BF		 0x800

#define MCX_CMDQ_ADDR_HI		 0x0010
#define MCX_CMDQ_ADDR_LO		 0x0014
#define MCX_CMDQ_ADDR_NMASK		0xfff
#define MCX_CMDQ_LOG_SIZE(_v)		((_v) >> 4 & 0xf)
#define MCX_CMDQ_LOG_STRIDE(_v)		((_v) >> 0 & 0xf)
#define MCX_CMDQ_INTERFACE_MASK		(0x3 << 8)
#define MCX_CMDQ_INTERFACE_FULL_DRIVER	(0x0 << 8)
#define MCX_CMDQ_INTERFACE_DISABLED	(0x1 << 8)

#define MCX_CMDQ_DOORBELL		0x0018

#define MCX_STATE		0x01fc
#define MCX_STATE_MASK			(1U << 31)
#define MCX_STATE_INITIALIZING		(1U << 31)
#define MCX_STATE_READY			(0 << 31)
#define MCX_STATE_INTERFACE_MASK	(0x3 << 24)
#define MCX_STATE_INTERFACE_FULL_DRIVER	(0x0 << 24)
#define MCX_STATE_INTERFACE_DISABLED	(0x1 << 24)

#define MCX_INTERNAL_TIMER	0x1000
#define MCX_INTERNAL_TIMER_H	0x1000
#define MCX_INTERNAL_TIMER_L	0x1004

#define MCX_CLEAR_INT		0x100c

#define MCX_REG_OP_WRITE	0
#define MCX_REG_OP_READ		1

#define MCX_REG_PMLP		0x5002
#define MCX_REG_PMTU		0x5003
#define MCX_REG_PTYS		0x5004
#define MCX_REG_PAOS		0x5006
#define MCX_REG_PFCC		0x5007
#define MCX_REG_PPCNT		0x5008
#define MCX_REG_MCIA		0x9014

#define MCX_ETHER_CAP_SGMII	(1 << 0)
#define MCX_ETHER_CAP_1000_KX	(1 << 1)
#define MCX_ETHER_CAP_10G_CX4	(1 << 2)
#define MCX_ETHER_CAP_10G_KX4	(1 << 3)
#define MCX_ETHER_CAP_10G_KR	(1 << 4)
#define MCX_ETHER_CAP_40G_CR4	(1 << 6)
#define MCX_ETHER_CAP_40G_KR4	(1 << 7)
#define MCX_ETHER_CAP_10G_CR	(1 << 12)
#define MCX_ETHER_CAP_10G_SR	(1 << 13)
#define MCX_ETHER_CAP_10G_LR	(1 << 14)
#define MCX_ETHER_CAP_40G_SR4	(1 << 15)
#define MCX_ETHER_CAP_40G_LR4	(1 << 16)
#define MCX_ETHER_CAP_50G_SR2	(1 << 18)
#define MCX_ETHER_CAP_100G_CR4	(1 << 20)
#define MCX_ETHER_CAP_100G_SR4	(1 << 21)
#define MCX_ETHER_CAP_100G_KR4	(1 << 22)
#define MCX_ETHER_CAP_25G_CR	(1 << 27)
#define MCX_ETHER_CAP_25G_KR	(1 << 28)
#define MCX_ETHER_CAP_25G_SR	(1 << 29)
#define MCX_ETHER_CAP_50G_CR2	(1 << 30)
#define MCX_ETHER_CAP_50G_KR2	(1U << 31)

#define MCX_PAGE_SHIFT		12
#define MCX_PAGE_SIZE		(1 << MCX_PAGE_SHIFT)
#define MCX_MAX_CQE		32

#define MCX_CMD_QUERY_HCA_CAP	0x100
#define MCX_CMD_QUERY_ADAPTER	0x101
#define MCX_CMD_INIT_HCA	0x102
#define MCX_CMD_TEARDOWN_HCA	0x103
#define MCX_CMD_ENABLE_HCA	0x104
#define MCX_CMD_DISABLE_HCA	0x105
#define MCX_CMD_QUERY_PAGES	0x107
#define MCX_CMD_MANAGE_PAGES	0x108
#define MCX_CMD_SET_HCA_CAP	0x109
#define MCX_CMD_QUERY_ISSI	0x10a
#define MCX_CMD_SET_ISSI	0x10b
#define MCX_CMD_SET_DRIVER_VERSION \
				0x10d
#define MCX_CMD_QUERY_SPECIAL_CONTEXTS \
				0x203
#define MCX_CMD_CREATE_EQ	0x301
#define MCX_CMD_DESTROY_EQ	0x302
#define MCX_CMD_CREATE_CQ	0x400
#define MCX_CMD_DESTROY_CQ	0x401
#define MCX_CMD_QUERY_NIC_VPORT_CONTEXT \
				0x754
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT \
				0x755
#define MCX_CMD_QUERY_VPORT_COUNTERS \
				0x770
#define MCX_CMD_ALLOC_PD	0x800
#define MCX_CMD_ALLOC_UAR	0x802
#define MCX_CMD_ACCESS_REG	0x805
#define MCX_CMD_ALLOC_TRANSPORT_DOMAIN \
				0x816
#define MCX_CMD_CREATE_TIR	0x900
#define MCX_CMD_DESTROY_TIR	0x902
#define MCX_CMD_CREATE_SQ	0x904
#define MCX_CMD_MODIFY_SQ	0x905
#define MCX_CMD_DESTROY_SQ	0x906
#define MCX_CMD_QUERY_SQ	0x907
#define MCX_CMD_CREATE_RQ	0x908
#define MCX_CMD_MODIFY_RQ	0x909
#define MCX_CMD_DESTROY_RQ	0x90a
#define MCX_CMD_QUERY_RQ	0x90b
#define MCX_CMD_CREATE_TIS	0x912
#define MCX_CMD_DESTROY_TIS	0x914
#define MCX_CMD_SET_FLOW_TABLE_ROOT \
				0x92f
#define MCX_CMD_CREATE_FLOW_TABLE \
				0x930
#define MCX_CMD_DESTROY_FLOW_TABLE \
				0x931
#define MCX_CMD_QUERY_FLOW_TABLE \
				0x932
#define MCX_CMD_CREATE_FLOW_GROUP \
				0x933
#define MCX_CMD_DESTROY_FLOW_GROUP \
				0x934
#define MCX_CMD_QUERY_FLOW_GROUP \
				0x935
#define MCX_CMD_SET_FLOW_TABLE_ENTRY \
				0x936
#define MCX_CMD_QUERY_FLOW_TABLE_ENTRY \
				0x937
#define MCX_CMD_DELETE_FLOW_TABLE_ENTRY \
				0x938
#define MCX_CMD_ALLOC_FLOW_COUNTER \
				0x939
#define MCX_CMD_QUERY_FLOW_COUNTER \
				0x93b

#define MCX_QUEUE_STATE_RST	0
#define MCX_QUEUE_STATE_RDY	1
#define MCX_QUEUE_STATE_ERR	3

#define MCX_FLOW_TABLE_TYPE_RX	0
#define MCX_FLOW_TABLE_TYPE_TX	1

#define MCX_CMDQ_INLINE_DATASIZE 16

struct mcx_cmdq_entry {
	uint8_t			cq_type;
#define MCX_CMDQ_TYPE_PCIE		0x7
	uint8_t			cq_reserved0[3];

	uint32_t		cq_input_length;
	uint64_t		cq_input_ptr;
	uint8_t			cq_input_data[MCX_CMDQ_INLINE_DATASIZE];

	uint8_t			cq_output_data[MCX_CMDQ_INLINE_DATASIZE];
	uint64_t		cq_output_ptr;
	uint32_t		cq_output_length;

	uint8_t			cq_token;
	uint8_t			cq_signature;
	uint8_t			cq_reserved1[1];
	uint8_t			cq_status;
#define MCX_CQ_STATUS_SHIFT		1
#define MCX_CQ_STATUS_MASK		(0x7f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OK		(0x00 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_INT_ERR		(0x01 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OPCODE	(0x02 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_PARAM		(0x03 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SYS_STATE	(0x04 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE	(0x05 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_RESOURCE_BUSY	(0x06 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_EXCEED_LIM	(0x08 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RES_STATE	(0x09 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INDEX		(0x0a << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_NO_RESOURCES	(0x0f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INPUT_LEN	(0x50 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OUTPUT_LEN	(0x51 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE_STATE \
					(0x10 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SIZE		(0x40 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OWN_MASK		0x1
#define MCX_CQ_STATUS_OWN_SW		0x0
#define MCX_CQ_STATUS_OWN_HW		0x1
} __packed __aligned(8);
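/*
 * How the driver drives this structure, in outline (the details live
 * in the cmdq code further down the file): inputs and outputs of up
 * to MCX_CMDQ_INLINE_DATASIZE bytes sit directly in cq_input_data and
 * cq_output_data, anything larger is chained through cq_input_ptr and
 * cq_output_ptr as mailboxes.  The driver fills in the entry, sets
 * the ownership bit in cq_status to MCX_CQ_STATUS_OWN_HW and rings
 * the command doorbell; once the bit reads back as
 * MCX_CQ_STATUS_OWN_SW, the status code in the upper bits is valid.
 */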

#define MCX_CMDQ_MAILBOX_DATASIZE	512

struct mcx_cmdq_mailbox {
	uint8_t			mb_data[MCX_CMDQ_MAILBOX_DATASIZE];
	uint8_t			mb_reserved0[48];
	uint64_t		mb_next_ptr;
	uint32_t		mb_block_number;
	uint8_t			mb_reserved1[1];
	uint8_t			mb_token;
	uint8_t			mb_ctrl_signature;
	uint8_t			mb_signature;
} __packed __aligned(8);
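/*
 * Oversized command data is carried in a chain of these mailboxes:
 * cq_input_ptr/cq_output_ptr point at the first one, mb_next_ptr
 * links each mailbox to the next, mb_block_number counts up from zero
 * along the chain, and mb_token is expected to match the cq_token of
 * the owning queue entry.
 */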

#define MCX_CMDQ_MAILBOX_ALIGN	(1 << 10)
#define MCX_CMDQ_MAILBOX_SIZE	roundup(sizeof(struct mcx_cmdq_mailbox), \
				    MCX_CMDQ_MAILBOX_ALIGN)
/*
 * command mailbox structures
 */

struct mcx_cmd_enable_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_function_id;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_enable_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_init_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_init_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
#define MCX_CMD_TEARDOWN_HCA_GRACEFUL	0x0
#define MCX_CMD_TEARDOWN_HCA_PANIC	0x1
	uint16_t		cmd_profile;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_access_reg_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_register_id;
	uint32_t		cmd_argument;
} __packed __aligned(4);

struct mcx_cmd_access_reg_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_reg_pmtu {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2[2];
	uint16_t		rp_max_mtu;
	uint8_t			rp_reserved3[2];
	uint16_t		rp_admin_mtu;
	uint8_t			rp_reserved4[2];
	uint16_t		rp_oper_mtu;
	uint8_t			rp_reserved5[2];
} __packed __aligned(4);

struct mcx_reg_ptys {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2;
	uint8_t			rp_proto_mask;
#define MCX_REG_PTYS_PROTO_MASK_ETH		(1 << 2)
	uint8_t			rp_reserved3[8];
	uint32_t		rp_eth_proto_cap;
	uint8_t			rp_reserved4[8];
	uint32_t		rp_eth_proto_admin;
	uint8_t			rp_reserved5[8];
	uint32_t		rp_eth_proto_oper;
	uint8_t			rp_reserved6[24];
} __packed __aligned(4);

struct mcx_reg_paos {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_admin_status;
#define MCX_REG_PAOS_ADMIN_STATUS_UP		1
#define MCX_REG_PAOS_ADMIN_STATUS_DOWN		2
#define MCX_REG_PAOS_ADMIN_STATUS_UP_ONCE	3
#define MCX_REG_PAOS_ADMIN_STATUS_DISABLED	4
	uint8_t			rp_oper_status;
#define MCX_REG_PAOS_OPER_STATUS_UP		1
#define MCX_REG_PAOS_OPER_STATUS_DOWN		2
#define MCX_REG_PAOS_OPER_STATUS_FAILED		4
	uint8_t			rp_admin_state_update;
#define MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN	(1 << 7)
	uint8_t			rp_reserved2[11];
} __packed __aligned(4);

struct mcx_reg_pfcc {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2[3];
	uint8_t			rp_prio_mask_tx;
	uint8_t			rp_reserved3;
	uint8_t			rp_prio_mask_rx;
	uint8_t			rp_pptx_aptx;
	uint8_t			rp_pfctx;
	uint8_t			rp_fctx_dis;
	uint8_t			rp_reserved4;
	uint8_t			rp_pprx_aprx;
	uint8_t			rp_pfcrx;
	uint8_t			rp_reserved5[2];
	uint16_t		rp_dev_stall_min;
	uint16_t		rp_dev_stall_crit;
	uint8_t			rp_reserved6[12];
} __packed __aligned(4);

#define MCX_PMLP_MODULE_NUM_MASK	0xff
struct mcx_reg_pmlp {
	uint8_t			rp_rxtx;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved0;
	uint8_t			rp_width;
	uint32_t		rp_lane0_mapping;
	uint32_t		rp_lane1_mapping;
	uint32_t		rp_lane2_mapping;
	uint32_t		rp_lane3_mapping;
	uint8_t			rp_reserved1[44];
} __packed __aligned(4);

#define MCX_MCIA_EEPROM_BYTES	32
struct mcx_reg_mcia {
	uint8_t			rm_l;
	uint8_t			rm_module;
	uint8_t			rm_reserved0;
	uint8_t			rm_status;
	uint8_t			rm_i2c_addr;
	uint8_t			rm_page_num;
	uint16_t		rm_dev_addr;
	uint16_t		rm_reserved1;
	uint16_t		rm_size;
	uint32_t		rm_reserved2;
	uint8_t			rm_data[48];
} __packed __aligned(4);

struct mcx_cmd_query_issi_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_issi_il_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_current_issi;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_il_out) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_issi_mb_out {
	uint8_t			cmd_reserved2[16];
	uint8_t			cmd_supported_issi[80]; /* very big endian */
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_mb_out) <= MCX_CMDQ_MAILBOX_DATASIZE);

struct mcx_cmd_set_issi_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_current_issi;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_in) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_set_issi_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_out) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_pages_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_QUERY_PAGES_BOOT	0x01
#define MCX_CMD_QUERY_PAGES_INIT	0x02
#define MCX_CMD_QUERY_PAGES_REGULAR	0x03
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_pages_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_func_id;
	uint32_t		cmd_num_pages;
} __packed __aligned(4);

struct mcx_cmd_manage_pages_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_MANAGE_PAGES_ALLOC_FAIL \
					0x00
#define MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS \
					0x01
#define MCX_CMD_MANAGE_PAGES_HCA_RETURN_PAGES \
					0x02
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_func_id;
	uint32_t		cmd_input_num_entries;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_in) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_manage_pages_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_output_num_entries;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_out) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_hca_cap_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_QUERY_HCA_CAP_MAX	(0x0 << 0)
#define MCX_CMD_QUERY_HCA_CAP_CURRENT	(0x1 << 0)
#define MCX_CMD_QUERY_HCA_CAP_DEVICE	(0x0 << 1)
#define MCX_CMD_QUERY_HCA_CAP_OFFLOAD	(0x1 << 1)
#define MCX_CMD_QUERY_HCA_CAP_FLOW	(0x7 << 1)
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_hca_cap_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

#define MCX_HCA_CAP_LEN			0x1000
#define MCX_HCA_CAP_NMAILBOXES		\
	(MCX_HCA_CAP_LEN / MCX_CMDQ_MAILBOX_DATASIZE)

#if __GNUC_PREREQ__(4, 3)
#define __counter__		__COUNTER__
#else
#define __counter__		__LINE__
#endif

#define __token(_tok, _num)	_tok##_num
#define _token(_tok, _num)	__token(_tok, _num)
#define __reserved__		_token(__reserved, __counter__)
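/*
 * Each use of __reserved__ below expands to a uniquely named padding
 * field (__reserved0, __reserved1, ...), so a struct can declare many
 * reserved fields without inventing names for them.  __COUNTER__ is
 * used where available; the __LINE__ fallback works as long as no two
 * uses of the macro land on the same line.
 */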

struct mcx_cap_device {
	uint8_t			reserved0[16];

	uint8_t			log_max_srq_sz;
	uint8_t			log_max_qp_sz;
	uint8_t			__reserved__[1];
	uint8_t			log_max_qp; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_QP	0x1f

	uint8_t			__reserved__[1];
	uint8_t			log_max_srq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SRQ	0x1f
	uint8_t			__reserved__[2];

	uint8_t			__reserved__[1];
	uint8_t			log_max_cq_sz;
	uint8_t			__reserved__[1];
	uint8_t			log_max_cq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_CQ	0x1f

	uint8_t			log_max_eq_sz;
	uint8_t			log_max_mkey; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MKEY	0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_eq; /* 4 bits */
#define MCX_CAP_DEVICE_LOG_MAX_EQ	0x0f

	uint8_t			max_indirection;
	uint8_t			log_max_mrw_sz; /* 7 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MRW_SZ	0x7f
	uint8_t			teardown_log_max_msf_list_size;
#define MCX_CAP_DEVICE_FORCE_TEARDOWN	0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSF_LIST_SIZE \
					0x3f
	uint8_t			log_max_klm_list_size; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_KLM_LIST_SIZE \
					0x3f

	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_req_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_REQ_DC	0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_res_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_DC \
					0x3f

	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_req_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_REQ_QP \
					0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_res_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_QP \
					0x3f

	uint8_t			flags1;
#define MCX_CAP_DEVICE_END_PAD		0x80
#define MCX_CAP_DEVICE_CC_QUERY_ALLOWED	0x40
#define MCX_CAP_DEVICE_CC_MODIFY_ALLOWED \
					0x20
#define MCX_CAP_DEVICE_START_PAD	0x10
#define MCX_CAP_DEVICE_128BYTE_CACHELINE \
					0x08
	uint8_t			__reserved__[1];
	uint16_t		gid_table_size;

	uint16_t		flags2;
#define MCX_CAP_DEVICE_OUT_OF_SEQ_CNT	0x8000
#define MCX_CAP_DEVICE_VPORT_COUNTERS	0x4000
#define MCX_CAP_DEVICE_RETRANSMISSION_Q_COUNTERS \
					0x2000
#define MCX_CAP_DEVICE_DEBUG		0x1000
#define MCX_CAP_DEVICE_MODIFY_RQ_COUNTERS_SET_ID \
					0x8000
#define MCX_CAP_DEVICE_RQ_DELAY_DROP	0x4000
#define MCX_CAP_DEVICe_MAX_QP_CNT_MASK	0x03ff
	uint16_t		pkey_table_size;

	uint8_t			flags3;
#define MCX_CAP_DEVICE_VPORT_GROUP_MANAGER \
					0x80
#define MCX_CAP_DEVICE_VHCA_GROUP_MANAGER \
					0x40
#define MCX_CAP_DEVICE_IB_VIRTUAL	0x20
#define MCX_CAP_DEVICE_ETH_VIRTUAL	0x10
#define MCX_CAP_DEVICE_ETS		0x04
#define MCX_CAP_DEVICE_NIC_FLOW_TABLE	0x02
#define MCX_CAP_DEVICE_ESWITCH_FLOW_TABLE \
					0x01
	uint8_t			local_ca_ack_delay; /* 5 bits */
#define MCX_CAP_DEVICE_LOCAL_CA_ACK_DELAY \
					0x1f
	uint8_t			port_type;
#define MCX_CAP_DEVICE_PORT_MODULE_EVENT \
					0x80
#define MCX_CAP_DEVICE_PORT_TYPE	0x03
	uint8_t			num_ports;

	uint8_t			snapshot_log_max_msg;
#define MCX_CAP_DEVICE_SNAPSHOT		0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSG	0x1f
	uint8_t			max_tc; /* 4 bits */
#define MCX_CAP_DEVICE_MAX_TC		0x0f
	uint8_t			flags4;
#define MCX_CAP_DEVICE_TEMP_WARN_EVENT	0x80
#define MCX_CAP_DEVICE_DCBX		0x40
#define MCX_CAP_DEVICE_ROL_S		0x02
#define MCX_CAP_DEVICE_ROL_G		0x01
	uint8_t			wol;
#define MCX_CAP_DEVICE_WOL_S		0x40
#define MCX_CAP_DEVICE_WOL_G		0x20
#define MCX_CAP_DEVICE_WOL_A		0x10
#define MCX_CAP_DEVICE_WOL_B		0x08
#define MCX_CAP_DEVICE_WOL_M		0x04
#define MCX_CAP_DEVICE_WOL_U		0x02
#define MCX_CAP_DEVICE_WOL_P		0x01

	uint16_t		stat_rate_support;
	uint8_t			__reserved__[1];
	uint8_t			cqe_version; /* 4 bits */
#define MCX_CAP_DEVICE_CQE_VERSION	0x0f

	uint32_t		flags5;
#define MCX_CAP_DEVICE_COMPACT_ADDRESS_VECTOR \
					0x80000000
#define MCX_CAP_DEVICE_STRIDING_RQ	0x40000000
#define MCX_CAP_DEVICE_IPOIP_ENHANCED_OFFLOADS \
					0x10000000
#define MCX_CAP_DEVICE_IPOIP_IPOIP_OFFLOADS \
					0x08000000
#define MCX_CAP_DEVICE_DC_CONNECT_CP	0x00040000
#define MCX_CAP_DEVICE_DC_CNAK_DRACE	0x00020000
#define MCX_CAP_DEVICE_DRAIN_SIGERR	0x00010000
#define MCX_CAP_DEVICE_CMDIF_CHECKSUM	0x0000c000
#define MCX_CAP_DEVICE_SIGERR_QCE	0x00002000
#define MCX_CAP_DEVICE_WQ_SIGNATURE	0x00000800
#define MCX_CAP_DEVICE_SCTR_DATA_CQE	0x00000400
#define MCX_CAP_DEVICE_SHO		0x00000100
#define MCX_CAP_DEVICE_TPH		0x00000080
#define MCX_CAP_DEVICE_RF		0x00000040
#define MCX_CAP_DEVICE_DCT		0x00000020
#define MCX_CAP_DEVICE_QOS		0x00000010
#define MCX_CAP_DEVICe_ETH_NET_OFFLOADS	0x00000008
#define MCX_CAP_DEVICE_ROCE		0x00000004
#define MCX_CAP_DEVICE_ATOMIC		0x00000002

	uint32_t		flags6;
#define MCX_CAP_DEVICE_CQ_OI		0x80000000
#define MCX_CAP_DEVICE_CQ_RESIZE	0x40000000
#define MCX_CAP_DEVICE_CQ_MODERATION	0x20000000
#define MCX_CAP_DEVICE_CQ_PERIOD_MODE_MODIFY \
					0x10000000
#define MCX_CAP_DEVICE_CQ_INVALIDATE	0x08000000
#define MCX_CAP_DEVICE_RESERVED_AT_255	0x04000000
#define MCX_CAP_DEVICE_CQ_EQ_REMAP	0x02000000
#define MCX_CAP_DEVICE_PG		0x01000000
#define MCX_CAP_DEVICE_BLOCK_LB_MC	0x00800000
#define MCX_CAP_DEVICE_EXPONENTIAL_BACKOFF \
					0x00400000
#define MCX_CAP_DEVICE_SCQE_BREAK_MODERATION \
					0x00200000
#define MCX_CAP_DEVICE_CQ_PERIOD_START_FROM_CQE \
					0x00100000
#define MCX_CAP_DEVICE_CD		0x00080000
#define MCX_CAP_DEVICE_ATM		0x00040000
#define MCX_CAP_DEVICE_APM		0x00020000
#define MCX_CAP_DEVICE_IMAICL		0x00010000
#define MCX_CAP_DEVICE_QKV		0x00000200
#define MCX_CAP_DEVICE_PKV		0x00000100
#define MCX_CAP_DEVICE_SET_DETH_SQPN	0x00000080
#define MCX_CAP_DEVICE_XRC		0x00000008
#define MCX_CAP_DEVICE_UD		0x00000004
#define MCX_CAP_DEVICE_UC		0x00000002
#define MCX_CAP_DEVICE_RC		0x00000001

	uint8_t			uar_flags;
#define MCX_CAP_DEVICE_UAR_4K		0x80
	uint8_t			uar_sz;	/* 6 bits */
#define MCX_CAP_DEVICE_UAR_SZ		0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_pg_sz;

	uint8_t			flags7;
#define MCX_CAP_DEVICE_BF		0x80
#define MCX_CAP_DEVICE_DRIVER_VERSION	0x40
#define MCX_CAP_DEVICE_PAD_TX_ETH_PACKET \
					0x20
	uint8_t			log_bf_reg_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_BF_REG_SIZE	0x1f
	uint8_t			__reserved__[2];

	uint16_t		num_of_diagnostic_counters;
	uint16_t		max_wqe_sz_sq;

	uint8_t			__reserved__[2];
	uint16_t		max_wqe_sz_rq;

	uint8_t			__reserved__[2];
	uint16_t		max_wqe_sz_sq_dc;

	uint32_t		max_qp_mcg; /* 25 bits */
#define MCX_CAP_DEVICE_MAX_QP_MCG	0x1ffffff

	uint8_t			__reserved__[3];
	uint8_t			log_max_mcq;

	uint8_t			log_max_transport_domain; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TRANSORT_DOMAIN \
					0x1f
	uint8_t			log_max_pd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_PD	0x1f
	uint8_t			__reserved__[1];
	uint8_t			log_max_xrcd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_XRCD	0x1f

	uint8_t			__reserved__[2];
	uint16_t		max_flow_counter;

	uint8_t			log_max_rq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQ	0x1f
	uint8_t			log_max_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SQ	0x1f
	uint8_t			log_max_tir; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIR	0x1f
	uint8_t			log_max_tis; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS	0x1f

	uint8_t			flags8;
#define MCX_CAP_DEVICE_BASIC_CYCLIC_RCV_WQE \
					0x80
#define MCX_CAP_DEVICE_LOG_MAX_RMP	0x1f
	uint8_t			log_max_rqt; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT	0x1f
	uint8_t			log_max_rqt_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT_SIZE	0x1f
	uint8_t			log_max_tis_per_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS_PER_SQ \
					0x1f
} __packed __aligned(8);

CTASSERT(offsetof(struct mcx_cap_device, max_indirection) == 0x20);
CTASSERT(offsetof(struct mcx_cap_device, flags1) == 0x2c);
CTASSERT(offsetof(struct mcx_cap_device, flags2) == 0x30);
CTASSERT(offsetof(struct mcx_cap_device, snapshot_log_max_msg) == 0x38);
CTASSERT(offsetof(struct mcx_cap_device, flags5) == 0x40);
CTASSERT(offsetof(struct mcx_cap_device, flags7) == 0x4c);
CTASSERT(sizeof(struct mcx_cap_device) <= MCX_CMDQ_MAILBOX_DATASIZE);

struct mcx_cmd_set_driver_version_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version {
	uint8_t			cmd_driver_version[64];
} __packed __aligned(8);

struct mcx_cmd_modify_nic_vport_context_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_field_select;
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_ADDR	0x04
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC	0x10
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU	0x40
} __packed __aligned(4);

struct mcx_cmd_modify_nic_vport_context_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[4];
	uint8_t			cmd_allowed_list_type;
	uint8_t			cmd_reserved2[3];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_nic_vport_ctx {
	uint32_t		vp_min_wqe_inline_mode;
	uint8_t			vp_reserved0[32];
	uint32_t		vp_mtu;
	uint8_t			vp_reserved1[200];
	uint16_t		vp_flags;
#define MCX_NIC_VPORT_CTX_LIST_UC_MAC			(0)
#define MCX_NIC_VPORT_CTX_LIST_MC_MAC			(1 << 24)
#define MCX_NIC_VPORT_CTX_LIST_VLAN			(2 << 24)
#define MCX_NIC_VPORT_CTX_PROMISC_ALL			(1 << 13)
#define MCX_NIC_VPORT_CTX_PROMISC_MCAST			(1 << 14)
#define MCX_NIC_VPORT_CTX_PROMISC_UCAST			(1 << 15)
	uint16_t		vp_allowed_list_size;
	uint64_t		vp_perm_addr;
	uint8_t			vp_reserved2[4];
	/* allowed list follows */
} __packed __aligned(4);

struct mcx_counter {
	uint64_t		packets;
	uint64_t		octets;
} __packed __aligned(4);

struct mcx_nic_vport_counters {
	struct mcx_counter	rx_err;
	struct mcx_counter	tx_err;
	uint8_t			reserved0[64]; /* 0x30 */
	struct mcx_counter	rx_bcast;
	struct mcx_counter	tx_bcast;
	struct mcx_counter	rx_ucast;
	struct mcx_counter	tx_ucast;
	struct mcx_counter	rx_mcast;
	struct mcx_counter	tx_mcast;
	uint8_t			reserved1[0x210 - 0xd0];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_mb_in {
	uint8_t			cmd_reserved0[8];
	uint8_t			cmd_clear;
	uint8_t			cmd_reserved1[7];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_mb_in {
	uint8_t			cmd_reserved0[8];
	uint8_t			cmd_clear;
	uint8_t			cmd_reserved1[5];
	uint16_t		cmd_flow_counter_id;
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_uar;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_resd_lkey;
} __packed __aligned(4);

struct mcx_eq_ctx {
	uint32_t		eq_status;
#define MCX_EQ_CTX_ST_SHIFT		8
#define MCX_EQ_CTX_ST_MASK		(0xf << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_ST_ARMED		(0x9 << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_ST_FIRED		(0xa << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_OI_SHIFT		17
#define MCX_EQ_CTX_OI			(1 << MCX_EQ_CTX_OI_SHIFT)
#define MCX_EQ_CTX_EC_SHIFT		18
#define MCX_EQ_CTX_EC			(1 << MCX_EQ_CTX_EC_SHIFT)
#define MCX_EQ_CTX_STATUS_SHIFT		28
#define MCX_EQ_CTX_STATUS_MASK		(0xf << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_OK		(0x0 << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE \
					(0xa << MCX_EQ_CTX_STATUS_SHIFT)
	uint32_t		eq_reserved1;
	uint32_t		eq_page_offset;
#define MCX_EQ_CTX_PAGE_OFFSET_SHIFT	5
	uint32_t		eq_uar_size;
#define MCX_EQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT	24
	uint32_t		eq_reserved2;
	uint8_t			eq_reserved3[3];
	uint8_t			eq_intr;
	uint32_t		eq_log_page_size;
#define MCX_EQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		eq_reserved4[3];
	uint32_t		eq_consumer_counter;
	uint32_t		eq_producer_counter;
#define MCX_EQ_CTX_COUNTER_MASK		0xffffff
	uint32_t		eq_reserved5[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_ctx) == 64);

struct mcx_cmd_create_eq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_eq_mb_in {
	struct mcx_eq_ctx	cmd_eq_ctx;
	uint8_t			cmd_reserved0[8];
	uint64_t		cmd_event_bitmask;
#define MCX_EVENT_TYPE_COMPLETION	0x00
#define MCX_EVENT_TYPE_CQ_ERROR		0x04
#define MCX_EVENT_TYPE_INTERNAL_ERROR	0x08
#define MCX_EVENT_TYPE_PORT_CHANGE	0x09
#define MCX_EVENT_TYPE_CMD_COMPLETION	0x0a
#define MCX_EVENT_TYPE_PAGE_REQUEST	0x0b
#define MCX_EVENT_TYPE_LAST_WQE		0x13
	uint8_t			cmd_reserved1[176];
} __packed __aligned(4);

struct mcx_cmd_create_eq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_eqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_eq_entry {
	uint8_t			eq_reserved1;
	uint8_t			eq_event_type;
	uint8_t			eq_reserved2;
	uint8_t			eq_event_sub_type;

	uint8_t			eq_reserved3[28];
	uint32_t		eq_event_data[7];
	uint8_t			eq_reserved4[2];
	uint8_t			eq_signature;
	uint8_t			eq_owner;
#define MCX_EQ_ENTRY_OWNER_INIT			1
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_entry) == 64);
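/*
 * Event queue entries follow the usual Mellanox ownership convention:
 * every slot starts out with eq_owner set to MCX_EQ_ENTRY_OWNER_INIT,
 * and the consumer decides whether an entry has been written by the
 * hardware yet by comparing the low bit of eq_owner against the wrap
 * parity of its consumer counter, which flips each time the queue
 * wraps around.
 */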

struct mcx_cmd_alloc_pd_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_pd_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_pd;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tdomain;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_create_tir_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tir_mb_in {
	uint8_t			cmd_reserved0[20];
	uint32_t		cmd_disp_type;
#define MCX_TIR_CTX_DISP_TYPE_SHIFT	28
	uint8_t			cmd_reserved1[8];
	uint32_t		cmd_lro;
	uint8_t			cmd_reserved2[8];
	uint32_t		cmd_inline_rqn;
	uint32_t		cmd_indir_table;
	uint32_t		cmd_tdomain;
	uint8_t			cmd_rx_hash_key[40];
	uint32_t		cmd_rx_hash_sel_outer;
	uint32_t		cmd_rx_hash_sel_inner;
	uint8_t			cmd_reserved3[152];
} __packed __aligned(4);

struct mcx_cmd_create_tir_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tirn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_tirn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_mb_in {
	uint8_t			cmd_reserved[16];
	uint32_t		cmd_prio;
	uint8_t			cmd_reserved1[32];
	uint32_t		cmd_tdomain;
	uint8_t			cmd_reserved2[120];
} __packed __aligned(4);

struct mcx_cmd_create_tis_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tisn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_tisn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cq_ctx {
	uint32_t		cq_status;
	uint32_t		cq_reserved1;
	uint32_t		cq_page_offset;
	uint32_t		cq_uar_size;
#define MCX_CQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT	24
	uint32_t		cq_period_max_count;
#define MCX_CQ_CTX_PERIOD_SHIFT		16
	uint32_t		cq_eqn;
	uint32_t		cq_log_page_size;
#define MCX_CQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		cq_reserved2;
	uint32_t		cq_last_notified;
	uint32_t		cq_last_solicit;
	uint32_t		cq_consumer_counter;
	uint32_t		cq_producer_counter;
	uint8_t			cq_reserved3[8];
	uint64_t		cq_doorbell;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_ctx) == 64);

struct mcx_cmd_create_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_cq_mb_in {
	struct mcx_cq_ctx	cmd_cq_ctx;
	uint8_t			cmd_reserved1[192];
} __packed __aligned(4);

struct mcx_cmd_create_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cq_entry {
	uint32_t		__reserved__;
	uint32_t		cq_lro;
	uint32_t		cq_lro_ack_seq_num;
	uint32_t		cq_rx_hash;
	uint8_t			cq_rx_hash_type;
	uint8_t			cq_ml_path;
	uint16_t		__reserved__;
	uint32_t		cq_checksum;
	uint32_t		__reserved__;
	uint32_t		cq_flags;
	uint32_t		cq_lro_srqn;
	uint32_t		__reserved__[2];
	uint32_t		cq_byte_cnt;
	uint64_t		cq_timestamp;
	uint8_t			cq_rx_drops;
	uint8_t			cq_flow_tag[3];
	uint16_t		cq_wqe_count;
	uint8_t			cq_signature;
	uint8_t			cq_opcode_owner;
#define MCX_CQ_ENTRY_FLAG_OWNER			(1 << 0)
#define MCX_CQ_ENTRY_FLAG_SE			(1 << 1)
#define MCX_CQ_ENTRY_FORMAT_SHIFT		2
#define MCX_CQ_ENTRY_OPCODE_SHIFT		4

#define MCX_CQ_ENTRY_FORMAT_NO_INLINE		0
#define MCX_CQ_ENTRY_FORMAT_INLINE_32		1
#define MCX_CQ_ENTRY_FORMAT_INLINE_64		2
#define MCX_CQ_ENTRY_FORMAT_COMPRESSED		3

#define MCX_CQ_ENTRY_OPCODE_REQ			0
#define MCX_CQ_ENTRY_OPCODE_SEND		2
#define MCX_CQ_ENTRY_OPCODE_REQ_ERR		13
#define MCX_CQ_ENTRY_OPCODE_SEND_ERR		14
#define MCX_CQ_ENTRY_OPCODE_INVALID		15

} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_entry) == 64);
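/*
 * cq_opcode_owner packs three fields into one byte: the ownership bit
 * (checked against the wrap parity of the consumer counter, as with
 * the event queue), the CQE format in bits 2-3, and the opcode in
 * bits 4-7, which is what separates transmit completions
 * (MCX_CQ_ENTRY_OPCODE_REQ) from receive completions
 * (MCX_CQ_ENTRY_OPCODE_SEND) and their error counterparts.
 */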

struct mcx_cq_doorbell {
	uint32_t		 db_update_ci;
	uint32_t		 db_arm_ci;
#define MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT	28
#define MCX_CQ_DOORBELL_ARM_CMD			(1 << 24)
#define MCX_CQ_DOORBELL_ARM_CI_MASK		(0xffffff)
} __packed __aligned(8);
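/*
 * This is the doorbell record a CQ points at via cq_doorbell: the
 * driver advances db_update_ci as it consumes entries, and writes
 * db_arm_ci (the consumer index plus MCX_CQ_DOORBELL_ARM_CMD and the
 * command sequence number in the top bits) when it wants another
 * completion event.
 */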

struct mcx_wq_ctx {
	uint8_t			 wq_type;
#define MCX_WQ_CTX_TYPE_CYCLIC			(1 << 4)
#define MCX_WQ_CTX_TYPE_SIGNATURE		(1 << 3)
	uint8_t			 wq_reserved0[5];
	uint16_t		 wq_lwm;
	uint32_t		 wq_pd;
	uint32_t		 wq_uar_page;
	uint64_t		 wq_doorbell;
	uint32_t		 wq_hw_counter;
	uint32_t		 wq_sw_counter;
	uint16_t		 wq_log_stride;
	uint8_t			 wq_log_page_sz;
	uint8_t			 wq_log_size;
	uint8_t			 wq_reserved1[156];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_wq_ctx) == 0xC0);

struct mcx_sq_ctx {
	uint32_t		sq_flags;
#define MCX_SQ_CTX_RLKEY			(1U << 31)
#define MCX_SQ_CTX_FRE_SHIFT			(1 << 29)
#define MCX_SQ_CTX_FLUSH_IN_ERROR		(1 << 28)
#define MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT		24
#define MCX_SQ_CTX_STATE_SHIFT			20
	uint32_t		sq_user_index;
	uint32_t		sq_cqn;
	uint32_t		sq_reserved1[5];
	uint32_t		sq_tis_lst_sz;
#define MCX_SQ_CTX_TIS_LST_SZ_SHIFT		16
	uint32_t		sq_reserved2[2];
	uint32_t		sq_tis_num;
	struct mcx_wq_ctx	sq_wq;
} __packed __aligned(4);

struct mcx_sq_entry_seg {
	uint32_t		sqs_byte_count;
	uint32_t		sqs_lkey;
	uint64_t		sqs_addr;
} __packed __aligned(4);

struct mcx_sq_entry {
	/* control segment */
	uint32_t		sqe_opcode_index;
#define MCX_SQE_WQE_INDEX_SHIFT			8
#define MCX_SQE_WQE_OPCODE_NOP			0x00
#define MCX_SQE_WQE_OPCODE_SEND			0x0a
	uint32_t		sqe_ds_sq_num;
#define MCX_SQE_SQ_NUM_SHIFT			8
	uint32_t		sqe_signature;
#define MCX_SQE_SIGNATURE_SHIFT			24
#define MCX_SQE_SOLICITED_EVENT			0x02
#define MCX_SQE_CE_CQE_ON_ERR			0x00
#define MCX_SQE_CE_CQE_FIRST_ERR		0x04
#define MCX_SQE_CE_CQE_ALWAYS			0x08
#define MCX_SQE_CE_CQE_SOLICIT			0x0C
#define MCX_SQE_FM_NO_FENCE			0x00
#define MCX_SQE_FM_SMALL_FENCE			0x40
	uint32_t		sqe_mkey;

	/* ethernet segment */
	uint32_t		sqe_reserved1;
	uint32_t		sqe_mss_csum;
#define MCX_SQE_L4_CSUM				(1U << 31)
#define MCX_SQE_L3_CSUM				(1 << 30)
	uint32_t		sqe_reserved2;
	uint16_t		sqe_inline_header_size;
	uint16_t		sqe_inline_headers[9];

	/* data segment */
	struct mcx_sq_entry_seg sqe_segs[1];
} __packed __aligned(64);

CTASSERT(sizeof(struct mcx_sq_entry) == 64);
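/*
 * A send WQE may occupy up to MCX_SQ_ENTRY_MAX_SLOTS consecutive
 * 64-byte slots.  The struct above describes the first slot: a
 * control segment, an ethernet segment carrying MCX_SQ_INLINE_SIZE
 * bytes of inlined packet headers, and the first data segment;
 * further data segments, when needed, simply continue into the
 * following slots as arrays of struct mcx_sq_entry_seg.
 */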

struct mcx_cmd_create_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_sqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_sq_state;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_mb_in {
	uint32_t		cmd_modify_hi;
	uint32_t		cmd_modify_lo;
	uint8_t			cmd_reserved0[8];
	struct mcx_sq_ctx	cmd_sq_ctx;
} __packed __aligned(4);

struct mcx_cmd_modify_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_sqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_rq_ctx {
	uint32_t		rq_flags;
#define MCX_RQ_CTX_RLKEY			(1U << 31)
#define MCX_RQ_CTX_VLAN_STRIP_DIS		(1 << 28)
#define MCX_RQ_CTX_MEM_RQ_TYPE_SHIFT		24
#define MCX_RQ_CTX_STATE_SHIFT			20
#define MCX_RQ_CTX_FLUSH_IN_ERROR		(1 << 18)
	uint32_t		rq_user_index;
	uint32_t		rq_cqn;
	uint32_t		rq_reserved1;
	uint32_t		rq_rmpn;
	uint32_t		rq_reserved2[7];
	struct mcx_wq_ctx	rq_wq;
} __packed __aligned(4);

struct mcx_rq_entry {
	uint32_t		rqe_byte_count;
	uint32_t		rqe_lkey;
	uint64_t		rqe_addr;
} __packed __aligned(16);

struct mcx_cmd_create_rq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_rq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_rqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_rq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_rq_state;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_rq_mb_in {
	uint32_t		cmd_modify_hi;
	uint32_t		cmd_modify_lo;
	uint8_t			cmd_reserved0[8];
	struct mcx_rq_ctx	cmd_rq_ctx;
} __packed __aligned(4);

struct mcx_cmd_modify_rq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_rq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_rqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_flow_table_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_flow_table_ctx {
	uint8_t			ft_miss_action;
	uint8_t			ft_level;
	uint8_t			ft_reserved0;
	uint8_t			ft_log_size;
	uint32_t		ft_table_miss_id;
	uint8_t			ft_reserved1[28];
} __packed __aligned(4);

struct mcx_cmd_create_flow_table_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[7];
	struct mcx_flow_table_ctx cmd_ctx;
} __packed __aligned(4);

struct mcx_cmd_create_flow_table_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_table_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_table_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[40];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_table_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_root_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_root_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[56];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_root_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_flow_match {
	/* outer headers */
	uint8_t			mc_src_mac[6];
	uint16_t		mc_ethertype;
	uint8_t			mc_dest_mac[6];
	uint16_t		mc_first_vlan;
	uint8_t			mc_ip_proto;
	uint8_t			mc_ip_dscp_ecn;
	uint8_t			mc_vlan_flags;
	uint8_t			mc_tcp_flags;
	uint16_t		mc_tcp_sport;
	uint16_t		mc_tcp_dport;
	uint32_t		mc_reserved0;
	uint16_t		mc_udp_sport;
	uint16_t		mc_udp_dport;
	uint8_t			mc_src_ip[16];
	uint8_t			mc_dest_ip[16];

	/* misc parameters */
	uint8_t			mc_reserved1[8];
	uint16_t		mc_second_vlan;
	uint8_t			mc_reserved2[2];
	uint8_t			mc_second_vlan_flags;
	uint8_t			mc_reserved3[15];
	uint32_t		mc_outer_ipv6_flow_label;
	uint8_t			mc_reserved4[32];

	uint8_t			mc_reserved[384];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_flow_match) == 512);
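/*
 * The same 512-byte layout does double duty: as the match criteria in
 * CREATE_FLOW_GROUP, where set bits select which header fields
 * participate in matching, and as the match value in each flow table
 * entry, where the corresponding fields hold the values to match.
 */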
1611 
1612 struct mcx_cmd_create_flow_group_in {
1613 	uint16_t		cmd_opcode;
1614 	uint8_t			cmd_reserved0[4];
1615 	uint16_t		cmd_op_mod;
1616 	uint8_t			cmd_reserved1[8];
1617 } __packed __aligned(4);
1618 
1619 struct mcx_cmd_create_flow_group_mb_in {
1620 	uint8_t			cmd_table_type;
1621 	uint8_t			cmd_reserved0[3];
1622 	uint32_t		cmd_table_id;
1623 	uint8_t			cmd_reserved1[4];
1624 	uint32_t		cmd_start_flow_index;
1625 	uint8_t			cmd_reserved2[4];
1626 	uint32_t		cmd_end_flow_index;
1627 	uint8_t			cmd_reserved3[23];
1628 	uint8_t			cmd_match_criteria_enable;
1629 #define MCX_CREATE_FLOW_GROUP_CRIT_OUTER	(1 << 0)
1630 #define MCX_CREATE_FLOW_GROUP_CRIT_MISC		(1 << 1)
1631 #define MCX_CREATE_FLOW_GROUP_CRIT_INNER	(1 << 2)
1632 	struct mcx_flow_match	cmd_match_criteria;
1633 	uint8_t			cmd_reserved4[448];
1634 } __packed __aligned(4);
1635 
1636 struct mcx_cmd_create_flow_group_out {
1637 	uint8_t			cmd_status;
1638 	uint8_t			cmd_reserved0[3];
1639 	uint32_t		cmd_syndrome;
1640 	uint32_t		cmd_group_id;
1641 	uint8_t			cmd_reserved1[4];
1642 } __packed __aligned(4);
1643 
1644 struct mcx_flow_ctx {
1645 	uint8_t			fc_reserved0[4];
1646 	uint32_t		fc_group_id;
1647 	uint32_t		fc_flow_tag;
1648 	uint32_t		fc_action;
1649 #define MCX_FLOW_CONTEXT_ACTION_ALLOW		(1 << 0)
1650 #define MCX_FLOW_CONTEXT_ACTION_DROP		(1 << 1)
1651 #define MCX_FLOW_CONTEXT_ACTION_FORWARD		(1 << 2)
1652 #define MCX_FLOW_CONTEXT_ACTION_COUNT		(1 << 3)
1653 	uint32_t		fc_dest_list_size;
1654 	uint32_t		fc_counter_list_size;
1655 	uint8_t			fc_reserved1[40];
1656 	struct mcx_flow_match	fc_match_value;
1657 	uint8_t			fc_reserved2[192];
1658 } __packed __aligned(4);
1659 
1660 #define MCX_FLOW_CONTEXT_DEST_TYPE_TABLE	(1 << 24)
1661 #define MCX_FLOW_CONTEXT_DEST_TYPE_TIR		(2 << 24)
1662 
1663 struct mcx_cmd_destroy_flow_group_in {
1664 	uint16_t		cmd_opcode;
1665 	uint8_t			cmd_reserved0[4];
1666 	uint16_t		cmd_op_mod;
1667 	uint8_t			cmd_reserved1[8];
1668 } __packed __aligned(4);
1669 
1670 struct mcx_cmd_destroy_flow_group_mb_in {
1671 	uint8_t			cmd_table_type;
1672 	uint8_t			cmd_reserved0[3];
1673 	uint32_t		cmd_table_id;
1674 	uint32_t		cmd_group_id;
1675 	uint8_t			cmd_reserved1[36];
1676 } __packed __aligned(4);
1677 
1678 struct mcx_cmd_destroy_flow_group_out {
1679 	uint8_t			cmd_status;
1680 	uint8_t			cmd_reserved0[3];
1681 	uint32_t		cmd_syndrome;
1682 	uint8_t			cmd_reserved1[8];
1683 } __packed __aligned(4);
1684 
1685 struct mcx_cmd_set_flow_table_entry_in {
1686 	uint16_t		cmd_opcode;
1687 	uint8_t			cmd_reserved0[4];
1688 	uint16_t		cmd_op_mod;
1689 	uint8_t			cmd_reserved1[8];
1690 } __packed __aligned(4);
1691 
1692 struct mcx_cmd_set_flow_table_entry_mb_in {
1693 	uint8_t			cmd_table_type;
1694 	uint8_t			cmd_reserved0[3];
1695 	uint32_t		cmd_table_id;
1696 	uint32_t		cmd_modify_enable_mask;
1697 	uint8_t			cmd_reserved1[4];
1698 	uint32_t		cmd_flow_index;
1699 	uint8_t			cmd_reserved2[28];
1700 	struct mcx_flow_ctx	cmd_flow_ctx;
1701 } __packed __aligned(4);
1702 
1703 struct mcx_cmd_set_flow_table_entry_out {
1704 	uint8_t			cmd_status;
1705 	uint8_t			cmd_reserved0[3];
1706 	uint32_t		cmd_syndrome;
1707 	uint8_t			cmd_reserved1[8];
1708 } __packed __aligned(4);
1709 
1710 struct mcx_cmd_query_flow_table_entry_in {
1711 	uint16_t		cmd_opcode;
1712 	uint8_t			cmd_reserved0[4];
1713 	uint16_t		cmd_op_mod;
1714 	uint8_t			cmd_reserved1[8];
1715 } __packed __aligned(4);
1716 
1717 struct mcx_cmd_query_flow_table_entry_mb_in {
1718 	uint8_t			cmd_table_type;
1719 	uint8_t			cmd_reserved0[3];
1720 	uint32_t		cmd_table_id;
1721 	uint8_t			cmd_reserved1[8];
1722 	uint32_t		cmd_flow_index;
1723 	uint8_t			cmd_reserved2[28];
1724 } __packed __aligned(4);
1725 
1726 struct mcx_cmd_query_flow_table_entry_out {
1727 	uint8_t			cmd_status;
1728 	uint8_t			cmd_reserved0[3];
1729 	uint32_t		cmd_syndrome;
1730 	uint8_t			cmd_reserved1[8];
1731 } __packed __aligned(4);
1732 
1733 struct mcx_cmd_query_flow_table_entry_mb_out {
1734 	uint8_t			cmd_reserved0[48];
1735 	struct mcx_flow_ctx	cmd_flow_ctx;
1736 } __packed __aligned(4);
1737 
1738 struct mcx_cmd_delete_flow_table_entry_in {
1739 	uint16_t		cmd_opcode;
1740 	uint8_t			cmd_reserved0[4];
1741 	uint16_t		cmd_op_mod;
1742 	uint8_t			cmd_reserved1[8];
1743 } __packed __aligned(4);
1744 
1745 struct mcx_cmd_delete_flow_table_entry_mb_in {
1746 	uint8_t			cmd_table_type;
1747 	uint8_t			cmd_reserved0[3];
1748 	uint32_t		cmd_table_id;
1749 	uint8_t			cmd_reserved1[8];
1750 	uint32_t		cmd_flow_index;
1751 	uint8_t			cmd_reserved2[28];
1752 } __packed __aligned(4);
1753 
1754 struct mcx_cmd_delete_flow_table_entry_out {
1755 	uint8_t			cmd_status;
1756 	uint8_t			cmd_reserved0[3];
1757 	uint32_t		cmd_syndrome;
1758 	uint8_t			cmd_reserved1[8];
1759 } __packed __aligned(4);
1760 
1761 struct mcx_cmd_query_flow_group_in {
1762 	uint16_t		cmd_opcode;
1763 	uint8_t			cmd_reserved0[4];
1764 	uint16_t		cmd_op_mod;
1765 	uint8_t			cmd_reserved1[8];
1766 } __packed __aligned(4);
1767 
1768 struct mcx_cmd_query_flow_group_mb_in {
1769 	uint8_t			cmd_table_type;
1770 	uint8_t			cmd_reserved0[3];
1771 	uint32_t		cmd_table_id;
1772 	uint32_t		cmd_group_id;
1773 	uint8_t			cmd_reserved1[36];
1774 } __packed __aligned(4);
1775 
1776 struct mcx_cmd_query_flow_group_out {
1777 	uint8_t			cmd_status;
1778 	uint8_t			cmd_reserved0[3];
1779 	uint32_t		cmd_syndrome;
1780 	uint8_t			cmd_reserved1[8];
1781 } __packed __aligned(4);
1782 
1783 struct mcx_cmd_query_flow_group_mb_out {
1784 	uint8_t			cmd_reserved0[12];
1785 	uint32_t		cmd_start_flow_index;
1786 	uint8_t			cmd_reserved1[4];
1787 	uint32_t		cmd_end_flow_index;
1788 	uint8_t			cmd_reserved2[20];
1789 	uint32_t		cmd_match_criteria_enable;
1790 	uint8_t			cmd_match_criteria[512];
	uint8_t			cmd_reserved3[448];
1792 } __packed __aligned(4);
1793 
1794 struct mcx_cmd_query_flow_table_in {
1795 	uint16_t		cmd_opcode;
1796 	uint8_t			cmd_reserved0[4];
1797 	uint16_t		cmd_op_mod;
1798 	uint8_t			cmd_reserved1[8];
1799 } __packed __aligned(4);
1800 
1801 struct mcx_cmd_query_flow_table_mb_in {
1802 	uint8_t			cmd_table_type;
1803 	uint8_t			cmd_reserved0[3];
1804 	uint32_t		cmd_table_id;
1805 	uint8_t			cmd_reserved1[40];
1806 } __packed __aligned(4);
1807 
1808 struct mcx_cmd_query_flow_table_out {
1809 	uint8_t			cmd_status;
1810 	uint8_t			cmd_reserved0[3];
1811 	uint32_t		cmd_syndrome;
1812 	uint8_t			cmd_reserved1[8];
1813 } __packed __aligned(4);
1814 
1815 struct mcx_cmd_query_flow_table_mb_out {
1816 	uint8_t			cmd_reserved0[4];
1817 	struct mcx_flow_table_ctx cmd_ctx;
1818 } __packed __aligned(4);
1819 
1820 struct mcx_cmd_alloc_flow_counter_in {
1821 	uint16_t		cmd_opcode;
1822 	uint8_t			cmd_reserved0[4];
1823 	uint16_t		cmd_op_mod;
1824 	uint8_t			cmd_reserved1[8];
1825 } __packed __aligned(4);
1826 
1827 struct mcx_cmd_query_rq_in {
1828 	uint16_t		cmd_opcode;
1829 	uint8_t			cmd_reserved0[4];
1830 	uint16_t		cmd_op_mod;
1831 	uint32_t		cmd_rqn;
1832 	uint8_t			cmd_reserved1[4];
1833 } __packed __aligned(4);
1834 
1835 struct mcx_cmd_query_rq_out {
1836 	uint8_t			cmd_status;
1837 	uint8_t			cmd_reserved0[3];
1838 	uint32_t		cmd_syndrome;
1839 	uint8_t			cmd_reserved1[8];
1840 } __packed __aligned(4);
1841 
1842 struct mcx_cmd_query_rq_mb_out {
1843 	uint8_t			cmd_reserved0[16];
1844 	struct mcx_rq_ctx	cmd_ctx;
1845 };
1846 
1847 struct mcx_cmd_query_sq_in {
1848 	uint16_t		cmd_opcode;
1849 	uint8_t			cmd_reserved0[4];
1850 	uint16_t		cmd_op_mod;
1851 	uint32_t		cmd_sqn;
1852 	uint8_t			cmd_reserved1[4];
1853 } __packed __aligned(4);
1854 
1855 struct mcx_cmd_query_sq_out {
1856 	uint8_t			cmd_status;
1857 	uint8_t			cmd_reserved0[3];
1858 	uint32_t		cmd_syndrome;
1859 	uint8_t			cmd_reserved1[8];
1860 } __packed __aligned(4);
1861 
1862 struct mcx_cmd_query_sq_mb_out {
1863 	uint8_t			cmd_reserved0[16];
1864 	struct mcx_sq_ctx	cmd_ctx;
1865 };
1866 
1867 struct mcx_cmd_alloc_flow_counter_out {
1868 	uint8_t			cmd_status;
1869 	uint8_t			cmd_reserved0[3];
1870 	uint32_t		cmd_syndrome;
1871 	uint8_t			cmd_reserved1[2];
1872 	uint16_t		cmd_flow_counter_id;
1873 	uint8_t			cmd_reserved2[4];
1874 } __packed __aligned(4);
1875 
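/*
 * work queue doorbell record: the receive and send counters the driver
 * advances and the device reads, presumably in the layout the hardware
 * expects (hence the 8 byte alignment).
 */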
1876 struct mcx_wq_doorbell {
1877 	uint32_t		 db_recv_counter;
1878 	uint32_t		 db_send_counter;
1879 } __packed __aligned(8);
1880 
1881 struct mcx_dmamem {
1882 	bus_dmamap_t		 mxm_map;
1883 	bus_dma_segment_t	 mxm_seg;
1884 	int			 mxm_nsegs;
1885 	size_t			 mxm_size;
1886 	void			*mxm_kva;
1887 };
1888 #define MCX_DMA_MAP(_mxm)	((_mxm)->mxm_map)
1889 #define MCX_DMA_DVA(_mxm)	((_mxm)->mxm_map->dm_segs[0].ds_addr)
1890 #define MCX_DMA_KVA(_mxm)	((void *)(_mxm)->mxm_kva)
1891 #define MCX_DMA_LEN(_mxm)	((_mxm)->mxm_size)
1892 
1893 struct mcx_hwmem {
1894 	bus_dmamap_t		 mhm_map;
1895 	bus_dma_segment_t	*mhm_segs;
1896 	unsigned int		 mhm_seg_count;
1897 	unsigned int		 mhm_npages;
1898 };
1899 
1900 struct mcx_slot {
1901 	bus_dmamap_t		 ms_map;
1902 	struct mbuf		*ms_m;
1903 };
1904 
1905 struct mcx_cq {
1906 	int			 cq_n;
1907 	struct mcx_dmamem	 cq_mem;
1908 	uint32_t		*cq_doorbell;
1909 	uint32_t		 cq_cons;
1910 	uint32_t		 cq_count;
1911 };
1912 
1913 struct mcx_calibration {
1914 	uint64_t		 c_timestamp;	/* previous mcx chip time */
1915 	uint64_t		 c_uptime;	/* previous kernel nanouptime */
1916 	uint64_t		 c_tbase;	/* mcx chip time */
1917 	uint64_t		 c_ubase;	/* kernel nanouptime */
1918 	uint64_t		 c_tdiff;
1919 	uint64_t		 c_udiff;
1920 };
1921 
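/*
 * intervals for the timestamp calibration callout; going by how the
 * callout is rescheduled these appear to be in seconds: one early pass
 * shortly after the interface comes up, then a slower steady state.
 */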
1922 #define MCX_CALIBRATE_FIRST    2
1923 #define MCX_CALIBRATE_NORMAL   30
1924 
1925 struct mcx_rxring {
1926 	u_int			 rxr_total;
1927 	u_int			 rxr_inuse;
1928 };
1929 
1930 MBUFQ_HEAD(mcx_mbufq);
1931 
1932 struct mcx_softc {
1933 	device_t		 sc_dev;
1934 	struct ethercom		 sc_ec;
1935 	struct ifmedia		 sc_media;
1936 	uint64_t		 sc_media_status;
1937 	uint64_t		 sc_media_active;
1938 
1939 	pci_chipset_tag_t	 sc_pc;
1940 	pci_intr_handle_t	*sc_intrs;
1941 	void			*sc_ihs[MCX_MAX_NINTR];
1942 	pcitag_t		 sc_tag;
1943 
1944 	bus_dma_tag_t		 sc_dmat;
1945 	bus_space_tag_t		 sc_memt;
1946 	bus_space_handle_t	 sc_memh;
1947 	bus_size_t		 sc_mems;
1948 
1949 	struct mcx_dmamem	 sc_cmdq_mem;
1950 	unsigned int		 sc_cmdq_mask;
1951 	unsigned int		 sc_cmdq_size;
1952 
1953 	unsigned int		 sc_cmdq_token;
1954 
1955 	struct mcx_hwmem	 sc_boot_pages;
1956 	struct mcx_hwmem	 sc_init_pages;
1957 	struct mcx_hwmem	 sc_regular_pages;
1958 
1959 	int			 sc_uar;
1960 	int			 sc_pd;
1961 	int			 sc_tdomain;
1962 	uint32_t		 sc_lkey;
1963 
1964 	struct mcx_dmamem	 sc_doorbell_mem;
1965 
1966 	int			 sc_eqn;
1967 	int			 sc_eq_cons;
1968 	struct mcx_dmamem	 sc_eq_mem;
1969 	int			 sc_hardmtu;
1970 
1971 	struct workqueue	*sc_workq;
1972 	struct work		 sc_port_change;
1973 
1974 	int			 sc_flow_table_id;
1975 #define MCX_FLOW_GROUP_PROMISC	 0
1976 #define MCX_FLOW_GROUP_ALLMULTI	 1
1977 #define MCX_FLOW_GROUP_MAC	 2
1978 #define MCX_NUM_FLOW_GROUPS	 3
1979 	int			 sc_flow_group_id[MCX_NUM_FLOW_GROUPS];
1980 	int			 sc_flow_group_size[MCX_NUM_FLOW_GROUPS];
1981 	int			 sc_flow_group_start[MCX_NUM_FLOW_GROUPS];
1982 	int			 sc_promisc_flow_enabled;
1983 	int			 sc_allmulti_flow_enabled;
1984 	int			 sc_mcast_flow_base;
1985 	int			 sc_extra_mcast;
1986 	uint8_t			 sc_mcast_flows[MCX_NUM_MCAST_FLOWS][ETHER_ADDR_LEN];
1987 
1988 	struct mcx_calibration	 sc_calibration[2];
1989 	unsigned int		 sc_calibration_gen;
1990 	callout_t		 sc_calibrate;
1991 
1992 	struct mcx_cq		 sc_cq[MCX_MAX_CQS];
1993 	int			 sc_num_cq;
1994 
1995 	/* rx */
1996 	int			 sc_tirn;
1997 	int			 sc_rqn;
1998 	struct mcx_dmamem	 sc_rq_mem;
1999 	struct mcx_slot		*sc_rx_slots;
2000 	uint32_t		*sc_rx_doorbell;
2001 
2002 	uint32_t		 sc_rx_prod;
2003 	callout_t		 sc_rx_refill;
2004 	struct mcx_rxring	 sc_rxr;
2005 
2006 	/* tx */
2007 	int			 sc_tisn;
2008 	int			 sc_sqn;
2009 	struct mcx_dmamem	 sc_sq_mem;
2010 	struct mcx_slot		*sc_tx_slots;
2011 	uint32_t		*sc_tx_doorbell;
2012 	int			 sc_bf_size;
2013 	int			 sc_bf_offset;
2014 
2015 	uint32_t		 sc_tx_cons;
2016 	uint32_t		 sc_tx_prod;
2017 
2018 	uint64_t		 sc_last_cq_db;
2019 	uint64_t		 sc_last_srq_db;
2020 };
2021 #define DEVNAME(_sc) device_xname((_sc)->sc_dev)
2022 
2023 static int	mcx_match(device_t, cfdata_t, void *);
2024 static void	mcx_attach(device_t, device_t, void *);
2025 
2026 static void	mcx_rxr_init(struct mcx_rxring *, u_int, u_int);
2027 static u_int	mcx_rxr_get(struct mcx_rxring *, u_int);
2028 static void	mcx_rxr_put(struct mcx_rxring *, u_int);
2029 static u_int	mcx_rxr_inuse(struct mcx_rxring *);
2030 
2031 static int	mcx_version(struct mcx_softc *);
2032 static int	mcx_init_wait(struct mcx_softc *);
2033 static int	mcx_enable_hca(struct mcx_softc *);
2034 static int	mcx_teardown_hca(struct mcx_softc *, uint16_t);
2035 static int	mcx_access_hca_reg(struct mcx_softc *, uint16_t, int, void *,
2036 		    int);
2037 static int	mcx_issi(struct mcx_softc *);
2038 static int	mcx_pages(struct mcx_softc *, struct mcx_hwmem *, uint16_t);
2039 static int	mcx_hca_max_caps(struct mcx_softc *);
2040 static int	mcx_hca_set_caps(struct mcx_softc *);
2041 static int	mcx_init_hca(struct mcx_softc *);
2042 static int	mcx_set_driver_version(struct mcx_softc *);
2043 static int	mcx_iff(struct mcx_softc *);
2044 static int	mcx_alloc_uar(struct mcx_softc *);
2045 static int	mcx_alloc_pd(struct mcx_softc *);
2046 static int	mcx_alloc_tdomain(struct mcx_softc *);
2047 static int	mcx_create_eq(struct mcx_softc *);
2048 static int	mcx_query_nic_vport_context(struct mcx_softc *, uint8_t *);
2049 static int	mcx_query_special_contexts(struct mcx_softc *);
2050 static int	mcx_set_port_mtu(struct mcx_softc *, int);
2051 static int	mcx_create_cq(struct mcx_softc *, int);
2052 static int	mcx_destroy_cq(struct mcx_softc *, int);
2053 static int	mcx_create_sq(struct mcx_softc *, int);
2054 static int	mcx_destroy_sq(struct mcx_softc *);
2055 static int	mcx_ready_sq(struct mcx_softc *);
2056 static int	mcx_create_rq(struct mcx_softc *, int);
2057 static int	mcx_destroy_rq(struct mcx_softc *);
2058 static int	mcx_ready_rq(struct mcx_softc *);
2059 static int	mcx_create_tir(struct mcx_softc *);
2060 static int	mcx_destroy_tir(struct mcx_softc *);
2061 static int	mcx_create_tis(struct mcx_softc *);
2062 static int	mcx_destroy_tis(struct mcx_softc *);
2063 static int	mcx_create_flow_table(struct mcx_softc *, int);
2064 static int	mcx_set_flow_table_root(struct mcx_softc *);
2065 static int	mcx_destroy_flow_table(struct mcx_softc *);
2066 static int	mcx_create_flow_group(struct mcx_softc *, int, int,
2067 		    int, int, struct mcx_flow_match *);
2068 static int	mcx_destroy_flow_group(struct mcx_softc *, int);
2069 static int	mcx_set_flow_table_entry(struct mcx_softc *, int, int,
2070 		    const uint8_t *);
2071 static int	mcx_delete_flow_table_entry(struct mcx_softc *, int, int);
2072 
2073 #if 0
2074 static int	mcx_dump_flow_table(struct mcx_softc *);
2075 static int	mcx_dump_flow_table_entry(struct mcx_softc *, int);
2076 static int	mcx_dump_flow_group(struct mcx_softc *);
2077 static int	mcx_dump_rq(struct mcx_softc *);
2078 static int	mcx_dump_sq(struct mcx_softc *);
2079 #endif
2080 
#if 0
static void	mcx_cmdq_dump(const struct mcx_cmdq_entry *);
static void	mcx_cmdq_mbox_dump(struct mcx_dmamem *, int);
#endif
2086 static void	mcx_refill(void *);
2087 static int	mcx_process_rx(struct mcx_softc *, struct mcx_cq_entry *,
2088 		    struct mcx_mbufq *, const struct mcx_calibration *);
2089 static void	mcx_process_txeof(struct mcx_softc *, struct mcx_cq_entry *,
2090 		    int *);
2091 static void	mcx_process_cq(struct mcx_softc *, struct mcx_cq *);
2092 
2093 static void	mcx_arm_cq(struct mcx_softc *, struct mcx_cq *);
2094 static void	mcx_arm_eq(struct mcx_softc *);
2095 static int	mcx_intr(void *);
2096 
2097 static int	mcx_init(struct ifnet *);
2098 static void	mcx_stop(struct ifnet *, int);
2099 static int	mcx_ioctl(struct ifnet *, u_long, void *);
2100 static void	mcx_start(struct ifnet *);
2101 static void	mcx_watchdog(struct ifnet *);
2102 static void	mcx_media_add_types(struct mcx_softc *);
2103 static void	mcx_media_status(struct ifnet *, struct ifmediareq *);
2104 static int	mcx_media_change(struct ifnet *);
2105 #if 0
2106 static int	mcx_get_sffpage(struct ifnet *, struct if_sffpage *);
2107 #endif
2108 static void	mcx_port_change(struct work *, void *);
2109 
2110 static void	mcx_calibrate_first(struct mcx_softc *);
2111 static void	mcx_calibrate(void *);
2112 
2113 static inline uint32_t
2114 		mcx_rd(struct mcx_softc *, bus_size_t);
2115 static inline void
2116 		mcx_wr(struct mcx_softc *, bus_size_t, uint32_t);
2117 static inline void
2118 		mcx_bar(struct mcx_softc *, bus_size_t, bus_size_t, int);
2119 
2120 static uint64_t	mcx_timer(struct mcx_softc *);
2121 
2122 static int	mcx_dmamem_alloc(struct mcx_softc *, struct mcx_dmamem *,
2123 		    bus_size_t, u_int align);
2124 static void	mcx_dmamem_zero(struct mcx_dmamem *);
2125 static void	mcx_dmamem_free(struct mcx_softc *, struct mcx_dmamem *);
2126 
2127 static int	mcx_hwmem_alloc(struct mcx_softc *, struct mcx_hwmem *,
2128 		    unsigned int);
2129 static void	mcx_hwmem_free(struct mcx_softc *, struct mcx_hwmem *);
2130 
2131 CFATTACH_DECL_NEW(mcx, sizeof(struct mcx_softc), mcx_match, mcx_attach, NULL, NULL);
2132 
2133 static const struct {
2134 	pci_vendor_id_t		vendor;
2135 	pci_product_id_t	product;
2136 } mcx_devices[] = {
2137 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27700 },
2138 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27710 },
2139 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27800 },
2140 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28800 },
2141 };
2142 
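/*
 * map of capability bit positions (presumably the PTYS register's
 * eth_proto_capability layout) to ifmedia subtypes; zero entries are
 * rates with no corresponding ifmedia name.
 */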
2143 static const uint64_t mcx_eth_cap_map[] = {
2144 	IFM_1000_SGMII,
2145 	IFM_1000_KX,
2146 	IFM_10G_CX4,
2147 	IFM_10G_KX4,
2148 	IFM_10G_KR,
2149 	0,
2150 	IFM_40G_CR4,
2151 	IFM_40G_KR4,
2152 	0,
2153 	0,
2154 	0,
2155 	0,
2156 	IFM_10G_T,
2157 	IFM_10G_SR,
2158 	IFM_10G_LR,
2159 	IFM_40G_SR4,
2160 	IFM_40G_LR4,
2161 	0,
2162 	IFM_50G_SR2,
2163 	0,
2164 	IFM_100G_CR4,
2165 	IFM_100G_SR4,
2166 	IFM_100G_KR4,
2167 	0,
2168 	0,
2169 	0,
2170 	0,
2171 	IFM_25G_CR,
2172 	IFM_25G_KR,
2173 	IFM_25G_SR,
2174 	IFM_50G_CR2,
2175 	IFM_50G_KR2
2176 };
2177 
2178 static int
2179 mcx_match(device_t parent, cfdata_t cf, void *aux)
2180 {
2181 	struct pci_attach_args *pa = aux;
2182 	int n;
2183 
2184 	for (n = 0; n < __arraycount(mcx_devices); n++) {
2185 		if (PCI_VENDOR(pa->pa_id) == mcx_devices[n].vendor &&
2186 		    PCI_PRODUCT(pa->pa_id) == mcx_devices[n].product)
2187 			return 1;
2188 	}
2189 
2190 	return 0;
2191 }
2192 
static void
2194 mcx_attach(device_t parent, device_t self, void *aux)
2195 {
2196 	struct mcx_softc *sc = device_private(self);
2197 	struct ifnet *ifp = &sc->sc_ec.ec_if;
2198 	struct pci_attach_args *pa = aux;
2199 	uint8_t enaddr[ETHER_ADDR_LEN];
2200 	int counts[PCI_INTR_TYPE_SIZE];
2201 	char intrbuf[PCI_INTRSTR_LEN];
2202 	pcireg_t memtype;
2203 	uint32_t r;
2204 	unsigned int cq_stride;
2205 	unsigned int cq_size;
2206 	const char *intrstr;
2207 	int i;
2208 
2209 	sc->sc_dev = self;
2210 	sc->sc_pc = pa->pa_pc;
2211 	sc->sc_tag = pa->pa_tag;
2212 	sc->sc_dmat = pa->pa_dmat;
2213 
2214 	/* Map the PCI memory space */
2215 	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MCX_HCA_BAR);
2216 	if (pci_mapreg_map(pa, MCX_HCA_BAR, memtype,
2217 	    0 /*BUS_SPACE_MAP_PREFETCHABLE*/, &sc->sc_memt, &sc->sc_memh,
2218 	    NULL, &sc->sc_mems)) {
2219 		aprint_error(": unable to map register memory\n");
2220 		return;
2221 	}
2222 
2223 	pci_aprint_devinfo(pa, "Ethernet controller");
2224 
2225 	if (mcx_version(sc) != 0) {
2226 		/* error printed by mcx_version */
2227 		goto unmap;
2228 	}
2229 
2230 	r = mcx_rd(sc, MCX_CMDQ_ADDR_LO);
2231 	cq_stride = 1 << MCX_CMDQ_LOG_STRIDE(r); /* size of the entries */
2232 	cq_size = 1 << MCX_CMDQ_LOG_SIZE(r); /* number of entries */
2233 	if (cq_size > MCX_MAX_CQE) {
2234 		aprint_error_dev(self,
2235 		    "command queue size overflow %u\n", cq_size);
2236 		goto unmap;
2237 	}
2238 	if (cq_stride < sizeof(struct mcx_cmdq_entry)) {
2239 		aprint_error_dev(self,
2240 		    "command queue entry size underflow %u\n", cq_stride);
2241 		goto unmap;
2242 	}
2243 	if (cq_stride * cq_size > MCX_PAGE_SIZE) {
2244 		aprint_error_dev(self, "command queue page overflow\n");
2245 		goto unmap;
2246 	}
2247 
2248 	if (mcx_dmamem_alloc(sc, &sc->sc_doorbell_mem, MCX_PAGE_SIZE,
2249 	    MCX_PAGE_SIZE) != 0) {
2250 		aprint_error_dev(self, "unable to allocate doorbell memory\n");
2251 		goto unmap;
2252 	}
2253 
2254 	if (mcx_dmamem_alloc(sc, &sc->sc_cmdq_mem, MCX_PAGE_SIZE,
2255 	    MCX_PAGE_SIZE) != 0) {
2256 		aprint_error_dev(self, "unable to allocate command queue\n");
2257 		goto dbfree;
2258 	}
2259 
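	/*
	 * tell the device where the command queue lives: high word
	 * first, then low, with barriers keeping the two 32-bit
	 * writes ordered.
	 */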
2260 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
2261 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint32_t), BUS_SPACE_BARRIER_WRITE);
2262 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem));
2263 	mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint32_t), BUS_SPACE_BARRIER_WRITE);
2264 
2265 	if (mcx_init_wait(sc) != 0) {
2266 		aprint_error_dev(self, "timeout waiting for init\n");
2267 		goto cqfree;
2268 	}
2269 
2270 	sc->sc_cmdq_mask = cq_size - 1;
2271 	sc->sc_cmdq_size = cq_stride;
2272 
2273 	if (mcx_enable_hca(sc) != 0) {
2274 		/* error printed by mcx_enable_hca */
2275 		goto cqfree;
2276 	}
2277 
2278 	if (mcx_issi(sc) != 0) {
2279 		/* error printed by mcx_issi */
2280 		goto teardown;
2281 	}
2282 
2283 	if (mcx_pages(sc, &sc->sc_boot_pages,
2284 	    htobe16(MCX_CMD_QUERY_PAGES_BOOT)) != 0) {
2285 		/* error printed by mcx_pages */
2286 		goto teardown;
2287 	}
2288 
2289 	if (mcx_hca_max_caps(sc) != 0) {
2290 		/* error printed by mcx_hca_max_caps */
2291 		goto teardown;
2292 	}
2293 
2294 	if (mcx_hca_set_caps(sc) != 0) {
2295 		/* error printed by mcx_hca_set_caps */
2296 		goto teardown;
2297 	}
2298 
2299 	if (mcx_pages(sc, &sc->sc_init_pages,
2300 	    htobe16(MCX_CMD_QUERY_PAGES_INIT)) != 0) {
2301 		/* error printed by mcx_pages */
2302 		goto teardown;
2303 	}
2304 
2305 	if (mcx_init_hca(sc) != 0) {
2306 		/* error printed by mcx_init_hca */
2307 		goto teardown;
2308 	}
2309 
2310 	if (mcx_pages(sc, &sc->sc_regular_pages,
2311 	    htobe16(MCX_CMD_QUERY_PAGES_REGULAR)) != 0) {
2312 		/* error printed by mcx_pages */
2313 		goto teardown;
2314 	}
2315 
2316 	/* apparently not necessary? */
2317 	if (mcx_set_driver_version(sc) != 0) {
2318 		/* error printed by mcx_set_driver_version */
2319 		goto teardown;
2320 	}
2321 
2322 	if (mcx_iff(sc) != 0) {	/* modify nic vport context */
2323 		/* error printed by mcx_iff? */
2324 		goto teardown;
2325 	}
2326 
2327 	if (mcx_alloc_uar(sc) != 0) {
2328 		/* error printed by mcx_alloc_uar */
2329 		goto teardown;
2330 	}
2331 
2332 	if (mcx_alloc_pd(sc) != 0) {
2333 		/* error printed by mcx_alloc_pd */
2334 		goto teardown;
2335 	}
2336 
2337 	if (mcx_alloc_tdomain(sc) != 0) {
2338 		/* error printed by mcx_alloc_tdomain */
2339 		goto teardown;
2340 	}
2341 
2342 	/*
2343 	 * PRM makes no mention of msi interrupts, just legacy and msi-x.
2344 	 * mellanox support tells me legacy interrupts are not supported,
2345 	 * so we're stuck with just msi-x.
2346 	 */
2347 	counts[PCI_INTR_TYPE_MSIX] = 1;
2348 	counts[PCI_INTR_TYPE_MSI] = 0;
2349 	counts[PCI_INTR_TYPE_INTX] = 0;
2350 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, PCI_INTR_TYPE_MSIX) != 0) {
2351 		aprint_error_dev(self, "unable to allocate interrupt\n");
2352 		goto teardown;
2353 	}
2354 	KASSERT(pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX);
2355 
2356 #ifdef MCX_MPSAFE
2357 	pci_intr_setattr(sc->sc_pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
2358 #endif
2359 
2360 	intrstr = pci_intr_string(sc->sc_pc, sc->sc_intrs[0], intrbuf,
2361 	    sizeof(intrbuf));
2362 	sc->sc_ihs[0] = pci_intr_establish_xname(sc->sc_pc, sc->sc_intrs[0],
2363 	    IPL_NET, mcx_intr, sc, DEVNAME(sc));
2364 	if (sc->sc_ihs[0] == NULL) {
2365 		aprint_error_dev(self, "unable to establish interrupt%s%s\n",
2366 		    intrstr ? " at " : "",
2367 		    intrstr ? intrstr : "");
2368 		goto teardown;
2369 	}
2370 
2371 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
2372 
2373 	if (mcx_create_eq(sc) != 0) {
2374 		/* error printed by mcx_create_eq */
2375 		goto teardown;
2376 	}
2377 
2378 	if (mcx_query_nic_vport_context(sc, enaddr) != 0) {
2379 		/* error printed by mcx_query_nic_vport_context */
2380 		goto teardown;
2381 	}
2382 
2383 	if (mcx_query_special_contexts(sc) != 0) {
2384 		/* error printed by mcx_query_special_contexts */
2385 		goto teardown;
2386 	}
2387 
2388 	if (mcx_set_port_mtu(sc, MCX_HARDMTU) != 0) {
2389 		/* error printed by mcx_set_port_mtu */
2390 		goto teardown;
2391 	}
2392 
2393 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2394 	    ether_sprintf(enaddr));
2395 
2396 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
2397 	ifp->if_softc = sc;
2398 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
2399 #ifdef MCX_MPSAFE
2400 	ifp->if_extflags = IFEF_MPSAFE;
2401 #endif
2402 	ifp->if_init = mcx_init;
2403 	ifp->if_stop = mcx_stop;
2404 	ifp->if_ioctl = mcx_ioctl;
2405 	ifp->if_start = mcx_start;
2406 	ifp->if_watchdog = mcx_watchdog;
2407 	ifp->if_mtu = sc->sc_hardmtu;
2408 	IFQ_SET_MAXLEN(&ifp->if_snd, 1024);
2409 	IFQ_SET_READY(&ifp->if_snd);
2410 
2411 	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
2412 
2413 	sc->sc_ec.ec_ifmedia = &sc->sc_media;
2414 	ifmedia_init(&sc->sc_media, IFM_IMASK, mcx_media_change,
2415 	    mcx_media_status);
2416 	mcx_media_add_types(sc);
2417 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
2418 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
2419 
2420 	if_attach(ifp);
2421 	if_deferred_start_init(ifp, NULL);
2422 
2423 	ether_ifattach(ifp, enaddr);
2424 
2425 	callout_init(&sc->sc_rx_refill, CALLOUT_FLAGS);
2426 	callout_setfunc(&sc->sc_rx_refill, mcx_refill, sc);
2427 	callout_init(&sc->sc_calibrate, CALLOUT_FLAGS);
2428 	callout_setfunc(&sc->sc_calibrate, mcx_calibrate, sc);
2429 
2430 	if (workqueue_create(&sc->sc_workq, "mcxportchg", mcx_port_change, sc,
2431 	    PRI_NONE, IPL_NET, 0) != 0) {
2432 		aprint_error_dev(self, "couldn't create port change workq\n");
2433 		goto teardown;
2434 	}
2435 
2436 	mcx_port_change(&sc->sc_port_change, sc);
2437 
2438 	sc->sc_flow_table_id = -1;
2439 	for (i = 0; i < MCX_NUM_FLOW_GROUPS; i++) {
2440 		sc->sc_flow_group_id[i] = -1;
2441 		sc->sc_flow_group_size[i] = 0;
2442 		sc->sc_flow_group_start[i] = 0;
2443 	}
2444 	sc->sc_extra_mcast = 0;
2445 	memset(sc->sc_mcast_flows, 0, sizeof(sc->sc_mcast_flows));
2446 	return;
2447 
2448 teardown:
2449 	mcx_teardown_hca(sc, htobe16(MCX_CMD_TEARDOWN_HCA_GRACEFUL));
2450 	/* error printed by mcx_teardown_hca, and we're already unwinding */
2451 cqfree:
2452 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
2453 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
2454 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem) |
2455 	    MCX_CMDQ_INTERFACE_DISABLED);
2456 	mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
2457 
2458 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, 0);
2459 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
2460 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_CMDQ_INTERFACE_DISABLED);
2461 
2462 	mcx_dmamem_free(sc, &sc->sc_cmdq_mem);
2463 dbfree:
2464 	mcx_dmamem_free(sc, &sc->sc_doorbell_mem);
2465 unmap:
2466 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
2467 	sc->sc_mems = 0;
2468 }
2469 
2470 static void
2471 mcx_rxr_init(struct mcx_rxring *rxr, u_int lwm __unused, u_int hwm)
2472 {
2473 	rxr->rxr_total = hwm;
2474 	rxr->rxr_inuse = 0;
2475 }
2476 
2477 static u_int
2478 mcx_rxr_get(struct mcx_rxring *rxr, u_int max)
2479 {
2480 	const u_int taken = MIN(max, rxr->rxr_total - rxr->rxr_inuse);
2481 
2482 	rxr->rxr_inuse += taken;
2483 
2484 	return taken;
2485 }
2486 
2487 static void
2488 mcx_rxr_put(struct mcx_rxring *rxr, u_int n)
2489 {
2490 	rxr->rxr_inuse -= n;
2491 }
2492 
2493 static u_int
2494 mcx_rxr_inuse(struct mcx_rxring *rxr)
2495 {
2496 	return rxr->rxr_inuse;
2497 }
2498 
2499 static int
2500 mcx_version(struct mcx_softc *sc)
2501 {
2502 	uint32_t fw0, fw1;
2503 	uint16_t cmdif;
2504 
2505 	fw0 = mcx_rd(sc, MCX_FW_VER);
2506 	fw1 = mcx_rd(sc, MCX_CMDIF_FW_SUBVER);
2507 
2508 	aprint_normal_dev(sc->sc_dev, "FW %u.%u.%04u\n", MCX_FW_VER_MAJOR(fw0),
2509 	    MCX_FW_VER_MINOR(fw0), MCX_FW_VER_SUBMINOR(fw1));
2510 
2511 	cmdif = MCX_CMDIF(fw1);
2512 	if (cmdif != MCX_CMD_IF_SUPPORTED) {
2513 		aprint_error_dev(sc->sc_dev,
2514 		    "unsupported command interface %u\n", cmdif);
2515 		return (-1);
2516 	}
2517 
2518 	return (0);
2519 }
2520 
2521 static int
2522 mcx_init_wait(struct mcx_softc *sc)
2523 {
2524 	unsigned int i;
2525 	uint32_t r;
2526 
2527 	for (i = 0; i < 2000; i++) {
2528 		r = mcx_rd(sc, MCX_STATE);
2529 		if ((r & MCX_STATE_MASK) == MCX_STATE_READY)
2530 			return (0);
2531 
2532 		delay(1000);
2533 		mcx_bar(sc, MCX_STATE, sizeof(uint32_t),
2534 		    BUS_SPACE_BARRIER_READ);
2535 	}
2536 
2537 	return (-1);
2538 }
2539 
2540 static uint8_t
2541 mcx_cmdq_poll(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
2542     unsigned int msec)
2543 {
2544 	unsigned int i;
2545 
2546 	for (i = 0; i < msec; i++) {
2547 		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
2548 		    0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);
2549 
2550 		if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
2551 		    MCX_CQ_STATUS_OWN_SW) {
2552 			if (sc->sc_eqn != 0)
2553 				mcx_intr(sc);
2554 			return (0);
2555 		}
2556 
2557 		delay(1000);
2558 	}
2559 
2560 	return (ETIMEDOUT);
2561 }
2562 
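/*
 * command queue entries and mailboxes carry an 8-bit signature,
 * computed by xoring the covered bytes together and folding the
 * result down to a byte.  these helpers accumulate and fold the xor.
 */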
2563 static uint32_t
2564 mcx_mix_u64(uint32_t xor, uint64_t u64)
2565 {
2566 	xor ^= u64 >> 32;
2567 	xor ^= u64;
2568 
2569 	return (xor);
2570 }
2571 
2572 static uint32_t
2573 mcx_mix_u32(uint32_t xor, uint32_t u32)
2574 {
2575 	xor ^= u32;
2576 
2577 	return (xor);
2578 }
2579 
2580 static uint32_t
2581 mcx_mix_u8(uint32_t xor, uint8_t u8)
2582 {
2583 	xor ^= u8;
2584 
2585 	return (xor);
2586 }
2587 
2588 static uint8_t
2589 mcx_mix_done(uint32_t xor)
2590 {
2591 	xor ^= xor >> 16;
2592 	xor ^= xor >> 8;
2593 
2594 	return (xor);
2595 }
2596 
2597 static uint8_t
2598 mcx_xor(const void *buf, size_t len)
2599 {
2600 	const uint32_t *dwords = buf;
2601 	uint32_t xor = 0xff;
2602 	size_t i;
2603 
2604 	len /= sizeof(*dwords);
2605 
2606 	for (i = 0; i < len; i++)
2607 		xor ^= dwords[i];
2608 
2609 	return (mcx_mix_done(xor));
2610 }
2611 
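/*
 * every command carries a non-zero token, echoed in its mailboxes and
 * preserved in the completed entry, so replies can be matched back to
 * requests.
 */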
2612 static uint8_t
2613 mcx_cmdq_token(struct mcx_softc *sc)
2614 {
2615 	uint8_t token;
2616 
2617 	do {
2618 		token = ++sc->sc_cmdq_token;
2619 	} while (token == 0);
2620 
2621 	return (token);
2622 }
2623 
2624 static void
2625 mcx_cmdq_init(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
2626     uint32_t ilen, uint32_t olen, uint8_t token)
2627 {
2628 	memset(cqe, 0, sc->sc_cmdq_size);
2629 
2630 	cqe->cq_type = MCX_CMDQ_TYPE_PCIE;
2631 	be32enc(&cqe->cq_input_length, ilen);
2632 	be32enc(&cqe->cq_output_length, olen);
2633 	cqe->cq_token = token;
2634 	cqe->cq_status = MCX_CQ_STATUS_OWN_HW;
2635 }
2636 
2637 static void
2638 mcx_cmdq_sign(struct mcx_cmdq_entry *cqe)
2639 {
2640 	cqe->cq_signature = ~mcx_xor(cqe, sizeof(*cqe));
2641 }
2642 
2643 static int
2644 mcx_cmdq_verify(const struct mcx_cmdq_entry *cqe)
2645 {
2646 	/* return (mcx_xor(cqe, sizeof(*cqe)) ? -1 :  0); */
2647 	return (0);
2648 }
2649 
2650 static void *
2651 mcx_cmdq_in(struct mcx_cmdq_entry *cqe)
2652 {
2653 	return (&cqe->cq_input_data);
2654 }
2655 
2656 static void *
2657 mcx_cmdq_out(struct mcx_cmdq_entry *cqe)
2658 {
2659 	return (&cqe->cq_output_data);
2660 }
2661 
2662 static void
2663 mcx_cmdq_post(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
2664     unsigned int slot)
2665 {
2666 	mcx_cmdq_sign(cqe);
2667 
2668 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
2669 	    0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_PRERW);
2670 
2671 	mcx_wr(sc, MCX_CMDQ_DOORBELL, 1U << slot);
2672 }
2673 
2674 static int
2675 mcx_enable_hca(struct mcx_softc *sc)
2676 {
2677 	struct mcx_cmdq_entry *cqe;
2678 	struct mcx_cmd_enable_hca_in *in;
2679 	struct mcx_cmd_enable_hca_out *out;
2680 	int error;
2681 	uint8_t status;
2682 
2683 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2684 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
2685 
2686 	in = mcx_cmdq_in(cqe);
2687 	in->cmd_opcode = htobe16(MCX_CMD_ENABLE_HCA);
2688 	in->cmd_op_mod = htobe16(0);
2689 	in->cmd_function_id = htobe16(0);
2690 
2691 	mcx_cmdq_post(sc, cqe, 0);
2692 
2693 	error = mcx_cmdq_poll(sc, cqe, 1000);
2694 	if (error != 0) {
2695 		printf(", hca enable timeout\n");
2696 		return (-1);
2697 	}
2698 	if (mcx_cmdq_verify(cqe) != 0) {
2699 		printf(", hca enable command corrupt\n");
2700 		return (-1);
2701 	}
2702 
2703 	status = cqe->cq_output_data[0];
2704 	if (status != MCX_CQ_STATUS_OK) {
2705 		printf(", hca enable failed (%x)\n", status);
2706 		return (-1);
2707 	}
2708 
2709 	return (0);
2710 }
2711 
2712 static int
2713 mcx_teardown_hca(struct mcx_softc *sc, uint16_t profile)
2714 {
2715 	struct mcx_cmdq_entry *cqe;
2716 	struct mcx_cmd_teardown_hca_in *in;
2717 	struct mcx_cmd_teardown_hca_out *out;
2718 	int error;
2719 	uint8_t status;
2720 
2721 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2722 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
2723 
2724 	in = mcx_cmdq_in(cqe);
2725 	in->cmd_opcode = htobe16(MCX_CMD_TEARDOWN_HCA);
2726 	in->cmd_op_mod = htobe16(0);
2727 	in->cmd_profile = profile;
2728 
2729 	mcx_cmdq_post(sc, cqe, 0);
2730 
2731 	error = mcx_cmdq_poll(sc, cqe, 1000);
2732 	if (error != 0) {
2733 		printf(", hca teardown timeout\n");
2734 		return (-1);
2735 	}
2736 	if (mcx_cmdq_verify(cqe) != 0) {
2737 		printf(", hca teardown command corrupt\n");
2738 		return (-1);
2739 	}
2740 
2741 	status = cqe->cq_output_data[0];
2742 	if (status != MCX_CQ_STATUS_OK) {
2743 		printf(", hca teardown failed (%x)\n", status);
2744 		return (-1);
2745 	}
2746 
2747 	return (0);
2748 }
2749 
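/*
 * commands with input or output too big for the queue entry use a
 * chain of mailboxes: allocate nmb of them in one dma allocation,
 * number and token them, and link each into place by patching the
 * pointer (in the cqe or the previous mailbox) that refers to it.
 */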
2750 static int
2751 mcx_cmdq_mboxes_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
2752     unsigned int nmb, uint64_t *ptr, uint8_t token)
2753 {
2754 	uint8_t *kva;
2755 	uint64_t dva;
2756 	int i;
2757 	int error;
2758 
2759 	error = mcx_dmamem_alloc(sc, mxm,
2760 	    nmb * MCX_CMDQ_MAILBOX_SIZE, MCX_CMDQ_MAILBOX_ALIGN);
2761 	if (error != 0)
2762 		return (error);
2763 
2764 	mcx_dmamem_zero(mxm);
2765 
2766 	dva = MCX_DMA_DVA(mxm);
2767 	kva = MCX_DMA_KVA(mxm);
2768 	for (i = 0; i < nmb; i++) {
2769 		struct mcx_cmdq_mailbox *mbox = (struct mcx_cmdq_mailbox *)kva;
2770 
2771 		/* patch the cqe or mbox pointing at this one */
2772 		be64enc(ptr, dva);
2773 
2774 		/* fill in this mbox */
2775 		be32enc(&mbox->mb_block_number, i);
2776 		mbox->mb_token = token;
2777 
2778 		/* move to the next one */
2779 		ptr = &mbox->mb_next_ptr;
2780 
2781 		dva += MCX_CMDQ_MAILBOX_SIZE;
2782 		kva += MCX_CMDQ_MAILBOX_SIZE;
2783 	}
2784 
2785 	return (0);
2786 }
2787 
2788 static uint32_t
2789 mcx_cmdq_mbox_ctrl_sig(const struct mcx_cmdq_mailbox *mb)
2790 {
2791 	uint32_t xor = 0xff;
2792 
2793 	/* only 3 fields get set, so mix them directly */
2794 	xor = mcx_mix_u64(xor, mb->mb_next_ptr);
2795 	xor = mcx_mix_u32(xor, mb->mb_block_number);
2796 	xor = mcx_mix_u8(xor, mb->mb_token);
2797 
2798 	return (mcx_mix_done(xor));
2799 }
2800 
2801 static void
2802 mcx_cmdq_mboxes_sign(struct mcx_dmamem *mxm, unsigned int nmb)
2803 {
2804 	uint8_t *kva;
2805 	int i;
2806 
2807 	kva = MCX_DMA_KVA(mxm);
2808 
2809 	for (i = 0; i < nmb; i++) {
2810 		struct mcx_cmdq_mailbox *mb = (struct mcx_cmdq_mailbox *)kva;
2811 		uint8_t sig = mcx_cmdq_mbox_ctrl_sig(mb);
2812 		mb->mb_ctrl_signature = sig;
2813 		mb->mb_signature = sig ^
2814 		    mcx_xor(mb->mb_data, sizeof(mb->mb_data));
2815 
2816 		kva += MCX_CMDQ_MAILBOX_SIZE;
2817 	}
2818 }
2819 
2820 static void
2821 mcx_cmdq_mboxes_sync(struct mcx_softc *sc, struct mcx_dmamem *mxm, int ops)
2822 {
2823 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(mxm),
2824 	    0, MCX_DMA_LEN(mxm), ops);
2825 }
2826 
2827 static struct mcx_cmdq_mailbox *
2828 mcx_cq_mbox(struct mcx_dmamem *mxm, unsigned int i)
2829 {
2830 	uint8_t *kva;
2831 
2832 	kva = MCX_DMA_KVA(mxm);
2833 	kva += i * MCX_CMDQ_MAILBOX_SIZE;
2834 
2835 	return ((struct mcx_cmdq_mailbox *)kva);
2836 }
2837 
2838 static inline void *
2839 mcx_cq_mbox_data(struct mcx_cmdq_mailbox *mb)
2840 {
2841 	return (&mb->mb_data);
2842 }
2843 
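/*
 * copy a flat buffer into a mailbox chain (and, below, back out of
 * one), sizeof(mb->mb_data) bytes per mailbox.
 */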
2844 static void
2845 mcx_cmdq_mboxes_copyin(struct mcx_dmamem *mxm, unsigned int nmb,
2846     void *b, size_t len)
2847 {
2848 	uint8_t *buf = b;
2849 	struct mcx_cmdq_mailbox *mb;
2850 	int i;
2851 
2852 	mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
	for (i = 0; i < nmb; i++) {
2855 		memcpy(mb->mb_data, buf, uimin(sizeof(mb->mb_data), len));
2856 
2857 		if (sizeof(mb->mb_data) >= len)
2858 			break;
2859 
2860 		buf += sizeof(mb->mb_data);
2861 		len -= sizeof(mb->mb_data);
2862 		mb++;
2863 	}
2864 }
2865 
2866 static void
2867 mcx_cmdq_mboxes_copyout(struct mcx_dmamem *mxm, int nmb, void *b, size_t len)
2868 {
2869 	uint8_t *buf = b;
2870 	struct mcx_cmdq_mailbox *mb;
2871 	int i;
2872 
2873 	mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
2874 	for (i = 0; i < nmb; i++) {
2875 		memcpy(buf, mb->mb_data, uimin(sizeof(mb->mb_data), len));
2876 
2877 		if (sizeof(mb->mb_data) >= len)
2878 			break;
2879 
2880 		buf += sizeof(mb->mb_data);
2881 		len -= sizeof(mb->mb_data);
2882 		mb++;
2883 	}
2884 }
2885 
2886 static void
2887 mcx_cq_mboxes_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
2888 {
2889 	mcx_dmamem_free(sc, mxm);
2890 }
2891 
2892 #if 0
2893 static void
2894 mcx_cmdq_dump(const struct mcx_cmdq_entry *cqe)
2895 {
2896 	unsigned int i;
2897 
2898 	printf(" type %02x, ilen %u, iptr %016llx", cqe->cq_type,
2899 	    be32dec(&cqe->cq_input_length), be64dec(&cqe->cq_input_ptr));
2900 
2901 	printf(", idata ");
2902 	for (i = 0; i < sizeof(cqe->cq_input_data); i++)
2903 		printf("%02x", cqe->cq_input_data[i]);
2904 
2905 	printf(", odata ");
2906 	for (i = 0; i < sizeof(cqe->cq_output_data); i++)
2907 		printf("%02x", cqe->cq_output_data[i]);
2908 
2909 	printf(", optr %016llx, olen %u, token %02x, sig %02x, status %02x",
2910 	    be64dec(&cqe->cq_output_ptr), be32dec(&cqe->cq_output_length),
2911 	    cqe->cq_token, cqe->cq_signature, cqe->cq_status);
2912 }
2913 
2914 static void
2915 mcx_cmdq_mbox_dump(struct mcx_dmamem *mboxes, int num)
2916 {
2917 	int i, j;
2918 	uint8_t *d;
2919 
2920 	for (i = 0; i < num; i++) {
2921 		struct mcx_cmdq_mailbox *mbox;
2922 		mbox = mcx_cq_mbox(mboxes, i);
2923 
2924 		d = mcx_cq_mbox_data(mbox);
2925 		for (j = 0; j < MCX_CMDQ_MAILBOX_DATASIZE; j++) {
2926 			if (j != 0 && (j % 16 == 0))
2927 				printf("\n");
2928 			printf("%.2x ", d[j]);
2929 		}
2930 	}
2931 }
2932 #endif
2933 
2934 static int
2935 mcx_access_hca_reg(struct mcx_softc *sc, uint16_t reg, int op, void *data,
2936     int len)
2937 {
2938 	struct mcx_dmamem mxm;
2939 	struct mcx_cmdq_entry *cqe;
2940 	struct mcx_cmd_access_reg_in *in;
2941 	struct mcx_cmd_access_reg_out *out;
2942 	uint8_t token = mcx_cmdq_token(sc);
2943 	int error, nmb;
2944 
2945 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2946 	mcx_cmdq_init(sc, cqe, sizeof(*in) + len, sizeof(*out) + len,
2947 	    token);
2948 
2949 	in = mcx_cmdq_in(cqe);
2950 	in->cmd_opcode = htobe16(MCX_CMD_ACCESS_REG);
2951 	in->cmd_op_mod = htobe16(op);
2952 	in->cmd_register_id = htobe16(reg);
2953 
2954 	nmb = howmany(len, MCX_CMDQ_MAILBOX_DATASIZE);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb, &cqe->cq_output_ptr,
	    token) != 0) {
2956 		printf(", unable to allocate access reg mailboxen\n");
2957 		return (-1);
2958 	}
2959 	cqe->cq_input_ptr = cqe->cq_output_ptr;
2960 	mcx_cmdq_mboxes_copyin(&mxm, nmb, data, len);
2961 	mcx_cmdq_mboxes_sign(&mxm, nmb);
2962 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
2963 
2964 	mcx_cmdq_post(sc, cqe, 0);
2965 	error = mcx_cmdq_poll(sc, cqe, 1000);
2966 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
2967 
2968 	if (error != 0) {
2969 		printf("%s: access reg (%s %x) timeout\n", DEVNAME(sc),
2970 		    (op == MCX_REG_OP_WRITE ? "write" : "read"), reg);
2971 		goto free;
2972 	}
2973 	error = mcx_cmdq_verify(cqe);
2974 	if (error != 0) {
2975 		printf("%s: access reg (%s %x) reply corrupt\n",
2976 		    (op == MCX_REG_OP_WRITE ? "write" : "read"), DEVNAME(sc),
2977 		    reg);
2978 		goto free;
2979 	}
2980 
2981 	out = mcx_cmdq_out(cqe);
2982 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
2983 		printf("%s: access reg (%s %x) failed (%x, %.6x)\n",
2984 		    DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
2985 		    reg, out->cmd_status, out->cmd_syndrome);
2986 		error = -1;
2987 		goto free;
2988 	}
2989 
2990 	mcx_cmdq_mboxes_copyout(&mxm, nmb, data, len);
2991 free:
2992 	mcx_dmamem_free(sc, &mxm);
2993 
2994 	return (error);
2995 }
2996 
2997 static int
mcx_set_issi(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
    unsigned int slot)
2999 {
3000 	struct mcx_cmd_set_issi_in *in;
3001 	struct mcx_cmd_set_issi_out *out;
3002 	uint8_t status;
3003 
3004 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3005 
3006 	in = mcx_cmdq_in(cqe);
3007 	in->cmd_opcode = htobe16(MCX_CMD_SET_ISSI);
3008 	in->cmd_op_mod = htobe16(0);
3009 	in->cmd_current_issi = htobe16(MCX_ISSI);
3010 
3011 	mcx_cmdq_post(sc, cqe, slot);
3012 	if (mcx_cmdq_poll(sc, cqe, 1000) != 0)
3013 		return (-1);
3014 	if (mcx_cmdq_verify(cqe) != 0)
3015 		return (-1);
3016 
3017 	status = cqe->cq_output_data[0];
3018 	if (status != MCX_CQ_STATUS_OK)
3019 		return (-1);
3020 
3021 	return (0);
3022 }
3023 
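/*
 * negotiate the command interface version: ask the firmware which
 * ISSIs it supports and try to select MCX_ISSI.  firmware that fails
 * QUERY_ISSI with BAD_OPCODE predates the mechanism and is left at
 * ISSI 0.
 */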
3024 static int
3025 mcx_issi(struct mcx_softc *sc)
3026 {
3027 	struct mcx_dmamem mxm;
3028 	struct mcx_cmdq_entry *cqe;
3029 	struct mcx_cmd_query_issi_in *in;
3030 	struct mcx_cmd_query_issi_il_out *out;
3031 	struct mcx_cmd_query_issi_mb_out *mb;
3032 	uint8_t token = mcx_cmdq_token(sc);
3033 	uint8_t status;
3034 	int error;
3035 
3036 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3037 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mb), token);
3038 
3039 	in = mcx_cmdq_in(cqe);
3040 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_ISSI);
3041 	in->cmd_op_mod = htobe16(0);
3042 
3043 	CTASSERT(sizeof(*mb) <= MCX_CMDQ_MAILBOX_DATASIZE);
3044 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
3045 	    &cqe->cq_output_ptr, token) != 0) {
3046 		printf(", unable to allocate query issi mailbox\n");
3047 		return (-1);
3048 	}
3049 	mcx_cmdq_mboxes_sign(&mxm, 1);
3050 
3051 	mcx_cmdq_post(sc, cqe, 0);
3052 	error = mcx_cmdq_poll(sc, cqe, 1000);
3053 	if (error != 0) {
3054 		printf(", query issi timeout\n");
3055 		goto free;
3056 	}
3057 	error = mcx_cmdq_verify(cqe);
3058 	if (error != 0) {
3059 		printf(", query issi reply corrupt\n");
3060 		goto free;
3061 	}
3062 
3063 	status = cqe->cq_output_data[0];
3064 	switch (status) {
3065 	case MCX_CQ_STATUS_OK:
3066 		break;
3067 	case MCX_CQ_STATUS_BAD_OPCODE:
3068 		/* use ISSI 0 */
3069 		goto free;
3070 	default:
3071 		printf(", query issi failed (%x)\n", status);
3072 		error = -1;
3073 		goto free;
3074 	}
3075 
3076 	out = mcx_cmdq_out(cqe);
3077 	if (out->cmd_current_issi == htobe16(MCX_ISSI)) {
3078 		/* use ISSI 1 */
3079 		goto free;
3080 	}
3081 
3082 	/* don't need to read cqe anymore, can be used for SET ISSI */
3083 
3084 	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3085 	CTASSERT(MCX_ISSI < NBBY);
	/* the supported ISSI bitmask is big-endian: ISSI 0-7 are in the last byte */
3087 	if (!ISSET(mb->cmd_supported_issi[79], 1 << MCX_ISSI)) {
3088 		/* use ISSI 0 */
3089 		goto free;
3090 	}
3091 
3092 	if (mcx_set_issi(sc, cqe, 0) != 0) {
3093 		/* ignore the error, just use ISSI 0 */
3094 	} else {
3095 		/* use ISSI 1 */
3096 	}
3097 
3098 free:
3099 	mcx_cq_mboxes_free(sc, &mxm);
3100 	return (error);
3101 }
3102 
3103 static int
3104 mcx_query_pages(struct mcx_softc *sc, uint16_t type,
3105     uint32_t *npages, uint16_t *func_id)
3106 {
3107 	struct mcx_cmdq_entry *cqe;
3108 	struct mcx_cmd_query_pages_in *in;
3109 	struct mcx_cmd_query_pages_out *out;
3110 
3111 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3112 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3113 
3114 	in = mcx_cmdq_in(cqe);
3115 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_PAGES);
3116 	in->cmd_op_mod = type;
3117 
3118 	mcx_cmdq_post(sc, cqe, 0);
3119 	if (mcx_cmdq_poll(sc, cqe, 1000) != 0) {
3120 		printf(", query pages timeout\n");
3121 		return (-1);
3122 	}
3123 	if (mcx_cmdq_verify(cqe) != 0) {
3124 		printf(", query pages reply corrupt\n");
3125 		return (-1);
3126 	}
3127 
3128 	out = mcx_cmdq_out(cqe);
3129 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3130 		printf(", query pages failed (%x)\n", out->cmd_status);
3131 		return (-1);
3132 	}
3133 
3134 	*func_id = out->cmd_func_id;
3135 	*npages = be32dec(&out->cmd_num_pages);
3136 
3137 	return (0);
3138 }
3139 
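/*
 * iterator that walks a loaded dma map in fixed-size steps, crossing
 * segment boundaries as needed; used below to feed page-sized chunks
 * of hwmem to the MANAGE_PAGES command.
 */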
3140 struct bus_dma_iter {
3141 	bus_dmamap_t		i_map;
3142 	bus_size_t		i_offset;
3143 	unsigned int		i_index;
3144 };
3145 
3146 static void
3147 bus_dma_iter_init(struct bus_dma_iter *i, bus_dmamap_t map)
3148 {
3149 	i->i_map = map;
3150 	i->i_offset = 0;
3151 	i->i_index = 0;
3152 }
3153 
3154 static bus_addr_t
3155 bus_dma_iter_addr(struct bus_dma_iter *i)
3156 {
3157 	return (i->i_map->dm_segs[i->i_index].ds_addr + i->i_offset);
3158 }
3159 
3160 static void
3161 bus_dma_iter_add(struct bus_dma_iter *i, bus_size_t size)
3162 {
3163 	bus_dma_segment_t *seg = i->i_map->dm_segs + i->i_index;
3164 	bus_size_t diff;
3165 
3166 	do {
3167 		diff = seg->ds_len - i->i_offset;
3168 		if (size < diff)
3169 			break;
3170 
3171 		size -= diff;
3172 
3173 		seg++;
3174 
3175 		i->i_offset = 0;
3176 		i->i_index++;
3177 	} while (size > 0);
3178 
3179 	i->i_offset += size;
3180 }
3181 
3182 static int
3183 mcx_add_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t func_id)
3184 {
3185 	struct mcx_dmamem mxm;
3186 	struct mcx_cmdq_entry *cqe;
3187 	struct mcx_cmd_manage_pages_in *in;
3188 	struct mcx_cmd_manage_pages_out *out;
3189 	unsigned int paslen, nmb, i, j, npages;
3190 	struct bus_dma_iter iter;
3191 	uint64_t *pas;
3192 	uint8_t status;
3193 	uint8_t token = mcx_cmdq_token(sc);
3194 	int error;
3195 
3196 	npages = mhm->mhm_npages;
3197 
3198 	paslen = sizeof(*pas) * npages;
3199 	nmb = howmany(paslen, MCX_CMDQ_MAILBOX_DATASIZE);
3200 
3201 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3202 	mcx_cmdq_init(sc, cqe, sizeof(*in) + paslen, sizeof(*out), token);
3203 
3204 	in = mcx_cmdq_in(cqe);
3205 	in->cmd_opcode = htobe16(MCX_CMD_MANAGE_PAGES);
3206 	in->cmd_op_mod = htobe16(MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS);
3207 	in->cmd_func_id = func_id;
3208 	be32enc(&in->cmd_input_num_entries, npages);
3209 
3210 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
3211 	    &cqe->cq_input_ptr, token) != 0) {
3212 		printf(", unable to allocate manage pages mailboxen\n");
3213 		return (-1);
3214 	}
3215 
3216 	bus_dma_iter_init(&iter, mhm->mhm_map);
3217 	for (i = 0; i < nmb; i++) {
3218 		unsigned int lim;
3219 
3220 		pas = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, i));
3221 		lim = uimin(MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas), npages);
3222 
3223 		for (j = 0; j < lim; j++) {
3224 			be64enc(&pas[j], bus_dma_iter_addr(&iter));
3225 			bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
3226 		}
3227 
3228 		npages -= lim;
3229 	}
3230 
3231 	mcx_cmdq_mboxes_sign(&mxm, nmb);
3232 
3233 	mcx_cmdq_post(sc, cqe, 0);
3234 	error = mcx_cmdq_poll(sc, cqe, 1000);
3235 	if (error != 0) {
3236 		printf(", manage pages timeout\n");
3237 		goto free;
3238 	}
3239 	error = mcx_cmdq_verify(cqe);
3240 	if (error != 0) {
3241 		printf(", manage pages reply corrupt\n");
3242 		goto free;
3243 	}
3244 
3245 	status = cqe->cq_output_data[0];
3246 	if (status != MCX_CQ_STATUS_OK) {
3247 		printf(", manage pages failed (%x)\n", status);
3248 		error = -1;
3249 		goto free;
3250 	}
3251 
3252 free:
3253 	mcx_dmamem_free(sc, &mxm);
3254 
3255 	return (error);
3256 }
3257 
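/*
 * ask the firmware how many pages of the given type it wants, then
 * allocate them and hand them all over in one MANAGE_PAGES command.
 */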
3258 static int
3259 mcx_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t type)
3260 {
3261 	uint32_t npages;
3262 	uint16_t func_id;
3263 
3264 	if (mcx_query_pages(sc, type, &npages, &func_id) != 0) {
3265 		/* error printed by mcx_query_pages */
3266 		return (-1);
3267 	}
3268 
3269 	if (npages == 0)
3270 		return (0);
3271 
3272 	if (mcx_hwmem_alloc(sc, mhm, npages) != 0) {
3273 		printf(", unable to allocate hwmem\n");
3274 		return (-1);
3275 	}
3276 
3277 	if (mcx_add_pages(sc, mhm, func_id) != 0) {
3278 		printf(", unable to add hwmem\n");
3279 		goto free;
3280 	}
3281 
3282 	return (0);
3283 
3284 free:
3285 	mcx_hwmem_free(sc, mhm);
3286 
3287 	return (-1);
3288 }
3289 
3290 static int
3291 mcx_hca_max_caps(struct mcx_softc *sc)
3292 {
3293 	struct mcx_dmamem mxm;
3294 	struct mcx_cmdq_entry *cqe;
3295 	struct mcx_cmd_query_hca_cap_in *in;
3296 	struct mcx_cmd_query_hca_cap_out *out;
3297 	struct mcx_cmdq_mailbox *mb;
3298 	struct mcx_cap_device *hca;
3299 	uint8_t status;
3300 	uint8_t token = mcx_cmdq_token(sc);
3301 	int error;
3302 
3303 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3304 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3305 	    token);
3306 
3307 	in = mcx_cmdq_in(cqe);
3308 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
3309 	in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_MAX |
3310 	    MCX_CMD_QUERY_HCA_CAP_DEVICE);
3311 
3312 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
3313 	    &cqe->cq_output_ptr, token) != 0) {
3314 		printf(", unable to allocate query hca caps mailboxen\n");
3315 		return (-1);
3316 	}
3317 	mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
3318 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3319 
3320 	mcx_cmdq_post(sc, cqe, 0);
3321 	error = mcx_cmdq_poll(sc, cqe, 1000);
3322 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3323 
3324 	if (error != 0) {
3325 		printf(", query hca caps timeout\n");
3326 		goto free;
3327 	}
3328 	error = mcx_cmdq_verify(cqe);
3329 	if (error != 0) {
3330 		printf(", query hca caps reply corrupt\n");
3331 		goto free;
3332 	}
3333 
3334 	status = cqe->cq_output_data[0];
3335 	if (status != MCX_CQ_STATUS_OK) {
3336 		printf(", query hca caps failed (%x)\n", status);
3337 		error = -1;
3338 		goto free;
3339 	}
3340 
3341 	mb = mcx_cq_mbox(&mxm, 0);
3342 	hca = mcx_cq_mbox_data(mb);
3343 
3344 	if (hca->log_pg_sz > PAGE_SHIFT) {
3345 		printf(", minimum system page shift %u is too large\n",
3346 		    hca->log_pg_sz);
3347 		error = -1;
3348 		goto free;
3349 	}
3350 	/*
3351 	 * blueflame register is split into two buffers, and we must alternate
3352 	 * between the two of them.
3353 	 */
3354 	sc->sc_bf_size = (1 << hca->log_bf_reg_size) / 2;
3355 
3356 free:
3357 	mcx_dmamem_free(sc, &mxm);
3358 
3359 	return (error);
3360 }
3361 
3362 static int
3363 mcx_hca_set_caps(struct mcx_softc *sc)
3364 {
3365 	struct mcx_dmamem mxm;
3366 	struct mcx_cmdq_entry *cqe;
3367 	struct mcx_cmd_query_hca_cap_in *in;
3368 	struct mcx_cmd_query_hca_cap_out *out;
3369 	struct mcx_cmdq_mailbox *mb;
3370 	struct mcx_cap_device *hca;
3371 	uint8_t status;
3372 	uint8_t token = mcx_cmdq_token(sc);
3373 	int error;
3374 
3375 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3376 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3377 	    token);
3378 
3379 	in = mcx_cmdq_in(cqe);
3380 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
3381 	in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_CURRENT |
3382 	    MCX_CMD_QUERY_HCA_CAP_DEVICE);
3383 
3384 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
3385 	    &cqe->cq_output_ptr, token) != 0) {
3386 		printf(", unable to allocate manage pages mailboxen\n");
3387 		return (-1);
3388 	}
3389 	mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
3390 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3391 
3392 	mcx_cmdq_post(sc, cqe, 0);
3393 	error = mcx_cmdq_poll(sc, cqe, 1000);
3394 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3395 
3396 	if (error != 0) {
3397 		printf(", query hca caps timeout\n");
3398 		goto free;
3399 	}
3400 	error = mcx_cmdq_verify(cqe);
3401 	if (error != 0) {
3402 		printf(", query hca caps reply corrupt\n");
3403 		goto free;
3404 	}
3405 
3406 	status = cqe->cq_output_data[0];
3407 	if (status != MCX_CQ_STATUS_OK) {
3408 		printf(", query hca caps failed (%x)\n", status);
3409 		error = -1;
3410 		goto free;
3411 	}
3412 
3413 	mb = mcx_cq_mbox(&mxm, 0);
3414 	hca = mcx_cq_mbox_data(mb);
3415 
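	/*
	 * note this updates only our mailbox copy of the current caps;
	 * no SET_HCA_CAP command is issued afterwards.
	 */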
3416 	hca->log_pg_sz = PAGE_SHIFT;
3417 
3418 free:
3419 	mcx_dmamem_free(sc, &mxm);
3420 
3421 	return (error);
3422 }
3423 
3424 
3425 static int
3426 mcx_init_hca(struct mcx_softc *sc)
3427 {
3428 	struct mcx_cmdq_entry *cqe;
3429 	struct mcx_cmd_init_hca_in *in;
3430 	struct mcx_cmd_init_hca_out *out;
3431 	int error;
3432 	uint8_t status;
3433 
3434 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3435 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3436 
3437 	in = mcx_cmdq_in(cqe);
3438 	in->cmd_opcode = htobe16(MCX_CMD_INIT_HCA);
3439 	in->cmd_op_mod = htobe16(0);
3440 
3441 	mcx_cmdq_post(sc, cqe, 0);
3442 
3443 	error = mcx_cmdq_poll(sc, cqe, 1000);
3444 	if (error != 0) {
3445 		printf(", hca init timeout\n");
3446 		return (-1);
3447 	}
3448 	if (mcx_cmdq_verify(cqe) != 0) {
3449 		printf(", hca init command corrupt\n");
3450 		return (-1);
3451 	}
3452 
3453 	status = cqe->cq_output_data[0];
3454 	if (status != MCX_CQ_STATUS_OK) {
3455 		printf(", hca init failed (%x)\n", status);
3456 		return (-1);
3457 	}
3458 
3459 	return (0);
3460 }
3461 
3462 static int
3463 mcx_set_driver_version(struct mcx_softc *sc)
3464 {
3465 	struct mcx_dmamem mxm;
3466 	struct mcx_cmdq_entry *cqe;
3467 	struct mcx_cmd_set_driver_version_in *in;
3468 	struct mcx_cmd_set_driver_version_out *out;
3469 	int error;
3470 	int token;
3471 	uint8_t status;
3472 
3473 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3474 	token = mcx_cmdq_token(sc);
3475 	mcx_cmdq_init(sc, cqe, sizeof(*in) +
3476 	    sizeof(struct mcx_cmd_set_driver_version), sizeof(*out), token);
3477 
3478 	in = mcx_cmdq_in(cqe);
3479 	in->cmd_opcode = htobe16(MCX_CMD_SET_DRIVER_VERSION);
3480 	in->cmd_op_mod = htobe16(0);
3481 
3482 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
3483 	    &cqe->cq_input_ptr, token) != 0) {
3484 		printf(", unable to allocate set driver version mailboxen\n");
3485 		return (-1);
3486 	}
3487 	strlcpy(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)),
3488 	    "OpenBSD,mcx,1.000.000000", MCX_CMDQ_MAILBOX_DATASIZE);
3489 
3490 	mcx_cmdq_mboxes_sign(&mxm, 1);
3491 	mcx_cmdq_post(sc, cqe, 0);
3492 
3493 	error = mcx_cmdq_poll(sc, cqe, 1000);
3494 	if (error != 0) {
3495 		printf(", set driver version timeout\n");
3496 		goto free;
3497 	}
3498 	if (mcx_cmdq_verify(cqe) != 0) {
3499 		printf(", set driver version command corrupt\n");
3500 		goto free;
3501 	}
3502 
3503 	status = cqe->cq_output_data[0];
3504 	if (status != MCX_CQ_STATUS_OK) {
3505 		printf(", set driver version failed (%x)\n", status);
3506 		error = -1;
3507 		goto free;
3508 	}
3509 
3510 free:
3511 	mcx_dmamem_free(sc, &mxm);
3512 
3513 	return (error);
3514 }
3515 
3516 static int
3517 mcx_iff(struct mcx_softc *sc)
3518 {
3519 	struct ifnet *ifp = &sc->sc_ec.ec_if;
3520 	struct mcx_dmamem mxm;
3521 	struct mcx_cmdq_entry *cqe;
3522 	struct mcx_cmd_modify_nic_vport_context_in *in;
3523 	struct mcx_cmd_modify_nic_vport_context_out *out;
3524 	struct mcx_nic_vport_ctx *ctx;
3525 	int error;
3526 	int token;
3527 	int insize;
3528 
3529 	/* enable or disable the promisc flow */
3530 	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
3531 		if (sc->sc_promisc_flow_enabled == 0) {
3532 			mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC,
3533 			    0, NULL);
3534 			sc->sc_promisc_flow_enabled = 1;
3535 		}
3536 	} else if (sc->sc_promisc_flow_enabled != 0) {
3537 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
3538 		sc->sc_promisc_flow_enabled = 0;
3539 	}
3540 
3541 	/* enable or disable the all-multicast flow */
3542 	if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
3543 		if (sc->sc_allmulti_flow_enabled == 0) {
3544 			uint8_t mcast[ETHER_ADDR_LEN];
3545 
3546 			memset(mcast, 0, sizeof(mcast));
3547 			mcast[0] = 0x01;
3548 			mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI,
3549 			    0, mcast);
3550 			sc->sc_allmulti_flow_enabled = 1;
3551 		}
3552 	} else if (sc->sc_allmulti_flow_enabled != 0) {
3553 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
3554 		sc->sc_allmulti_flow_enabled = 0;
3555 	}
3556 
3557 	insize = sizeof(struct mcx_nic_vport_ctx) + 240;
3558 
3559 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3560 	token = mcx_cmdq_token(sc);
3561 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
3562 
3563 	in = mcx_cmdq_in(cqe);
3564 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_NIC_VPORT_CONTEXT);
3565 	in->cmd_op_mod = htobe16(0);
3566 	in->cmd_field_select = htobe32(
3567 	    MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC |
3568 	    MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU);
3569 
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr,
	    token) != 0) {
		printf(", unable to allocate modify nic vport context"
		    " mailboxen\n");
3572 		return (-1);
3573 	}
3574 	ctx = (struct mcx_nic_vport_ctx *)
3575 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 240);
3576 	ctx->vp_mtu = htobe32(sc->sc_hardmtu);
3577 	/*
3578 	 * always leave promisc-all enabled on the vport since we can't give it
3579 	 * a vlan list, and we're already doing multicast filtering in the flow
3580 	 * table.
3581 	 */
3582 	ctx->vp_flags = htobe16(MCX_NIC_VPORT_CTX_PROMISC_ALL);
3583 
3584 	mcx_cmdq_mboxes_sign(&mxm, 1);
3585 	mcx_cmdq_post(sc, cqe, 0);
3586 
3587 	error = mcx_cmdq_poll(sc, cqe, 1000);
3588 	if (error != 0) {
3589 		printf(", modify nic vport context timeout\n");
3590 		goto free;
3591 	}
3592 	if (mcx_cmdq_verify(cqe) != 0) {
3593 		printf(", modify nic vport context command corrupt\n");
3594 		goto free;
3595 	}
3596 
3597 	out = mcx_cmdq_out(cqe);
3598 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3599 		printf(", modify nic vport context failed (%x, %x)\n",
3600 		    out->cmd_status, out->cmd_syndrome);
3601 		error = -1;
3602 		goto free;
3603 	}
3604 
3605 free:
3606 	mcx_dmamem_free(sc, &mxm);
3607 
3608 	return (error);
3609 }
3610 
3611 static int
3612 mcx_alloc_uar(struct mcx_softc *sc)
3613 {
3614 	struct mcx_cmdq_entry *cqe;
3615 	struct mcx_cmd_alloc_uar_in *in;
3616 	struct mcx_cmd_alloc_uar_out *out;
3617 	int error;
3618 
3619 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3620 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3621 
3622 	in = mcx_cmdq_in(cqe);
3623 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_UAR);
3624 	in->cmd_op_mod = htobe16(0);
3625 
3626 	mcx_cmdq_post(sc, cqe, 0);
3627 
3628 	error = mcx_cmdq_poll(sc, cqe, 1000);
3629 	if (error != 0) {
3630 		printf(", alloc uar timeout\n");
3631 		return (-1);
3632 	}
3633 	if (mcx_cmdq_verify(cqe) != 0) {
3634 		printf(", alloc uar command corrupt\n");
3635 		return (-1);
3636 	}
3637 
3638 	out = mcx_cmdq_out(cqe);
3639 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3640 		printf(", alloc uar failed (%x)\n", out->cmd_status);
3641 		return (-1);
3642 	}
3643 
3644 	sc->sc_uar = be32toh(out->cmd_uar);
3645 
3646 	return (0);
3647 }
3648 
3649 static int
3650 mcx_create_eq(struct mcx_softc *sc)
3651 {
3652 	struct mcx_cmdq_entry *cqe;
3653 	struct mcx_dmamem mxm;
3654 	struct mcx_cmd_create_eq_in *in;
3655 	struct mcx_cmd_create_eq_mb_in *mbin;
3656 	struct mcx_cmd_create_eq_out *out;
3657 	struct mcx_eq_entry *eqe;
3658 	int error;
3659 	uint64_t *pas;
3660 	int insize, npages, paslen, i, token;
3661 
3662 	sc->sc_eq_cons = 0;
3663 
3664 	npages = howmany((1 << MCX_LOG_EQ_SIZE) * sizeof(struct mcx_eq_entry),
3665 	    MCX_PAGE_SIZE);
3666 	paslen = npages * sizeof(*pas);
3667 	insize = sizeof(struct mcx_cmd_create_eq_mb_in) + paslen;
3668 
3669 	if (mcx_dmamem_alloc(sc, &sc->sc_eq_mem, npages * MCX_PAGE_SIZE,
3670 	    MCX_PAGE_SIZE) != 0) {
3671 		printf(", unable to allocate event queue memory\n");
3672 		return (-1);
3673 	}
3674 
3675 	eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&sc->sc_eq_mem);
3676 	for (i = 0; i < (1 << MCX_LOG_EQ_SIZE); i++) {
3677 		eqe[i].eq_owner = MCX_EQ_ENTRY_OWNER_INIT;
3678 	}
3679 
3680 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3681 	token = mcx_cmdq_token(sc);
3682 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
3683 
3684 	in = mcx_cmdq_in(cqe);
3685 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_EQ);
3686 	in->cmd_op_mod = htobe16(0);
3687 
	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
	    &cqe->cq_input_ptr, token) != 0) {
3690 		printf(", unable to allocate create eq mailboxen\n");
3691 		return (-1);
3692 	}
3693 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3694 	mbin->cmd_eq_ctx.eq_uar_size = htobe32(
3695 	    (MCX_LOG_EQ_SIZE << MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT) | sc->sc_uar);
3696 	mbin->cmd_event_bitmask = htobe64(
3697 	    (1ull << MCX_EVENT_TYPE_INTERNAL_ERROR) |
3698 	    (1ull << MCX_EVENT_TYPE_PORT_CHANGE) |
3699 	    (1ull << MCX_EVENT_TYPE_CMD_COMPLETION) |
3700 	    (1ull << MCX_EVENT_TYPE_PAGE_REQUEST));
3701 
3702 	/* physical addresses follow the mailbox in data */
3703 	pas = (uint64_t *)(mbin + 1);
3704 	for (i = 0; i < npages; i++) {
3705 		pas[i] = htobe64(MCX_DMA_DVA(&sc->sc_eq_mem) +
3706 		    (i * MCX_PAGE_SIZE));
3707 	}
3708 	mcx_cmdq_mboxes_sign(&mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE));
3709 	mcx_cmdq_post(sc, cqe, 0);
3710 
3711 	error = mcx_cmdq_poll(sc, cqe, 1000);
3712 	if (error != 0) {
3713 		printf(", create eq timeout\n");
3714 		goto free;
3715 	}
3716 	if (mcx_cmdq_verify(cqe) != 0) {
3717 		printf(", create eq command corrupt\n");
3718 		goto free;
3719 	}
3720 
3721 	out = mcx_cmdq_out(cqe);
3722 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3723 		printf(", create eq failed (%x, %x)\n", out->cmd_status,
3724 		    be32toh(out->cmd_syndrome));
3725 		error = -1;
3726 		goto free;
3727 	}
3728 
3729 	sc->sc_eqn = be32toh(out->cmd_eqn);
3730 	mcx_arm_eq(sc);
3731 free:
3732 	mcx_dmamem_free(sc, &mxm);
3733 	return (error);
3734 }
3735 
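/*
 * Allocate the protection domain referenced by the work queues
 * created below.
 */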
3736 static int
3737 mcx_alloc_pd(struct mcx_softc *sc)
3738 {
3739 	struct mcx_cmdq_entry *cqe;
3740 	struct mcx_cmd_alloc_pd_in *in;
3741 	struct mcx_cmd_alloc_pd_out *out;
3742 	int error;
3743 
3744 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3745 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3746 
3747 	in = mcx_cmdq_in(cqe);
3748 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_PD);
3749 	in->cmd_op_mod = htobe16(0);
3750 
3751 	mcx_cmdq_post(sc, cqe, 0);
3752 
3753 	error = mcx_cmdq_poll(sc, cqe, 1000);
3754 	if (error != 0) {
3755 		printf(", alloc pd timeout\n");
3756 		return (-1);
3757 	}
3758 	if (mcx_cmdq_verify(cqe) != 0) {
3759 		printf(", alloc pd command corrupt\n");
3760 		return (-1);
3761 	}
3762 
3763 	out = mcx_cmdq_out(cqe);
3764 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3765 		printf(", alloc pd failed (%x)\n", out->cmd_status);
3766 		return (-1);
3767 	}
3768 
3769 	sc->sc_pd = be32toh(out->cmd_pd);
3770 	return (0);
3771 }
3772 
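/*
 * Allocate the transport domain that the TIR and TIS objects live in.
 */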
3773 static int
3774 mcx_alloc_tdomain(struct mcx_softc *sc)
3775 {
3776 	struct mcx_cmdq_entry *cqe;
3777 	struct mcx_cmd_alloc_td_in *in;
3778 	struct mcx_cmd_alloc_td_out *out;
3779 	int error;
3780 
3781 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3782 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3783 
3784 	in = mcx_cmdq_in(cqe);
3785 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_TRANSPORT_DOMAIN);
3786 	in->cmd_op_mod = htobe16(0);
3787 
3788 	mcx_cmdq_post(sc, cqe, 0);
3789 
3790 	error = mcx_cmdq_poll(sc, cqe, 1000);
3791 	if (error != 0) {
3792 		printf(", alloc transport domain timeout\n");
3793 		return (-1);
3794 	}
3795 	if (mcx_cmdq_verify(cqe) != 0) {
3796 		printf(", alloc transport domain command corrupt\n");
3797 		return (-1);
3798 	}
3799 
3800 	out = mcx_cmdq_out(cqe);
3801 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3802 		printf(", alloc transport domain failed (%x)\n",
3803 		    out->cmd_status);
3804 		return (-1);
3805 	}
3806 
3807 	sc->sc_tdomain = be32toh(out->cmd_tdomain);
3808 	return (0);
3809 }
3810 
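/*
 * Read the NIC vport context to recover the permanent MAC address.
 * vp_perm_addr is an 8-byte field with the MAC in its low six bytes,
 * hence the two-byte offset in the copy loop.
 */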
3811 static int
3812 mcx_query_nic_vport_context(struct mcx_softc *sc, uint8_t *enaddr)
3813 {
3814 	struct mcx_dmamem mxm;
3815 	struct mcx_cmdq_entry *cqe;
3816 	struct mcx_cmd_query_nic_vport_context_in *in;
3817 	struct mcx_cmd_query_nic_vport_context_out *out;
3818 	struct mcx_nic_vport_ctx *ctx;
3819 	uint8_t *addr;
3820 	int error, token, i;
3821 
3822 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3823 	token = mcx_cmdq_token(sc);
3824 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx), token);
3825 
3826 	in = mcx_cmdq_in(cqe);
3827 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_NIC_VPORT_CONTEXT);
3828 	in->cmd_op_mod = htobe16(0);
3829 	in->cmd_allowed_list_type = 0;
3830 
3831 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
3832 		printf(", unable to allocate query nic vport context mailboxen\n");
3833 		return (-1);
3834 	}
3835 	mcx_cmdq_mboxes_sign(&mxm, 1);
3836 	mcx_cmdq_post(sc, cqe, 0);
3837 
3838 	error = mcx_cmdq_poll(sc, cqe, 1000);
3839 	if (error != 0) {
3840 		printf(", query nic vport context timeout\n");
3841 		goto free;
3842 	}
3843 	if (mcx_cmdq_verify(cqe) != 0) {
3844 		printf(", query nic vport context command corrupt\n");
		error = -1;
3845 		goto free;
3846 	}
3847 
3848 	out = mcx_cmdq_out(cqe);
3849 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3850 		printf(", query nic vport context failed (%x, %x)\n",
3851 		    out->cmd_status, be32toh(out->cmd_syndrome));
3852 		error = -1;
3853 		goto free;
3854 	}
3855 
3856 	ctx = (struct mcx_nic_vport_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
3857 	addr = (uint8_t *)&ctx->vp_perm_addr;
3858 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
3859 		enaddr[i] = addr[i + 2];
3860 	}
3861 free:
3862 	mcx_dmamem_free(sc, &mxm);
3863 
3864 	return (error);
3865 }
3866 
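/*
 * Fetch the reserved lkey, used later when constructing work queue
 * entries.
 */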
3867 static int
3868 mcx_query_special_contexts(struct mcx_softc *sc)
3869 {
3870 	struct mcx_cmdq_entry *cqe;
3871 	struct mcx_cmd_query_special_ctx_in *in;
3872 	struct mcx_cmd_query_special_ctx_out *out;
3873 	int error;
3874 
3875 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3876 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3877 
3878 	in = mcx_cmdq_in(cqe);
3879 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_SPECIAL_CONTEXTS);
3880 	in->cmd_op_mod = htobe16(0);
3881 
3882 	mcx_cmdq_post(sc, cqe, 0);
3883 
3884 	error = mcx_cmdq_poll(sc, cqe, 1000);
3885 	if (error != 0) {
3886 		printf(", query special contexts timeout\n");
3887 		return (-1);
3888 	}
3889 	if (mcx_cmdq_verify(cqe) != 0) {
3890 		printf(", query special contexts command corrupt\n");
3891 		return (-1);
3892 	}
3893 
3894 	out = mcx_cmdq_out(cqe);
3895 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3896 		printf(", query special contexts failed (%x)\n",
3897 		    out->cmd_status);
3898 		return (-1);
3899 	}
3900 
3901 	sc->sc_lkey = be32toh(out->cmd_resd_lkey);
3902 	return (0);
3903 }
3904 
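/*
 * Program the port MTU through the PMTU register, clamping the
 * requested value to the maximum the port reports.
 */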
3905 static int
3906 mcx_set_port_mtu(struct mcx_softc *sc, int mtu)
3907 {
3908 	struct mcx_reg_pmtu pmtu;
3909 	int error;
3910 
3911 	/* read max mtu */
3912 	memset(&pmtu, 0, sizeof(pmtu));
3913 	pmtu.rp_local_port = 1;
3914 	error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_READ, &pmtu,
3915 	    sizeof(pmtu));
3916 	if (error != 0) {
3917 		printf(", unable to get port MTU\n");
3918 		return error;
3919 	}
3920 
3921 	mtu = uimin(mtu, be16toh(pmtu.rp_max_mtu));
3922 	pmtu.rp_admin_mtu = htobe16(mtu);
3923 	error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_WRITE, &pmtu,
3924 	    sizeof(pmtu));
3925 	if (error != 0) {
3926 		printf(", unable to set port MTU\n");
3927 		return error;
3928 	}
3929 
3930 	sc->sc_hardmtu = mtu;
3931 	return 0;
3932 }
3933 
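/*
 * Create a completion queue attached to the given event queue.  The
 * ring is pre-marked hardware-owned, and the CQ context carries the
 * interrupt moderation settings and the location of this queue's
 * doorbell record.
 */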
3934 static int
3935 mcx_create_cq(struct mcx_softc *sc, int eqn)
3936 {
3937 	struct mcx_cmdq_entry *cmde;
3938 	struct mcx_cq_entry *cqe;
3939 	struct mcx_cq *cq;
3940 	struct mcx_dmamem mxm;
3941 	struct mcx_cmd_create_cq_in *in;
3942 	struct mcx_cmd_create_cq_mb_in *mbin;
3943 	struct mcx_cmd_create_cq_out *out;
3944 	int error;
3945 	uint64_t *pas;
3946 	int insize, npages, paslen, i, token;
3947 
3948 	if (sc->sc_num_cq >= MCX_MAX_CQS) {
3949 		printf("%s: tried to create too many cqs\n", DEVNAME(sc));
3950 		return (-1);
3951 	}
3952 	cq = &sc->sc_cq[sc->sc_num_cq];
3953 
3954 	npages = howmany((1 << MCX_LOG_CQ_SIZE) * sizeof(struct mcx_cq_entry),
3955 	    MCX_PAGE_SIZE);
3956 	paslen = npages * sizeof(*pas);
3957 	insize = sizeof(struct mcx_cmd_create_cq_mb_in) + paslen;
3958 
3959 	if (mcx_dmamem_alloc(sc, &cq->cq_mem, npages * MCX_PAGE_SIZE,
3960 	    MCX_PAGE_SIZE) != 0) {
3961 		printf("%s: unable to allocate completion queue memory\n",
3962 		    DEVNAME(sc));
3963 		return (-1);
3964 	}
3965 	cqe = MCX_DMA_KVA(&cq->cq_mem);
3966 	for (i = 0; i < (1 << MCX_LOG_CQ_SIZE); i++) {
3967 		cqe[i].cq_opcode_owner = MCX_CQ_ENTRY_FLAG_OWNER;
3968 	}
3969 
3970 	cmde = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3971 	token = mcx_cmdq_token(sc);
3972 	mcx_cmdq_init(sc, cmde, sizeof(*in) + insize, sizeof(*out), token);
3973 
3974 	in = mcx_cmdq_in(cmde);
3975 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_CQ);
3976 	in->cmd_op_mod = htobe16(0);
3977 
3978 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
3979 	    &cmde->cq_input_ptr, token) != 0) {
3980 		printf("%s: unable to allocate create cq mailboxen\n", DEVNAME(sc));
3981 		error = -1;
3982 		goto free;
3983 	}
3984 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3985 	mbin->cmd_cq_ctx.cq_uar_size = htobe32(
3986 	    (MCX_LOG_CQ_SIZE << MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT) | sc->sc_uar);
3987 	mbin->cmd_cq_ctx.cq_eqn = htobe32(eqn);
3988 	mbin->cmd_cq_ctx.cq_period_max_count = htobe32(
3989 	    (MCX_CQ_MOD_PERIOD << MCX_CQ_CTX_PERIOD_SHIFT) |
3990 	    MCX_CQ_MOD_COUNTER);
3991 	mbin->cmd_cq_ctx.cq_doorbell = htobe64(
3992 	    MCX_DMA_DVA(&sc->sc_doorbell_mem) +
3993 	    MCX_CQ_DOORBELL_OFFSET + (MCX_CQ_DOORBELL_SIZE * sc->sc_num_cq));
3994 
3995 	/* physical addresses follow the context in the mailbox input data */
3996 	pas = (uint64_t *)(mbin + 1);
3997 	for (i = 0; i < npages; i++) {
3998 		pas[i] = htobe64(MCX_DMA_DVA(&cq->cq_mem) + (i * MCX_PAGE_SIZE));
3999 	}
4000 	mcx_cmdq_post(sc, cmde, 0);
4001 
4002 	error = mcx_cmdq_poll(sc, cmde, 1000);
4003 	if (error != 0) {
4004 		printf("%s: create cq timeout\n", DEVNAME(sc));
4005 		goto free;
4006 	}
4007 	if (mcx_cmdq_verify(cmde) != 0) {
4008 		printf("%s: create cq command corrupt\n", DEVNAME(sc));
		error = -1;
4009 		goto free;
4010 	}
4011 
4012 	out = mcx_cmdq_out(cmde);
4013 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4014 		printf("%s: create cq failed (%x, %x)\n", DEVNAME(sc),
4015 		    out->cmd_status, be32toh(out->cmd_syndrome));
4016 		error = -1;
4017 		goto free;
4018 	}
4019 
4020 	cq->cq_n = be32toh(out->cmd_cqn);
4021 	cq->cq_cons = 0;
4022 	cq->cq_count = 0;
4023 	cq->cq_doorbell = (void *)((uint8_t *)MCX_DMA_KVA(&sc->sc_doorbell_mem) +
4024 	    MCX_CQ_DOORBELL_OFFSET + (MCX_CQ_DOORBELL_SIZE * sc->sc_num_cq));
4025 	mcx_arm_cq(sc, cq);
4026 	sc->sc_num_cq++;
4027 
4028 free:
4029 	mcx_dmamem_free(sc, &mxm);
4030 	return (error);
4031 }
4032 
4033 static int
4034 mcx_destroy_cq(struct mcx_softc *sc, int index)
4035 {
4036 	struct mcx_cmdq_entry *cqe;
4037 	struct mcx_cmd_destroy_cq_in *in;
4038 	struct mcx_cmd_destroy_cq_out *out;
4039 	int error;
4040 	int token;
4041 
4042 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4043 	token = mcx_cmdq_token(sc);
4044 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4045 
4046 	in = mcx_cmdq_in(cqe);
4047 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_CQ);
4048 	in->cmd_op_mod = htobe16(0);
4049 	in->cmd_cqn = htobe32(sc->sc_cq[index].cq_n);
4050 
4051 	mcx_cmdq_post(sc, cqe, 0);
4052 	error = mcx_cmdq_poll(sc, cqe, 1000);
4053 	if (error != 0) {
4054 		printf("%s: destroy cq timeout\n", DEVNAME(sc));
4055 		return error;
4056 	}
4057 	if (mcx_cmdq_verify(cqe) != 0) {
4058 		printf("%s: destroy cq command corrupt\n", DEVNAME(sc));
4059 		return -1;
4060 	}
4061 
4062 	out = mcx_cmdq_out(cqe);
4063 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4064 		printf("%s: destroy cq failed (%x, %x)\n", DEVNAME(sc),
4065 		    out->cmd_status, be32toh(out->cmd_syndrome));
4066 		return -1;
4067 	}
4068 
4069 	sc->sc_cq[index].cq_n = 0;
4070 	mcx_dmamem_free(sc, &sc->sc_cq[index].cq_mem);
4071 	sc->sc_cq[index].cq_cons = 0;
4072 	sc->sc_cq[index].cq_count = 0;
4073 	return 0;
4074 }
4075 
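/*
 * Create the receive queue.  The RQ context starts 0x10 bytes into the
 * command input and names the CQ, protection domain and doorbell
 * record; the ring pages follow as physical addresses.  The log2
 * stride of 4 matches the 16-byte struct mcx_rq_entry.
 */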
4076 static int
4077 mcx_create_rq(struct mcx_softc *sc, int cqn)
4078 {
4079 	struct mcx_cmdq_entry *cqe;
4080 	struct mcx_dmamem mxm;
4081 	struct mcx_cmd_create_rq_in *in;
4082 	struct mcx_cmd_create_rq_out *out;
4083 	struct mcx_rq_ctx *mbin;
4084 	int error;
4085 	uint64_t *pas;
4086 	uint8_t *doorbell;
4087 	int insize, npages, paslen, i, token;
4088 
4089 	npages = howmany((1 << MCX_LOG_RQ_SIZE) * sizeof(struct mcx_rq_entry),
4090 	    MCX_PAGE_SIZE);
4091 	paslen = npages * sizeof(*pas);
4092 	insize = 0x10 + sizeof(struct mcx_rq_ctx) + paslen;
4093 
4094 	if (mcx_dmamem_alloc(sc, &sc->sc_rq_mem, npages * MCX_PAGE_SIZE,
4095 	    MCX_PAGE_SIZE) != 0) {
4096 		printf("%s: unable to allocate receive queue memory\n",
4097 		    DEVNAME(sc));
4098 		return (-1);
4099 	}
4100 
4101 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4102 	token = mcx_cmdq_token(sc);
4103 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4104 
4105 	in = mcx_cmdq_in(cqe);
4106 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQ);
4107 	in->cmd_op_mod = htobe16(0);
4108 
4109 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4110 	    &cqe->cq_input_ptr, token) != 0) {
4111 		printf("%s: unable to allocate create rq mailboxen\n",
4112 		    DEVNAME(sc));
4113 		error = -1;
4114 		goto free;
4115 	}
4116 	mbin = (struct mcx_rq_ctx *)
	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4117 	mbin->rq_flags = htobe32(MCX_RQ_CTX_RLKEY | MCX_RQ_CTX_VLAN_STRIP_DIS);
4118 	mbin->rq_cqn = htobe32(cqn);
4119 	mbin->rq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4120 	mbin->rq_wq.wq_pd = htobe32(sc->sc_pd);
4121 	mbin->rq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4122 	    MCX_RQ_DOORBELL_OFFSET);
4123 	mbin->rq_wq.wq_log_stride = htobe16(4);
4124 	mbin->rq_wq.wq_log_size = MCX_LOG_RQ_SIZE;
4125 
4126 	/* physical addresses follow the context in the mailbox input data */
4127 	pas = (uint64_t *)(mbin + 1);
4128 	for (i = 0; i < npages; i++) {
4129 		pas[i] = htobe64(MCX_DMA_DVA(&sc->sc_rq_mem) +
4130 		    (i * MCX_PAGE_SIZE));
4131 	}
4132 	mcx_cmdq_post(sc, cqe, 0);
4133 
4134 	error = mcx_cmdq_poll(sc, cqe, 1000);
4135 	if (error != 0) {
4136 		printf("%s: create rq timeout\n", DEVNAME(sc));
4137 		goto free;
4138 	}
4139 	if (mcx_cmdq_verify(cqe) != 0) {
4140 		printf("%s: create rq command corrupt\n", DEVNAME(sc));
		error = -1;
4141 		goto free;
4142 	}
4143 
4144 	out = mcx_cmdq_out(cqe);
4145 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4146 		printf("%s: create rq failed (%x, %x)\n", DEVNAME(sc),
4147 		    out->cmd_status, be32toh(out->cmd_syndrome));
4148 		error = -1;
4149 		goto free;
4150 	}
4151 
4152 	sc->sc_rqn = be32toh(out->cmd_rqn);
4153 
4154 	doorbell = MCX_DMA_KVA(&sc->sc_doorbell_mem);
4155 	sc->sc_rx_doorbell = (uint32_t *)(doorbell + MCX_RQ_DOORBELL_OFFSET);
4156 
4157 free:
4158 	mcx_dmamem_free(sc, &mxm);
4159 	return (error);
4160 }
4161 
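/*
 * Move the receive queue from reset to ready; the transition source
 * state is encoded in the top nibble of cmd_rq_state alongside the
 * RQ number.
 */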
4162 static int
4163 mcx_ready_rq(struct mcx_softc *sc)
4164 {
4165 	struct mcx_cmdq_entry *cqe;
4166 	struct mcx_dmamem mxm;
4167 	struct mcx_cmd_modify_rq_in *in;
4168 	struct mcx_cmd_modify_rq_mb_in *mbin;
4169 	struct mcx_cmd_modify_rq_out *out;
4170 	int error;
4171 	int token;
4172 
4173 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4174 	token = mcx_cmdq_token(sc);
4175 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4176 
4177 	in = mcx_cmdq_in(cqe);
4178 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_RQ);
4179 	in->cmd_op_mod = htobe16(0);
4180 	in->cmd_rq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | sc->sc_rqn);
4181 
4182 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4183 		printf("%s: unable to allocate modify rq mailbox\n", DEVNAME(sc));
4184 		return (-1);
4185 	}
4186 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4187 	mbin->cmd_rq_ctx.rq_flags = htobe32(
4188 	    MCX_QUEUE_STATE_RDY << MCX_RQ_CTX_STATE_SHIFT);
4189 
4190 	mcx_cmdq_mboxes_sign(&mxm, 1);
4191 	mcx_cmdq_post(sc, cqe, 0);
4192 	error = mcx_cmdq_poll(sc, cqe, 1000);
4193 	if (error != 0) {
4194 		printf("%s: modify rq timeout\n", DEVNAME(sc));
4195 		goto free;
4196 	}
4197 	if (mcx_cmdq_verify(cqe) != 0) {
4198 		printf("%s: modify rq command corrupt\n", DEVNAME(sc));
		error = -1;
4199 		goto free;
4200 	}
4201 
4202 	out = mcx_cmdq_out(cqe);
4203 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4204 		printf("%s: modify rq failed (%x, %x)\n", DEVNAME(sc),
4205 		    out->cmd_status, be32toh(out->cmd_syndrome));
4206 		error = -1;
4207 		goto free;
4208 	}
4209 
4210 free:
4211 	mcx_dmamem_free(sc, &mxm);
4212 	return (error);
4213 }
4214 
4215 static int
4216 mcx_destroy_rq(struct mcx_softc *sc)
4217 {
4218 	struct mcx_cmdq_entry *cqe;
4219 	struct mcx_cmd_destroy_rq_in *in;
4220 	struct mcx_cmd_destroy_rq_out *out;
4221 	int error;
4222 	int token;
4223 
4224 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4225 	token = mcx_cmdq_token(sc);
4226 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4227 
4228 	in = mcx_cmdq_in(cqe);
4229 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQ);
4230 	in->cmd_op_mod = htobe16(0);
4231 	in->cmd_rqn = htobe32(sc->sc_rqn);
4232 
4233 	mcx_cmdq_post(sc, cqe, 0);
4234 	error = mcx_cmdq_poll(sc, cqe, 1000);
4235 	if (error != 0) {
4236 		printf("%s: destroy rq timeout\n", DEVNAME(sc));
4237 		return error;
4238 	}
4239 	if (mcx_cmdq_verify(cqe) != 0) {
4240 		printf("%s: destroy rq command corrupt\n", DEVNAME(sc));
4241 		return -1;
4242 	}
4243 
4244 	out = mcx_cmdq_out(cqe);
4245 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4246 		printf("%s: destroy rq failed (%x, %x)\n", DEVNAME(sc),
4247 		    out->cmd_status, be32toh(out->cmd_syndrome));
4248 		return -1;
4249 	}
4250 
4251 	sc->sc_rqn = 0;
4252 	return 0;
4253 }
4254 
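/*
 * Create the transport interface receive (TIR) object that flow table
 * entries forward packets to.
 */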
4255 static int
4256 mcx_create_tir(struct mcx_softc *sc)
4257 {
4258 	struct mcx_cmdq_entry *cqe;
4259 	struct mcx_dmamem mxm;
4260 	struct mcx_cmd_create_tir_in *in;
4261 	struct mcx_cmd_create_tir_mb_in *mbin;
4262 	struct mcx_cmd_create_tir_out *out;
4263 	int error;
4264 	int token;
4265 
4266 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4267 	token = mcx_cmdq_token(sc);
4268 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4269 
4270 	in = mcx_cmdq_in(cqe);
4271 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
4272 	in->cmd_op_mod = htobe16(0);
4273 
4274 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4275 		printf("%s: unable to allocate create tir mailbox\n",
4276 		    DEVNAME(sc));
4277 		return (-1);
4278 	}
4279 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4280 	/* leave disp_type = 0, so packets get sent to the inline rqn */
4281 	mbin->cmd_inline_rqn = htobe32(sc->sc_rqn);
4282 	mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
4283 
4284 	mcx_cmdq_post(sc, cqe, 0);
4285 	error = mcx_cmdq_poll(sc, cqe, 1000);
4286 	if (error != 0) {
4287 		printf("%s: create tir timeout\n", DEVNAME(sc));
4288 		goto free;
4289 	}
4290 	if (mcx_cmdq_verify(cqe) != 0) {
4291 		printf("%s: create tir command corrupt\n", DEVNAME(sc));
		error = -1;
4292 		goto free;
4293 	}
4294 
4295 	out = mcx_cmdq_out(cqe);
4296 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4297 		printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
4298 		    out->cmd_status, be32toh(out->cmd_syndrome));
4299 		error = -1;
4300 		goto free;
4301 	}
4302 
4303 	sc->sc_tirn = be32toh(out->cmd_tirn);
4304 free:
4305 	mcx_dmamem_free(sc, &mxm);
4306 	return (error);
4307 }
4308 
4309 static int
4310 mcx_destroy_tir(struct mcx_softc *sc)
4311 {
4312 	struct mcx_cmdq_entry *cqe;
4313 	struct mcx_cmd_destroy_tir_in *in;
4314 	struct mcx_cmd_destroy_tir_out *out;
4315 	int error;
4316 	int token;
4317 
4318 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4319 	token = mcx_cmdq_token(sc);
4320 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4321 
4322 	in = mcx_cmdq_in(cqe);
4323 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIR);
4324 	in->cmd_op_mod = htobe16(0);
4325 	in->cmd_tirn = htobe32(sc->sc_tirn);
4326 
4327 	mcx_cmdq_post(sc, cqe, 0);
4328 	error = mcx_cmdq_poll(sc, cqe, 1000);
4329 	if (error != 0) {
4330 		printf("%s: destroy tir timeout\n", DEVNAME(sc));
4331 		return error;
4332 	}
4333 	if (mcx_cmdq_verify(cqe) != 0) {
4334 		printf("%s: destroy tir command corrupt\n", DEVNAME(sc));
4335 		return -1;
4336 	}
4337 
4338 	out = mcx_cmdq_out(cqe);
4339 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4340 		printf("%s: destroy tir failed (%x, %x)\n", DEVNAME(sc),
4341 		    out->cmd_status, be32toh(out->cmd_syndrome));
4342 		return -1;
4343 	}
4344 
4345 	sc->sc_tirn = 0;
4346 	return 0;
4347 }
4348 
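/*
 * Create the send queue.  As with the RQ, the SQ context sits 0x10
 * bytes into the command input and names the CQ, TIS, UAR and doorbell
 * record.  The TX doorbell is the second word of its doorbell record,
 * hence the +4 when the kva pointer is saved.
 */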
4349 static int
4350 mcx_create_sq(struct mcx_softc *sc, int cqn)
4351 {
4352 	struct mcx_cmdq_entry *cqe;
4353 	struct mcx_dmamem mxm;
4354 	struct mcx_cmd_create_sq_in *in;
4355 	struct mcx_sq_ctx *mbin;
4356 	struct mcx_cmd_create_sq_out *out;
4357 	int error;
4358 	uint64_t *pas;
4359 	uint8_t *doorbell;
4360 	int insize, npages, paslen, i, token;
4361 
4362 	npages = howmany((1 << MCX_LOG_SQ_SIZE) * sizeof(struct mcx_sq_entry),
4363 	    MCX_PAGE_SIZE);
4364 	paslen = npages * sizeof(*pas);
4365 	insize = sizeof(struct mcx_sq_ctx) + paslen;
4366 
4367 	if (mcx_dmamem_alloc(sc, &sc->sc_sq_mem, npages * MCX_PAGE_SIZE,
4368 	    MCX_PAGE_SIZE) != 0) {
4369 		printf("%s: unable to allocate send queue memory\n", DEVNAME(sc));
4370 		return (-1);
4371 	}
4372 
4373 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4374 	token = mcx_cmdq_token(sc);
4375 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize + paslen, sizeof(*out),
4376 	    token);
4377 
4378 	in = mcx_cmdq_in(cqe);
4379 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_SQ);
4380 	in->cmd_op_mod = htobe16(0);
4381 
4382 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4383 	    &cqe->cq_input_ptr, token) != 0) {
4384 		printf("%s: unable to allocate create sq mailboxen\n", DEVNAME(sc));
4385 		error = -1;
4386 		goto free;
4387 	}
4388 	mbin = (struct mcx_sq_ctx *)
	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4389 	mbin->sq_flags = htobe32(MCX_SQ_CTX_RLKEY |
4390 	    (1 << MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT));
4391 	mbin->sq_cqn = htobe32(cqn);
4392 	mbin->sq_tis_lst_sz = htobe32(1 << MCX_SQ_CTX_TIS_LST_SZ_SHIFT);
4393 	mbin->sq_tis_num = htobe32(sc->sc_tisn);
4394 	mbin->sq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4395 	mbin->sq_wq.wq_pd = htobe32(sc->sc_pd);
4396 	mbin->sq_wq.wq_uar_page = htobe32(sc->sc_uar);
4397 	mbin->sq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4398 	    MCX_SQ_DOORBELL_OFFSET);
4399 	mbin->sq_wq.wq_log_stride = htobe16(MCX_LOG_SQ_ENTRY_SIZE);
4400 	mbin->sq_wq.wq_log_size = MCX_LOG_SQ_SIZE;
4401 
4402 	/* physical addresses follow the context in the mailbox input data */
4403 	pas = (uint64_t *)(mbin + 1);
4404 	for (i = 0; i < npages; i++) {
4405 		pas[i] = htobe64(MCX_DMA_DVA(&sc->sc_sq_mem) +
4406 		    (i * MCX_PAGE_SIZE));
4407 	}
4408 	mcx_cmdq_post(sc, cqe, 0);
4409 
4410 	error = mcx_cmdq_poll(sc, cqe, 1000);
4411 	if (error != 0) {
4412 		printf("%s: create sq timeout\n", DEVNAME(sc));
4413 		goto free;
4414 	}
4415 	if (mcx_cmdq_verify(cqe) != 0) {
4416 		printf("%s: create sq command corrupt\n", DEVNAME(sc));
		error = -1;
4417 		goto free;
4418 	}
4419 
4420 	out = mcx_cmdq_out(cqe);
4421 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4422 		printf("%s: create sq failed (%x, %x)\n", DEVNAME(sc),
4423 		    out->cmd_status, be32toh(out->cmd_syndrome));
4424 		error = -1;
4425 		goto free;
4426 	}
4427 
4428 	sc->sc_sqn = be32toh(out->cmd_sqn);
4429 
4430 	doorbell = MCX_DMA_KVA(&sc->sc_doorbell_mem);
4431 	sc->sc_tx_doorbell = (uint32_t *)(doorbell + MCX_SQ_DOORBELL_OFFSET + 4);
4432 free:
4433 	mcx_dmamem_free(sc, &mxm);
4434 	return (error);
4435 }
4436 
4437 static int
4438 mcx_destroy_sq(struct mcx_softc *sc)
4439 {
4440 	struct mcx_cmdq_entry *cqe;
4441 	struct mcx_cmd_destroy_sq_in *in;
4442 	struct mcx_cmd_destroy_sq_out *out;
4443 	int error;
4444 	int token;
4445 
4446 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4447 	token = mcx_cmdq_token(sc);
4448 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4449 
4450 	in = mcx_cmdq_in(cqe);
4451 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_SQ);
4452 	in->cmd_op_mod = htobe16(0);
4453 	in->cmd_sqn = htobe32(sc->sc_sqn);
4454 
4455 	mcx_cmdq_post(sc, cqe, 0);
4456 	error = mcx_cmdq_poll(sc, cqe, 1000);
4457 	if (error != 0) {
4458 		printf("%s: destroy sq timeout\n", DEVNAME(sc));
4459 		return error;
4460 	}
4461 	if (mcx_cmdq_verify(cqe) != 0) {
4462 		printf("%s: destroy sq command corrupt\n", DEVNAME(sc));
4463 		return -1;
4464 	}
4465 
4466 	out = mcx_cmdq_out(cqe);
4467 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4468 		printf("%s: destroy sq failed (%x, %x)\n", DEVNAME(sc),
4469 		    out->cmd_status, be32toh(out->cmd_syndrome));
4470 		return -1;
4471 	}
4472 
4473 	sc->sc_sqn = 0;
4474 	return 0;
4475 }
4476 
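/*
 * Move the send queue from reset to ready.
 */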
4477 static int
4478 mcx_ready_sq(struct mcx_softc *sc)
4479 {
4480 	struct mcx_cmdq_entry *cqe;
4481 	struct mcx_dmamem mxm;
4482 	struct mcx_cmd_modify_sq_in *in;
4483 	struct mcx_cmd_modify_sq_mb_in *mbin;
4484 	struct mcx_cmd_modify_sq_out *out;
4485 	int error;
4486 	int token;
4487 
4488 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4489 	token = mcx_cmdq_token(sc);
4490 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4491 
4492 	in = mcx_cmdq_in(cqe);
4493 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_SQ);
4494 	in->cmd_op_mod = htobe16(0);
4495 	in->cmd_sq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | sc->sc_sqn);
4496 
4497 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4498 		printf("%s: unable to allocate modify sq mailbox\n",
4499 		    DEVNAME(sc));
4500 		return (-1);
4501 	}
4502 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4503 	mbin->cmd_sq_ctx.sq_flags = htobe32(
4504 	    MCX_QUEUE_STATE_RDY << MCX_SQ_CTX_STATE_SHIFT);
4505 
4506 	mcx_cmdq_mboxes_sign(&mxm, 1);
4507 	mcx_cmdq_post(sc, cqe, 0);
4508 	error = mcx_cmdq_poll(sc, cqe, 1000);
4509 	if (error != 0) {
4510 		printf("%s: modify sq timeout\n", DEVNAME(sc));
4511 		goto free;
4512 	}
4513 	if (mcx_cmdq_verify(cqe) != 0) {
4514 		printf("%s: modify sq command corrupt\n", DEVNAME(sc));
		error = -1;
4515 		goto free;
4516 	}
4517 
4518 	out = mcx_cmdq_out(cqe);
4519 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4520 		printf("%s: modify sq failed (%x, %x)\n", DEVNAME(sc),
4521 		    out->cmd_status, be32toh(out->cmd_syndrome));
4522 		error = -1;
4523 		goto free;
4524 	}
4525 
4526 free:
4527 	mcx_dmamem_free(sc, &mxm);
4528 	return (error);
4529 }
4530 
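/*
 * Create the transport interface send (TIS) object in our transport
 * domain; the send queue refers to it by number when it is created.
 */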
4531 static int
4532 mcx_create_tis(struct mcx_softc *sc)
4533 {
4534 	struct mcx_cmdq_entry *cqe;
4535 	struct mcx_dmamem mxm;
4536 	struct mcx_cmd_create_tis_in *in;
4537 	struct mcx_cmd_create_tis_mb_in *mbin;
4538 	struct mcx_cmd_create_tis_out *out;
4539 	int error;
4540 	int token;
4541 
4542 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4543 	token = mcx_cmdq_token(sc);
4544 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4545 
4546 	in = mcx_cmdq_in(cqe);
4547 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIS);
4548 	in->cmd_op_mod = htobe16(0);
4549 
4550 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4551 		printf("%s: unable to allocate create tis mailbox\n", DEVNAME(sc));
4552 		return (-1);
4553 	}
4554 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4555 	mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
4556 
4557 	mcx_cmdq_mboxes_sign(&mxm, 1);
4558 	mcx_cmdq_post(sc, cqe, 0);
4559 	error = mcx_cmdq_poll(sc, cqe, 1000);
4560 	if (error != 0) {
4561 		printf("%s: create tis timeout\n", DEVNAME(sc));
4562 		goto free;
4563 	}
4564 	if (mcx_cmdq_verify(cqe) != 0) {
4565 		printf("%s: create tis command corrupt\n", DEVNAME(sc));
		error = -1;
4566 		goto free;
4567 	}
4568 
4569 	out = mcx_cmdq_out(cqe);
4570 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4571 		printf("%s: create tis failed (%x, %x)\n", DEVNAME(sc),
4572 		    out->cmd_status, be32toh(out->cmd_syndrome));
4573 		error = -1;
4574 		goto free;
4575 	}
4576 
4577 	sc->sc_tisn = be32toh(out->cmd_tisn);
4578 free:
4579 	mcx_dmamem_free(sc, &mxm);
4580 	return (error);
4581 }
4582 
4583 static int
4584 mcx_destroy_tis(struct mcx_softc *sc)
4585 {
4586 	struct mcx_cmdq_entry *cqe;
4587 	struct mcx_cmd_destroy_tis_in *in;
4588 	struct mcx_cmd_destroy_tis_out *out;
4589 	int error;
4590 	int token;
4591 
4592 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4593 	token = mcx_cmdq_token(sc);
4594 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4595 
4596 	in = mcx_cmdq_in(cqe);
4597 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIS);
4598 	in->cmd_op_mod = htobe16(0);
4599 	in->cmd_tisn = htobe32(sc->sc_tisn);
4600 
4601 	mcx_cmdq_post(sc, cqe, 0);
4602 	error = mcx_cmdq_poll(sc, cqe, 1000);
4603 	if (error != 0) {
4604 		printf("%s: destroy tis timeout\n", DEVNAME(sc));
4605 		return error;
4606 	}
4607 	if (mcx_cmdq_verify(cqe) != 0) {
4608 		printf("%s: destroy tis command corrupt\n", DEVNAME(sc));
4609 		return -1;
4610 	}
4611 
4612 	out = mcx_cmdq_out(cqe);
4613 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4614 		printf("%s: destroy tis failed (%x, %x)\n", DEVNAME(sc),
4615 		    out->cmd_status, be32toh(out->cmd_syndrome));
4616 		return -1;
4617 	}
4618 
4619 	sc->sc_tisn = 0;
4620 	return 0;
4621 }
4622 
4623 #if 0
4624 static int
4625 mcx_alloc_flow_counter(struct mcx_softc *sc, int i)
4626 {
4627 	struct mcx_cmdq_entry *cqe;
4628 	struct mcx_cmd_alloc_flow_counter_in *in;
4629 	struct mcx_cmd_alloc_flow_counter_out *out;
4630 	int error;
4631 
4632 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4633 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4634 
4635 	in = mcx_cmdq_in(cqe);
4636 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_FLOW_COUNTER);
4637 	in->cmd_op_mod = htobe16(0);
4638 
4639 	mcx_cmdq_post(sc, cqe, 0);
4640 
4641 	error = mcx_cmdq_poll(sc, cqe, 1000);
4642 	if (error != 0) {
4643 		printf("%s: alloc flow counter timeout\n", DEVNAME(sc));
4644 		return (-1);
4645 	}
4646 	if (mcx_cmdq_verify(cqe) != 0) {
4647 		printf("%s: alloc flow counter command corrupt\n", DEVNAME(sc));
4648 		return (-1);
4649 	}
4650 
4651 	out = (struct mcx_cmd_alloc_flow_counter_out *)cqe->cq_output_data;
4652 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4653 		printf("%s: alloc flow counter failed (%x)\n", DEVNAME(sc),
4654 		    out->cmd_status);
4655 		return (-1);
4656 	}
4657 
4658 	sc->sc_flow_counter_id[i] = be16toh(out->cmd_flow_counter_id);
4659 	printf("flow counter id %d = %d\n", i, sc->sc_flow_counter_id[i]);
4660 
4661 	return (0);
4662 }
4663 #endif
4664 
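/*
 * Create the receive flow table.  The flow groups carved out of it
 * below determine which incoming packets are forwarded to the TIR.
 */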
4665 static int
4666 mcx_create_flow_table(struct mcx_softc *sc, int log_size)
4667 {
4668 	struct mcx_cmdq_entry *cqe;
4669 	struct mcx_dmamem mxm;
4670 	struct mcx_cmd_create_flow_table_in *in;
4671 	struct mcx_cmd_create_flow_table_mb_in *mbin;
4672 	struct mcx_cmd_create_flow_table_out *out;
4673 	int error;
4674 	int token;
4675 
4676 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4677 	token = mcx_cmdq_token(sc);
4678 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4679 
4680 	in = mcx_cmdq_in(cqe);
4681 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_TABLE);
4682 	in->cmd_op_mod = htobe16(0);
4683 
4684 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4685 		printf("%s: unable to allocate create flow table mailbox\n",
4686 		    DEVNAME(sc));
4687 		return (-1);
4688 	}
4689 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4690 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4691 	mbin->cmd_ctx.ft_log_size = log_size;
4692 
4693 	mcx_cmdq_mboxes_sign(&mxm, 1);
4694 	mcx_cmdq_post(sc, cqe, 0);
4695 	error = mcx_cmdq_poll(sc, cqe, 1000);
4696 	if (error != 0) {
4697 		printf("%s: create flow table timeout\n", DEVNAME(sc));
4698 		goto free;
4699 	}
4700 	if (mcx_cmdq_verify(cqe) != 0) {
4701 		printf("%s: create flow table command corrupt\n", DEVNAME(sc));
		error = -1;
4702 		goto free;
4703 	}
4704 
4705 	out = mcx_cmdq_out(cqe);
4706 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4707 		printf("%s: create flow table failed (%x, %x)\n", DEVNAME(sc),
4708 		    out->cmd_status, be32toh(out->cmd_syndrome));
4709 		error = -1;
4710 		goto free;
4711 	}
4712 
4713 	sc->sc_flow_table_id = be32toh(out->cmd_table_id);
4714 free:
4715 	mcx_dmamem_free(sc, &mxm);
4716 	return (error);
4717 }
4718 
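/*
 * Make our flow table the root of the receive flow namespace so that
 * it is consulted for incoming traffic.
 */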
4719 static int
4720 mcx_set_flow_table_root(struct mcx_softc *sc)
4721 {
4722 	struct mcx_cmdq_entry *cqe;
4723 	struct mcx_dmamem mxm;
4724 	struct mcx_cmd_set_flow_table_root_in *in;
4725 	struct mcx_cmd_set_flow_table_root_mb_in *mbin;
4726 	struct mcx_cmd_set_flow_table_root_out *out;
4727 	int error;
4728 	int token;
4729 
4730 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4731 	token = mcx_cmdq_token(sc);
4732 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4733 
4734 	in = mcx_cmdq_in(cqe);
4735 	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ROOT);
4736 	in->cmd_op_mod = htobe16(0);
4737 
4738 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4739 		printf("%s: unable to allocate set flow table root mailbox\n",
4740 		    DEVNAME(sc));
4741 		return (-1);
4742 	}
4743 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4744 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4745 	mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4746 
4747 	mcx_cmdq_mboxes_sign(&mxm, 1);
4748 	mcx_cmdq_post(sc, cqe, 0);
4749 	error = mcx_cmdq_poll(sc, cqe, 1000);
4750 	if (error != 0) {
4751 		printf("%s: set flow table root timeout\n", DEVNAME(sc));
4752 		goto free;
4753 	}
4754 	if (mcx_cmdq_verify(cqe) != 0) {
4755 		printf("%s: set flow table root command corrupt\n",
4756 		    DEVNAME(sc));
		error = -1;
4757 		goto free;
4758 	}
4759 
4760 	out = mcx_cmdq_out(cqe);
4761 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4762 		printf("%s: set flow table root failed (%x, %x)\n",
4763 		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
4764 		error = -1;
4765 		goto free;
4766 	}
4767 
4768 free:
4769 	mcx_dmamem_free(sc, &mxm);
4770 	return (error);
4771 }
4772 
4773 static int
4774 mcx_destroy_flow_table(struct mcx_softc *sc)
4775 {
4776 	struct mcx_cmdq_entry *cqe;
4777 	struct mcx_dmamem mxm;
4778 	struct mcx_cmd_destroy_flow_table_in *in;
4779 	struct mcx_cmd_destroy_flow_table_mb_in *mb;
4780 	struct mcx_cmd_destroy_flow_table_out *out;
4781 	int error;
4782 	int token;
4783 
4784 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4785 	token = mcx_cmdq_token(sc);
4786 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
4787 
4788 	in = mcx_cmdq_in(cqe);
4789 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_TABLE);
4790 	in->cmd_op_mod = htobe16(0);
4791 
4792 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4793 		printf("%s: unable to allocate destroy flow table mailbox\n",
4794 		    DEVNAME(sc));
4795 		return (-1);
4796 	}
4797 	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4798 	mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4799 	mb->cmd_table_id = htobe32(sc->sc_flow_table_id);
4800 
4801 	mcx_cmdq_mboxes_sign(&mxm, 1);
4802 	mcx_cmdq_post(sc, cqe, 0);
4803 	error = mcx_cmdq_poll(sc, cqe, 1000);
4804 	if (error != 0) {
4805 		printf("%s: destroy flow table timeout\n", DEVNAME(sc));
4806 		goto free;
4807 	}
4808 	if (mcx_cmdq_verify(cqe) != 0) {
4809 		printf("%s: destroy flow table command corrupt\n", DEVNAME(sc));
		error = -1;
4810 		goto free;
4811 	}
4812 
4813 	out = mcx_cmdq_out(cqe);
4814 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4815 		printf("%s: destroy flow table failed (%x, %x)\n", DEVNAME(sc),
4816 		    out->cmd_status, be32toh(out->cmd_syndrome));
4817 		error = -1;
4818 		goto free;
4819 	}
4820 
4821 	sc->sc_flow_table_id = -1;
4822 free:
4823 	mcx_dmamem_free(sc, &mxm);
4824 	return (error);
4825 }
4826 
4827 
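/*
 * Create a flow group: a contiguous range of flow table entries that
 * share one set of match criteria.  The input spans two mailboxes
 * because of the embedded match criteria structure.
 */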
4828 static int
4829 mcx_create_flow_group(struct mcx_softc *sc, int group, int start, int size,
4830     int match_enable, struct mcx_flow_match *match)
4831 {
4832 	struct mcx_cmdq_entry *cqe;
4833 	struct mcx_dmamem mxm;
4834 	struct mcx_cmd_create_flow_group_in *in;
4835 	struct mcx_cmd_create_flow_group_mb_in *mbin;
4836 	struct mcx_cmd_create_flow_group_out *out;
4837 	int error;
4838 	int token;
4839 
4840 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4841 	token = mcx_cmdq_token(sc);
4842 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
4843 	    token);
4844 
4845 	in = mcx_cmdq_in(cqe);
4846 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_GROUP);
4847 	in->cmd_op_mod = htobe16(0);
4848 
4849 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
4850 	    != 0) {
4851 		printf("%s: unable to allocate create flow group mailbox\n",
4852 		    DEVNAME(sc));
4853 		return (-1);
4854 	}
4855 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4856 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4857 	mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4858 	mbin->cmd_start_flow_index = htobe32(start);
4859 	mbin->cmd_end_flow_index = htobe32(start + (size - 1));
4860 
4861 	mbin->cmd_match_criteria_enable = match_enable;
4862 	memcpy(&mbin->cmd_match_criteria, match, sizeof(*match));
4863 
4864 	mcx_cmdq_mboxes_sign(&mxm, 2);
4865 	mcx_cmdq_post(sc, cqe, 0);
4866 	error = mcx_cmdq_poll(sc, cqe, 1000);
4867 	if (error != 0) {
4868 		printf("%s: create flow group timeout\n", DEVNAME(sc));
4869 		goto free;
4870 	}
4871 	if (mcx_cmdq_verify(cqe) != 0) {
4872 		printf("%s: create flow group command corrupt\n", DEVNAME(sc));
		error = -1;
4873 		goto free;
4874 	}
4875 
4876 	out = mcx_cmdq_out(cqe);
4877 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4878 		printf("%s: create flow group failed (%x, %x)\n", DEVNAME(sc),
4879 		    out->cmd_status, be32toh(out->cmd_syndrome));
4880 		error = -1;
4881 		goto free;
4882 	}
4883 
4884 	sc->sc_flow_group_id[group] = be32toh(out->cmd_group_id);
4885 	sc->sc_flow_group_size[group] = size;
4886 	sc->sc_flow_group_start[group] = start;
4887 
4888 free:
4889 	mcx_dmamem_free(sc, &mxm);
4890 	return (error);
4891 }
4892 
4893 static int
4894 mcx_destroy_flow_group(struct mcx_softc *sc, int group)
4895 {
4896 	struct mcx_cmdq_entry *cqe;
4897 	struct mcx_dmamem mxm;
4898 	struct mcx_cmd_destroy_flow_group_in *in;
4899 	struct mcx_cmd_destroy_flow_group_mb_in *mb;
4900 	struct mcx_cmd_destroy_flow_group_out *out;
4901 	int error;
4902 	int token;
4903 
4904 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4905 	token = mcx_cmdq_token(sc);
4906 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
4907 
4908 	in = mcx_cmdq_in(cqe);
4909 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_GROUP);
4910 	in->cmd_op_mod = htobe16(0);
4911 
4912 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token) != 0) {
4913 		printf("%s: unable to allocate destroy flow group mailbox\n",
4914 		    DEVNAME(sc));
4915 		return (-1);
4916 	}
4917 	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4918 	mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4919 	mb->cmd_table_id = htobe32(sc->sc_flow_table_id);
4920 	mb->cmd_group_id = htobe32(sc->sc_flow_group_id[group]);
4921 
4922 	mcx_cmdq_mboxes_sign(&mxm, 2);
4923 	mcx_cmdq_post(sc, cqe, 0);
4924 	error = mcx_cmdq_poll(sc, cqe, 1000);
4925 	if (error != 0) {
4926 		printf("%s: destroy flow group timeout\n", DEVNAME(sc));
4927 		goto free;
4928 	}
4929 	if (mcx_cmdq_verify(cqe) != 0) {
4930 		printf("%s: destroy flow group command corrupt\n", DEVNAME(sc));
		error = -1;
4931 		goto free;
4932 	}
4933 
4934 	out = mcx_cmdq_out(cqe);
4935 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4936 		printf("%s: destroy flow group failed (%x, %x)\n", DEVNAME(sc),
4937 		    out->cmd_status, be32toh(out->cmd_syndrome));
4938 		error = -1;
4939 		goto free;
4940 	}
4941 
4942 	sc->sc_flow_group_id[group] = -1;
4943 	sc->sc_flow_group_size[group] = 0;
4944 free:
4945 	mcx_dmamem_free(sc, &mxm);
4946 	return (error);
4947 }
4948 
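/*
 * Point a flow table entry at the TIR.  The destination list sits at a
 * fixed offset in the flow context, which lands 0x130 into the second
 * mailbox; the only match value filled in is the destination MAC
 * address.
 */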
4949 static int
4950 mcx_set_flow_table_entry(struct mcx_softc *sc, int group, int index,
4951     const uint8_t *macaddr)
4952 {
4953 	struct mcx_cmdq_entry *cqe;
4954 	struct mcx_dmamem mxm;
4955 	struct mcx_cmd_set_flow_table_entry_in *in;
4956 	struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
4957 	struct mcx_cmd_set_flow_table_entry_out *out;
4958 	uint32_t *dest;
4959 	int error;
4960 	int token;
4961 
4962 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4963 	token = mcx_cmdq_token(sc);
4964 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*dest),
4965 	    sizeof(*out), token);
4966 
4967 	in = mcx_cmdq_in(cqe);
4968 	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
4969 	in->cmd_op_mod = htobe16(0);
4970 
4971 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
4972 	    != 0) {
4973 		printf("%s: unable to allocate set flow table entry mailbox\n",
4974 		    DEVNAME(sc));
4975 		return (-1);
4976 	}
4977 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4978 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4979 	mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4980 	mbin->cmd_flow_index = htobe32(sc->sc_flow_group_start[group] + index);
4981 	mbin->cmd_flow_ctx.fc_group_id = htobe32(sc->sc_flow_group_id[group]);
4982 
4983 	/* flow context ends at offset 0x330, 0x130 into the second mbox */
4984 	dest = (uint32_t *)
4985 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
4986 	mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
4987 	mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
4988 	*dest = htobe32(sc->sc_tirn | MCX_FLOW_CONTEXT_DEST_TYPE_TIR);
4989 
4990 	/* the only thing we match on at the moment is the dest mac address */
4991 	if (macaddr != NULL) {
4992 		memcpy(mbin->cmd_flow_ctx.fc_match_value.mc_dest_mac, macaddr,
4993 		    ETHER_ADDR_LEN);
4994 	}
4995 
4996 	mcx_cmdq_mboxes_sign(&mxm, 2);
4997 	mcx_cmdq_post(sc, cqe, 0);
4998 	error = mcx_cmdq_poll(sc, cqe, 1000);
4999 	if (error != 0) {
5000 		printf("%s: set flow table entry timeout\n", DEVNAME(sc));
5001 		goto free;
5002 	}
5003 	if (mcx_cmdq_verify(cqe) != 0) {
5004 		printf("%s: set flow table entry command corrupt\n",
5005 		    DEVNAME(sc));
		error = -1;
5006 		goto free;
5007 	}
5008 
5009 	out = mcx_cmdq_out(cqe);
5010 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5011 		printf("%s: set flow table entry failed (%x, %x)\n",
5012 		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5013 		error = -1;
5014 		goto free;
5015 	}
5016 
5017 free:
5018 	mcx_dmamem_free(sc, &mxm);
5019 	return (error);
5020 }
5021 
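/*
 * Clear a previously programmed flow table entry.
 */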
5022 static int
5023 mcx_delete_flow_table_entry(struct mcx_softc *sc, int group, int index)
5024 {
5025 	struct mcx_cmdq_entry *cqe;
5026 	struct mcx_dmamem mxm;
5027 	struct mcx_cmd_delete_flow_table_entry_in *in;
5028 	struct mcx_cmd_delete_flow_table_entry_mb_in *mbin;
5029 	struct mcx_cmd_delete_flow_table_entry_out *out;
5030 	int error;
5031 	int token;
5032 
5033 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5034 	token = mcx_cmdq_token(sc);
5035 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
5036 	    token);
5037 
5038 	in = mcx_cmdq_in(cqe);
5039 	in->cmd_opcode = htobe16(MCX_CMD_DELETE_FLOW_TABLE_ENTRY);
5040 	in->cmd_op_mod = htobe16(0);
5041 
5042 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token) != 0) {
5043 		printf("%s: unable to allocate delete flow table entry mailbox\n",
5044 		    DEVNAME(sc));
5045 		return (-1);
5046 	}
5047 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5048 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5049 	mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5050 	mbin->cmd_flow_index = htobe32(sc->sc_flow_group_start[group] + index);
5051 
5052 	mcx_cmdq_mboxes_sign(&mxm, 2);
5053 	mcx_cmdq_post(sc, cqe, 0);
5054 	error = mcx_cmdq_poll(sc, cqe, 1000);
5055 	if (error != 0) {
5056 		printf("%s: delete flow table entry timeout\n", DEVNAME(sc));
5057 		goto free;
5058 	}
5059 	if (mcx_cmdq_verify(cqe) != 0) {
5060 		printf("%s: delete flow table entry command corrupt\n",
5061 		    DEVNAME(sc));
		error = -1;
5062 		goto free;
5063 	}
5064 
5065 	out = mcx_cmdq_out(cqe);
5066 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5067 		printf("%s: delete flow table entry %d:%d failed (%x, %x)\n",
5068 		    DEVNAME(sc), group, index, out->cmd_status,
5069 		    be32toh(out->cmd_syndrome));
5070 		error = -1;
5071 		goto free;
5072 	}
5073 
5074 free:
5075 	mcx_dmamem_free(sc, &mxm);
5076 	return (error);
5077 }
5078 
5079 #if 0
5080 int
5081 mcx_dump_flow_table(struct mcx_softc *sc)
5082 {
5083 	struct mcx_dmamem mxm;
5084 	struct mcx_cmdq_entry *cqe;
5085 	struct mcx_cmd_query_flow_table_in *in;
5086 	struct mcx_cmd_query_flow_table_mb_in *mbin;
5087 	struct mcx_cmd_query_flow_table_out *out;
5088 	struct mcx_cmd_query_flow_table_mb_out *mbout;
5089 	uint8_t token = mcx_cmdq_token(sc);
5090 	int error;
5091 	int i;
5092 	uint8_t *dump;
5093 
5094 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5095 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5096 	    sizeof(*out) + sizeof(*mbout) + 16, token);
5097 
5098 	in = mcx_cmdq_in(cqe);
5099 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE);
5100 	in->cmd_op_mod = htobe16(0);
5101 
5102 	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
5103 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE);
5104 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5105 	    &cqe->cq_output_ptr, token) != 0) {
5106 		printf(", unable to allocate query flow table mailboxes\n");
5107 		return (-1);
5108 	}
5109 	cqe->cq_input_ptr = cqe->cq_output_ptr;
5110 
5111 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5112 	mbin->cmd_table_type = 0;
5113 	mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5114 
5115 	mcx_cmdq_mboxes_sign(&mxm, 1);
5116 
5117 	mcx_cmdq_post(sc, cqe, 0);
5118 	error = mcx_cmdq_poll(sc, cqe, 1000);
5119 	if (error != 0) {
5120 		printf("%s: query flow table timeout\n", DEVNAME(sc));
5121 		goto free;
5122 	}
5123 	error = mcx_cmdq_verify(cqe);
5124 	if (error != 0) {
5125 		printf("%s: query flow table reply corrupt\n", DEVNAME(sc));
5126 		goto free;
5127 	}
5128 
5129 	out = mcx_cmdq_out(cqe);
5130 	switch (out->cmd_status) {
5131 	case MCX_CQ_STATUS_OK:
5132 		break;
5133 	default:
5134 		printf("%s: query flow table failed (%x/%x)\n", DEVNAME(sc),
5135 		    out->cmd_status, be32toh(out->cmd_syndrome));
5136 		error = -1;
5137 		goto free;
5138 	}
5139 
5140 	mbout = (struct mcx_cmd_query_flow_table_mb_out *)
5141 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5142 	dump = (uint8_t *)mbout + 8;
5143 	for (i = 0; i < sizeof(struct mcx_flow_table_ctx); i++) {
5144 		printf("%.2x ", dump[i]);
5145 		if (i % 16 == 15)
5146 			printf("\n");
5147 	}
5148 free:
5149 	mcx_cq_mboxes_free(sc, &mxm);
5150 	return (error);
5151 }
5152 int
5153 mcx_dump_flow_table_entry(struct mcx_softc *sc, int index)
5154 {
5155 	struct mcx_dmamem mxm;
5156 	struct mcx_cmdq_entry *cqe;
5157 	struct mcx_cmd_query_flow_table_entry_in *in;
5158 	struct mcx_cmd_query_flow_table_entry_mb_in *mbin;
5159 	struct mcx_cmd_query_flow_table_entry_out *out;
5160 	struct mcx_cmd_query_flow_table_entry_mb_out *mbout;
5161 	uint8_t token = mcx_cmdq_token(sc);
5162 	int error;
5163 	int i;
5164 	uint8_t *dump;
5165 
5166 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5167 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5168 	    sizeof(*out) + sizeof(*mbout) + 16, token);
5169 
5170 	in = mcx_cmdq_in(cqe);
5171 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE_ENTRY);
5172 	in->cmd_op_mod = htobe16(0);
5173 
5174 	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
5175 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5176 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5177 	    &cqe->cq_output_ptr, token) != 0) {
5178 		printf(", unable to allocate query flow table entry mailboxes\n");
5179 		return (-1);
5180 	}
5181 	cqe->cq_input_ptr = cqe->cq_output_ptr;
5182 
5183 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5184 	mbin->cmd_table_type = 0;
5185 	mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5186 	mbin->cmd_flow_index = htobe32(index);
5187 
5188 	mcx_cmdq_mboxes_sign(&mxm, 1);
5189 
5190 	mcx_cmdq_post(sc, cqe, 0);
5191 	error = mcx_cmdq_poll(sc, cqe, 1000);
5192 	if (error != 0) {
5193 		printf("%s: query flow table entry timeout\n", DEVNAME(sc));
5194 		goto free;
5195 	}
5196 	error = mcx_cmdq_verify(cqe);
5197 	if (error != 0) {
5198 		printf("%s: query flow table entry reply corrupt\n",
5199 		    DEVNAME(sc));
5200 		goto free;
5201 	}
5202 
5203 	out = mcx_cmdq_out(cqe);
5204 	switch (out->cmd_status) {
5205 	case MCX_CQ_STATUS_OK:
5206 		break;
5207 	default:
5208 		printf("%s: query flow table entry failed (%x/%x)\n",
5209 		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5210 		error = -1;
5211 		goto free;
5212 	}
5213 
5214 	mbout = (struct mcx_cmd_query_flow_table_entry_mb_out *)
5215 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5216 	dump = (uint8_t *)mbout;
5217 	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
5218 		printf("%.2x ", dump[i]);
5219 		if (i % 16 == 15)
5220 			printf("\n");
5221 	}
5222 
5223 free:
5224 	mcx_cq_mboxes_free(sc, &mxm);
5225 	return (error);
5226 }
5227 
5228 int
5229 mcx_dump_flow_group(struct mcx_softc *sc)
5230 mcx_dump_flow_group(struct mcx_softc *sc, int group)
5231 	struct mcx_dmamem mxm;
5232 	struct mcx_cmdq_entry *cqe;
5233 	struct mcx_cmd_query_flow_group_in *in;
5234 	struct mcx_cmd_query_flow_group_mb_in *mbin;
5235 	struct mcx_cmd_query_flow_group_out *out;
5236 	struct mcx_cmd_query_flow_group_mb_out *mbout;
5237 	uint8_t token = mcx_cmdq_token(sc);
5238 	int error;
5239 	int i;
5240 	uint8_t *dump;
5241 
5242 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5243 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5244 	    sizeof(*out) + sizeof(*mbout) + 16, token);
5245 
5246 	in = mcx_cmdq_in(cqe);
5247 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_GROUP);
5248 	in->cmd_op_mod = htobe16(0);
5249 
5250 	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
5251 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5252 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5253 	    &cqe->cq_output_ptr, token) != 0) {
5254 		printf(", unable to allocate query flow group mailboxes\n");
5255 		return (-1);
5256 	}
5257 	cqe->cq_input_ptr = cqe->cq_output_ptr;
5258 
5259 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5260 	mbin->cmd_table_type = 0;
5261 	mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5262 	mbin->cmd_group_id = htobe32(sc->sc_flow_group_id[group]);
5263 
5264 	mcx_cmdq_mboxes_sign(&mxm, 1);
5265 
5266 	mcx_cmdq_post(sc, cqe, 0);
5267 	error = mcx_cmdq_poll(sc, cqe, 1000);
5268 	if (error != 0) {
5269 		printf("%s: query flow group timeout\n", DEVNAME(sc));
5270 		goto free;
5271 	}
5272 	error = mcx_cmdq_verify(cqe);
5273 	if (error != 0) {
5274 		printf("%s: query flow group reply corrupt\n", DEVNAME(sc));
5275 		goto free;
5276 	}
5277 
5278 	out = mcx_cmdq_out(cqe);
5279 	switch (out->cmd_status) {
5280 	case MCX_CQ_STATUS_OK:
5281 		break;
5282 	default:
5283 		printf("%s: query flow group failed (%x/%x)\n", DEVNAME(sc),
5284 		    out->cmd_status, be32toh(out->cmd_syndrome));
5285 		error = -1;
5286 		goto free;
5287 	}
5288 
5289 	mbout = (struct mcx_cmd_query_flow_group_mb_out *)
5290 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5291 	dump = (uint8_t *)mbout;
5292 	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
5293 		printf("%.2x ", dump[i]);
5294 		if (i % 16 == 15)
5295 			printf("\n");
5296 	}
5297 	dump = (uint8_t *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1)));
5298 	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
5299 		printf("%.2x ", dump[i]);
5300 		if (i % 16 == 15)
5301 			printf("\n");
5302 	}
5303 
5304 free:
5305 	mcx_cq_mboxes_free(sc, &mxm);
5306 	return (error);
5307 }
5308 
5309 int
5310 mcx_dump_rq(struct mcx_softc *sc)
5311 {
5312 	struct mcx_dmamem mxm;
5313 	struct mcx_cmdq_entry *cqe;
5314 	struct mcx_cmd_query_rq_in *in;
5315 	struct mcx_cmd_query_rq_out *out;
5316 	struct mcx_cmd_query_rq_mb_out *mbout;
5317 	uint8_t token = mcx_cmdq_token(sc);
5318 	int error;
5319 
5320 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5321 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
5322 	    token);
5323 
5324 	in = mcx_cmdq_in(cqe);
5325 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_RQ);
5326 	in->cmd_op_mod = htobe16(0);
5327 	in->cmd_rqn = htobe32(sc->sc_rqn);
5328 
5329 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5330 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5331 	    &cqe->cq_output_ptr, token) != 0) {
5332 		printf(", unable to allocate query rq mailboxes\n");
5333 		return (-1);
5334 	}
5335 
5336 	mcx_cmdq_mboxes_sign(&mxm, 1);
5337 
5338 	mcx_cmdq_post(sc, cqe, 0);
5339 	error = mcx_cmdq_poll(sc, cqe, 1000);
5340 	if (error != 0) {
5341 		printf("%s: query rq timeout\n", DEVNAME(sc));
5342 		goto free;
5343 	}
5344 	error = mcx_cmdq_verify(cqe);
5345 	if (error != 0) {
5346 		printf("%s: query rq reply corrupt\n", DEVNAME(sc));
5347 		goto free;
5348 	}
5349 
5350 	out = mcx_cmdq_out(cqe);
5351 	switch (out->cmd_status) {
5352 	case MCX_CQ_STATUS_OK:
5353 		break;
5354 	default:
5355 		printf("%s: query rq failed (%x/%x)\n", DEVNAME(sc),
5356 		    out->cmd_status, be32toh(out->cmd_syndrome));
5357 		error = -1;
5358 		goto free;
5359 	}
5360 
5361 	mbout = (struct mcx_cmd_query_rq_mb_out *)
5362 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5363 	printf("%s: rq: state %d, ui %d, cqn %d, s/s %d/%d/%d, hw %d, sw %d\n",
5364 	    DEVNAME(sc),
5365 	    (be32toh(mbout->cmd_ctx.rq_flags) >> MCX_RQ_CTX_STATE_SHIFT) & 0x0f,
5366 	    be32toh(mbout->cmd_ctx.rq_user_index),
5367 	    be32toh(mbout->cmd_ctx.rq_cqn),
5368 	    be16toh(mbout->cmd_ctx.rq_wq.wq_log_stride),
5369 	    mbout->cmd_ctx.rq_wq.wq_log_page_sz,
5370 	    mbout->cmd_ctx.rq_wq.wq_log_size,
5371 	    be32toh(mbout->cmd_ctx.rq_wq.wq_hw_counter),
5372 	    be32toh(mbout->cmd_ctx.rq_wq.wq_sw_counter));
5373 
5374 free:
5375 	mcx_cq_mboxes_free(sc, &mxm);
5376 	return (error);
5377 }
5378 
5379 int
5380 mcx_dump_sq(struct mcx_softc *sc)
5381 {
5382 	struct mcx_dmamem mxm;
5383 	struct mcx_cmdq_entry *cqe;
5384 	struct mcx_cmd_query_sq_in *in;
5385 	struct mcx_cmd_query_sq_out *out;
5386 	struct mcx_cmd_query_sq_mb_out *mbout;
5387 	uint8_t token = mcx_cmdq_token(sc);
5388 	int error;
5389 	int i;
5390 	uint8_t *dump;
5391 
5392 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5393 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
5394 	    token);
5395 
5396 	in = mcx_cmdq_in(cqe);
5397 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_SQ);
5398 	in->cmd_op_mod = htobe16(0);
5399 	in->cmd_sqn = htobe32(sc->sc_sqn);
5400 
5401 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5402 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5403 	    &cqe->cq_output_ptr, token) != 0) {
5404 		printf(", unable to allocate query sq mailboxes\n");
5405 		return (-1);
5406 	}
5407 
5408 	mcx_cmdq_mboxes_sign(&mxm, 1);
5409 
5410 	mcx_cmdq_post(sc, cqe, 0);
5411 	error = mcx_cmdq_poll(sc, cqe, 1000);
5412 	if (error != 0) {
5413 		printf("%s: query sq timeout\n", DEVNAME(sc));
5414 		goto free;
5415 	}
5416 	error = mcx_cmdq_verify(cqe);
5417 	if (error != 0) {
5418 		printf("%s: query sq reply corrupt\n", DEVNAME(sc));
5419 		goto free;
5420 	}
5421 
5422 	out = mcx_cmdq_out(cqe);
5423 	switch (out->cmd_status) {
5424 	case MCX_CQ_STATUS_OK:
5425 		break;
5426 	default:
5427 		printf("%s: query sq failed (%x/%x)\n", DEVNAME(sc),
5428 		    out->cmd_status, be32toh(out->cmd_syndrome));
5429 		error = -1;
5430 		goto free;
5431 	}
5432 
5433 	mbout = (struct mcx_cmd_query_sq_mb_out *)
5434 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5447 	dump = (uint8_t *)mbout;
5448 	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
5449 		printf("%.2x ", dump[i]);
5450 		if (i % 16 == 15)
5451 			printf("\n");
5452 	}
5453 
5454 free:
5455 	mcx_cq_mboxes_free(sc, &mxm);
5456 	return (error);
5457 }
5458 
5459 static int
5460 mcx_dump_counters(struct mcx_softc *sc)
5461 {
5462 	struct mcx_dmamem mxm;
5463 	struct mcx_cmdq_entry *cqe;
5464 	struct mcx_cmd_query_vport_counters_in *in;
5465 	struct mcx_cmd_query_vport_counters_mb_in *mbin;
5466 	struct mcx_cmd_query_vport_counters_out *out;
5467 	struct mcx_nic_vport_counters *counters;
5468 	int error, token;
5469 
5470 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5471 	token = mcx_cmdq_token(sc);
5472 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5473 	    sizeof(*out) + sizeof(*counters), token);
5474 
5475 	in = mcx_cmdq_in(cqe);
5476 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_VPORT_COUNTERS);
5477 	in->cmd_op_mod = htobe16(0);
5478 
5479 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
5480 		printf(", unable to allocate query nic vport counters mailboxen\n");
5481 		return (-1);
5482 	}
5483 	cqe->cq_input_ptr = cqe->cq_output_ptr;
5484 
5485 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5486 	mbin->cmd_clear = 0x80;
5487 
5488 	mcx_cmdq_mboxes_sign(&mxm, 1);
5489 	mcx_cmdq_post(sc, cqe, 0);
5490 
5491 	error = mcx_cmdq_poll(sc, cqe, 1000);
5492 	if (error != 0) {
5493 		printf("%s: query nic vport counters timeout\n", DEVNAME(sc));
5494 		goto free;
5495 	}
5496 	if (mcx_cmdq_verify(cqe) != 0) {
5497 		printf("%s: query nic vport counters command corrupt\n",
5498 		    DEVNAME(sc));
		error = -1;
5499 		goto free;
5500 	}
5501 
5502 	out = mcx_cmdq_out(cqe);
5503 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5504 		printf("%s: query nic vport counters failed (%x, %x)\n",
5505 		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5506 		error = -1;
5507 		goto free;
5508 	}
5509 
5510 	counters = (struct mcx_nic_vport_counters *)
5511 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	/* use | rather than + so carries can't cancel out to zero */
5512 	if (counters->rx_bcast.packets | counters->tx_bcast.packets |
5513 	    counters->rx_ucast.packets | counters->tx_ucast.packets |
5514 	    counters->rx_err.packets | counters->tx_err.packets)
5515 		printf("%s: err %llx/%llx uc %llx/%llx bc %llx/%llx\n",
5516 		    DEVNAME(sc),
5517 		    (unsigned long long)be64toh(counters->tx_err.packets),
5518 		    (unsigned long long)be64toh(counters->rx_err.packets),
5519 		    (unsigned long long)be64toh(counters->tx_ucast.packets),
5520 		    (unsigned long long)be64toh(counters->rx_ucast.packets),
5521 		    (unsigned long long)be64toh(counters->tx_bcast.packets),
5522 		    (unsigned long long)be64toh(counters->rx_bcast.packets));
5523 free:
5524 	mcx_dmamem_free(sc, &mxm);
5525 
5526 	return (error);
5527 }
5528 
5529 static int
5530 mcx_dump_flow_counter(struct mcx_softc *sc, int index, const char *what)
5531 {
5532 	struct mcx_dmamem mxm;
5533 	struct mcx_cmdq_entry *cqe;
5534 	struct mcx_cmd_query_flow_counter_in *in;
5535 	struct mcx_cmd_query_flow_counter_mb_in *mbin;
5536 	struct mcx_cmd_query_flow_counter_out *out;
5537 	struct mcx_counter *counters;
5538 	int error, token;
5539 
5540 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5541 	token = mcx_cmdq_token(sc);
5542 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out) +
5543 	    sizeof(*counters), token);
5544 
5545 	in = mcx_cmdq_in(cqe);
5546 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_COUNTER);
5547 	in->cmd_op_mod = htobe16(0);
5548 
5549 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
5550 		printf("%s: unable to allocate query flow counter "
		    "mailboxes\n", DEVNAME(sc));
5551 		return (-1);
5552 	}
5553 	cqe->cq_input_ptr = cqe->cq_output_ptr;
5554 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5555 	mbin->cmd_flow_counter_id = htobe16(sc->sc_flow_counter_id[index]);
5556 	mbin->cmd_clear = 0x80;
5557 
5558 	mcx_cmdq_mboxes_sign(&mxm, 1);
5559 	mcx_cmdq_post(sc, cqe, 0);
5560 
5561 	error = mcx_cmdq_poll(sc, cqe, 1000);
5562 	if (error != 0) {
5563 		printf("%s: query flow counter timeout\n", DEVNAME(sc));
5564 		goto free;
5565 	}
5566 	if (mcx_cmdq_verify(cqe) != 0) {
5567 		printf("%s: query flow counter command corrupt\n", DEVNAME(sc));
5568 		goto free;
5569 	}
5570 
5571 	out = mcx_cmdq_out(cqe);
5572 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5573 		printf("%s: query flow counter failed (%x, %x)\n", DEVNAME(sc),
5574 		    out->cmd_status, out->cmd_syndrome);
5575 		error = -1;
5576 		goto free;
5577 	}
5578 
5579 	counters = (struct mcx_counter *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5580 	if (counters->packets)
5581 		printf("%s: %s inflow %llx\n", DEVNAME(sc), what,
5582 		    (unsigned long long)be64toh(counters->packets));
5583 free:
5584 	mcx_dmamem_free(sc, &mxm);
5585 
5586 	return (error);
5587 }
5588 
5589 #endif
5590 
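/*
 * post receive buffers to the rq.  each work queue entry is a single
 * scatter entry pointing at a cluster mbuf; once a batch has been
 * filled in, writing the producer counter to the doorbell record
 * hands the new entries to the hardware.  note that with plain
 * MGETHDR/MCLGET the usable buffer is capped at MCLBYTES, which is
 * smaller than the 9500 byte hard mtu (the #if 0 MCLGETI path below
 * would size the cluster from bufsize instead).
 */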
5591 static int
5592 mcx_rx_fill_slots(struct mcx_softc *sc, void *ring, struct mcx_slot *slots,
5593     uint *prod, int bufsize, uint nslots)
5594 {
5595 	struct mcx_rq_entry *rqe;
5596 	struct mcx_slot *ms;
5597 	struct mbuf *m;
5598 	uint slot, p, fills;
5599 
5600 	p = *prod;
5601 	slot = (p % (1 << MCX_LOG_RQ_SIZE));
5602 	rqe = ring;
5603 	for (fills = 0; fills < nslots; fills++) {
5604 		ms = &slots[slot];
5605 #if 0
5606 		m = MCLGETI(NULL, M_DONTWAIT, NULL, bufsize + ETHER_ALIGN);
5607 		if (m == NULL)
5608 			break;
5609 #else
5610 		m = NULL;
5611 		MGETHDR(m, M_DONTWAIT, MT_DATA);
5612 		if (m == NULL)
5613 			break;
5614 
5615 		MCLGET(m, M_DONTWAIT);
5616 		if ((m->m_flags & M_EXT) == 0) {
5617 			m_freem(m);
5618 			break;
5619 		}
5620 #endif
5621 
5622 		m->m_data += ETHER_ALIGN;
5623 		m->m_len = m->m_pkthdr.len = m->m_ext.ext_size - ETHER_ALIGN;
5624 		if (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
5625 		    BUS_DMA_NOWAIT) != 0) {
5626 			m_freem(m);
5627 			break;
5628 		}
5629 		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
5630 		ms->ms_m = m;
5631 
5632 		rqe[slot].rqe_byte_count = htobe32(m->m_len);
5633 		rqe[slot].rqe_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
5634 		rqe[slot].rqe_lkey = htobe32(sc->sc_lkey);
5635 
5636 		p++;
5637 		slot++;
5638 		if (slot == (1 << MCX_LOG_RQ_SIZE))
5639 			slot = 0;
5640 	}
5641 
5642 	if (fills != 0) {
5643 		*sc->sc_rx_doorbell = htobe32(p & MCX_WQ_DOORBELL_MASK);
5644 		/*
		 * a memory barrier may be needed here to make sure the
		 * wqe writes are visible to the device before the
		 * doorbell counter update
		 */
5645 	}
5646 
5647 	*prod = p;
5648 
5649 	return (nslots - fills);
5650 }
5651 
5652 static int
5653 mcx_rx_fill(struct mcx_softc *sc)
5654 {
5655 	u_int slots;
5656 
5657 	slots = mcx_rxr_get(&sc->sc_rxr, (1 << MCX_LOG_RQ_SIZE));
5658 	if (slots == 0)
5659 		return (1);
5660 
5661 	slots = mcx_rx_fill_slots(sc, MCX_DMA_KVA(&sc->sc_rq_mem),
5662 	    sc->sc_rx_slots, &sc->sc_rx_prod, sc->sc_hardmtu, slots);
5663 	mcx_rxr_put(&sc->sc_rxr, slots);
5664 	return (0);
5665 }
5666 
5667 void
5668 mcx_refill(void *xsc)
5669 {
5670 	struct mcx_softc *sc = xsc;
5671 
5672 	mcx_rx_fill(sc);
5673 
5674 	if (mcx_rxr_inuse(&sc->sc_rxr) == 0)
5675 		callout_schedule(&sc->sc_rx_refill, 1);
5676 }
5677 
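/*
 * reclaim a transmitted packet.  cq_wqe_count identifies the first sq
 * slot the packet used; the packet takes one slot for the initial
 * entry plus extra slots for any data segments beyond the first, at
 * MCX_SQ_SEGS_PER_SLOT per slot.  with four segments per slot and
 * dm_nsegs > 1, the 1 + (dm_nsegs + 2) / 4 computed below is the
 * same as 1 + howmany(dm_nsegs - 1, 4).
 */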
5678 void
5679 mcx_process_txeof(struct mcx_softc *sc, struct mcx_cq_entry *cqe, int *txfree)
5680 {
5681 	struct mcx_slot *ms;
5682 	bus_dmamap_t map;
5683 	int slot, slots;
5684 
5685 	slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_SQ_SIZE);
5686 
5687 	ms = &sc->sc_tx_slots[slot];
5688 	map = ms->ms_map;
5689 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
5690 	    BUS_DMASYNC_POSTWRITE);
5691 
5692 	slots = 1;
5693 	if (map->dm_nsegs > 1)
5694 		slots += (map->dm_nsegs+2) / MCX_SQ_SEGS_PER_SLOT;
5695 
5696 	(*txfree) += slots;
5697 	bus_dmamap_unload(sc->sc_dmat, map);
5698 	m_freem(ms->ms_m);
5699 	ms->ms_m = NULL;
5700 }
5701 
5702 static uint64_t
5703 mcx_uptime(void)
5704 {
5705 	struct timespec ts;
5706 
5707 	nanouptime(&ts);
5708 
5709 	return ((uint64_t)ts.tv_sec * 1000000000 + (uint64_t)ts.tv_nsec);
5710 }
5711 
5712 static void
5713 mcx_calibrate_first(struct mcx_softc *sc)
5714 {
5715 	struct mcx_calibration *c = &sc->sc_calibration[0];
5716 
5717 	sc->sc_calibration_gen = 0;
5718 
5719 	c->c_ubase = mcx_uptime();
5720 	c->c_tbase = mcx_timer(sc);
5721 	c->c_tdiff = 0;
5722 
5723 	callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_FIRST * hz);
5724 }
5725 
5726 #define MCX_TIMESTAMP_SHIFT 10
5727 
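/*
 * periodic clock calibration: sample the device's free-running timer
 * against the system uptime, keeping the two most recent samples in
 * sc_calibration[].  readers select a record via sc_calibration_gen,
 * so the membar_producer() here pairs with the membar_consumer() in
 * mcx_process_cq() to publish a complete record before the new
 * generation.  the diffs are pre-shifted by MCX_TIMESTAMP_SHIFT,
 * presumably to keep the rx timestamp conversion within 64 bits.
 */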
5728 static void
5729 mcx_calibrate(void *arg)
5730 {
5731 	struct mcx_softc *sc = arg;
5732 	struct mcx_calibration *nc, *pc;
5733 	unsigned int gen;
5734 
5735 	if (!ISSET(sc->sc_ec.ec_if.if_flags, IFF_RUNNING))
5736 		return;
5737 
5738 	callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_NORMAL * hz);
5739 
5740 	gen = sc->sc_calibration_gen;
5741 	pc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
5742 	gen++;
5743 	nc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
5744 
5745 	nc->c_uptime = pc->c_ubase;
5746 	nc->c_timestamp = pc->c_tbase;
5747 
5748 	nc->c_ubase = mcx_uptime();
5749 	nc->c_tbase = mcx_timer(sc);
5750 
5751 	nc->c_udiff = (nc->c_ubase - nc->c_uptime) >> MCX_TIMESTAMP_SHIFT;
5752 	nc->c_tdiff = (nc->c_tbase - nc->c_timestamp) >> MCX_TIMESTAMP_SHIFT;
5753 
5754 	membar_producer();
5755 	sc->sc_calibration_gen = gen;
5756 }
5757 
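/*
 * hand a completed receive to the stack: sync and unload the dma map,
 * set the packet length from the completion entry, and queue the mbuf
 * for delivery.  flow id and hardware timestamping are stubbed out
 * (#if 0) in this port.
 */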
5758 static int
5759 mcx_process_rx(struct mcx_softc *sc, struct mcx_cq_entry *cqe,
5760     struct mcx_mbufq *mq, const struct mcx_calibration *c)
5761 {
5762 	struct mcx_slot *ms;
5763 	struct mbuf *m;
5764 	int slot;
5765 
5766 	slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_RQ_SIZE);
5767 
5768 	ms = &sc->sc_rx_slots[slot];
5769 	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, ms->ms_map->dm_mapsize,
5770 	    BUS_DMASYNC_POSTREAD);
5771 	bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
5772 
5773 	m = ms->ms_m;
5774 	ms->ms_m = NULL;
5775 
5776 	m_set_rcvif(m, &sc->sc_ec.ec_if);
5777 	m->m_pkthdr.len = m->m_len = be32dec(&cqe->cq_byte_cnt);
5778 
5779 #if 0
5780 	if (cqe->cq_rx_hash_type) {
5781 		m->m_pkthdr.ph_flowid = M_FLOWID_VALID |
5782 		    be32toh(cqe->cq_rx_hash);
5783 	}
5784 #endif
5785 
5786 #if 0
5787 	if (c->c_tdiff) {
5788 		uint64_t t = be64dec(&cqe->cq_timestamp) - c->c_timestamp;
5789 		t *= c->c_udiff;
5790 		t /= c->c_tdiff;
5791 
5792 		m->m_pkthdr.ph_timestamp = c->c_uptime + t;
5793 		SET(m->m_pkthdr.csum_flags, M_TIMESTAMP);
5794 	}
5795 #endif
5796 
5797 	MBUFQ_ENQUEUE(mq, m);
5798 
5799 	return (1);
5800 }
5801 
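/*
 * the ownership bit in a completion entry flips on every pass around
 * the ring, so an entry is software-owned when its owner bit matches
 * the wrap count (cq_cons >> MCX_LOG_CQ_SIZE) of the current pass.
 */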
5802 static struct mcx_cq_entry *
5803 mcx_next_cq_entry(struct mcx_softc *sc, struct mcx_cq *cq)
5804 {
5805 	struct mcx_cq_entry *cqe;
5806 	int next;
5807 
5808 	cqe = (struct mcx_cq_entry *)MCX_DMA_KVA(&cq->cq_mem);
5809 	next = cq->cq_cons % (1 << MCX_LOG_CQ_SIZE);
5810 
5811 	if ((cqe[next].cq_opcode_owner & MCX_CQ_ENTRY_FLAG_OWNER) ==
5812 	    ((cq->cq_cons >> MCX_LOG_CQ_SIZE) & 1)) {
5813 		return (&cqe[next]);
5814 	}
5815 
5816 	return (NULL);
5817 }
5818 
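/*
 * re-arm the completion queue: update the doorbell record with the
 * consumer counter and arm command, then write the same value plus
 * the cq number to the uar doorbell register so the device generates
 * an event for the next completion.
 */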
5819 static void
5820 mcx_arm_cq(struct mcx_softc *sc, struct mcx_cq *cq)
5821 {
5822 	bus_size_t offset;
5823 	uint32_t val;
5824 	uint64_t uval;
5825 
5826 	/* different uar per cq? */
5827 	offset = (MCX_PAGE_SIZE * sc->sc_uar);
5828 	val = ((cq->cq_count) & 3) << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT;
5829 	val |= (cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
5830 
5831 	cq->cq_doorbell[0] = htobe32(cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
5832 	cq->cq_doorbell[1] = htobe32(val);
5833 
5834 	uval = val;
5835 	uval <<= 32;
5836 	uval |= cq->cq_n;
5837 	bus_space_write_8(sc->sc_memt, sc->sc_memh,
5838 	    offset + MCX_UAR_CQ_DOORBELL, htobe64(uval));
5839 	mcx_bar(sc, offset + MCX_UAR_CQ_DOORBELL, sizeof(uint64_t),
5840 	    BUS_SPACE_BARRIER_WRITE);
5841 }
5842 
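/*
 * drain a completion queue: free transmitted packets, collect
 * received packets on a local mbuf queue, then re-arm the cq before
 * passing the rx batch up the stack and refilling the rq.
 */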
5843 void
5844 mcx_process_cq(struct mcx_softc *sc, struct mcx_cq *cq)
5845 {
5846 	struct ifnet *ifp = &sc->sc_ec.ec_if;
5847 	const struct mcx_calibration *c;
5848 	unsigned int gen;
5849 	struct mcx_cq_entry *cqe;
5850 	struct mcx_mbufq mq;
5851 	struct mbuf *m;
5852 	int rxfree, txfree;
5853 
5854 	MBUFQ_INIT(&mq);
5855 
5856 	gen = sc->sc_calibration_gen;
5857 	membar_consumer();
5858 	c = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
5859 
5860 	rxfree = 0;
5861 	txfree = 0;
5862 	while ((cqe = mcx_next_cq_entry(sc, cq))) {
5863 		uint8_t opcode;
5864 		opcode = (cqe->cq_opcode_owner >> MCX_CQ_ENTRY_OPCODE_SHIFT);
5865 		switch (opcode) {
5866 		case MCX_CQ_ENTRY_OPCODE_REQ:
5867 			mcx_process_txeof(sc, cqe, &txfree);
5868 			break;
5869 		case MCX_CQ_ENTRY_OPCODE_SEND:
5870 			rxfree += mcx_process_rx(sc, cqe, &mq, c);
5871 			break;
5872 		case MCX_CQ_ENTRY_OPCODE_REQ_ERR:
5873 		case MCX_CQ_ENTRY_OPCODE_SEND_ERR:
5874 			/* uint8_t *cqp = (uint8_t *)cqe; */
5875 			/* printf("%s: cq completion error: %x\n", DEVNAME(sc), cqp[0x37]); */
5876 			break;
5877 
5878 		default:
5879 			/* printf("%s: cq completion opcode %x??\n", DEVNAME(sc), opcode); */
5880 			break;
5881 		}
5882 
5883 		cq->cq_cons++;
5884 	}
5885 
5886 	cq->cq_count++;
5887 	mcx_arm_cq(sc, cq);
5888 
5889 	if (rxfree > 0) {
5890 		mcx_rxr_put(&sc->sc_rxr, rxfree);
5891 		while (MBUFQ_FIRST(&mq) != NULL) {
5892 			MBUFQ_DEQUEUE(&mq, m);
5893 			if_percpuq_enqueue(ifp->if_percpuq, m);
5894 		}
5895 
5896 		mcx_rx_fill(sc);
5897 
5898 		if (mcx_rxr_inuse(&sc->sc_rxr) == 0)
5899 			callout_schedule(&sc->sc_rx_refill, 1);
5900 	}
5901 	if (txfree > 0) {
5902 		sc->sc_tx_cons += txfree;
5903 		if_schedule_deferred_start(ifp);
5904 	}
5905 }
5906 
5907 static void
5908 mcx_arm_eq(struct mcx_softc *sc)
5909 {
5910 	bus_size_t offset;
5911 	uint32_t val;
5912 
5913 	offset = (MCX_PAGE_SIZE * sc->sc_uar) + MCX_UAR_EQ_DOORBELL_ARM;
5914 	val = (sc->sc_eqn << 24) | (sc->sc_eq_cons & 0xffffff);
5915 
5916 	mcx_wr(sc, offset, val);
5917 	/*
	 * a bus_space_barrier() may be needed here to push the
	 * doorbell write out to the device
	 */
5918 }
5919 
5920 static struct mcx_eq_entry *
5921 mcx_next_eq_entry(struct mcx_softc *sc)
5922 {
5923 	struct mcx_eq_entry *eqe;
5924 	int next;
5925 
5926 	eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&sc->sc_eq_mem);
5927 	next = sc->sc_eq_cons % (1 << MCX_LOG_EQ_SIZE);
5928 	if ((eqe[next].eq_owner & 1) == ((sc->sc_eq_cons >> MCX_LOG_EQ_SIZE) & 1)) {
5929 		sc->sc_eq_cons++;
5930 		return (&eqe[next]);
5931 	}
5932 	return (NULL);
5933 }
5934 
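/*
 * interrupt handler: drain the single event queue, dispatching
 * completion events to the matching cq and port change events to the
 * workqueue, then re-arm the eq.
 */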
5935 int
5936 mcx_intr(void *xsc)
5937 {
5938 	struct mcx_softc *sc = (struct mcx_softc *)xsc;
5939 	struct mcx_eq_entry *eqe;
5940 	int i, cq;
5941 
5942 	while ((eqe = mcx_next_eq_entry(sc))) {
5943 		switch (eqe->eq_event_type) {
5944 		case MCX_EVENT_TYPE_COMPLETION:
5945 			cq = be32toh(eqe->eq_event_data[6]);
5946 			for (i = 0; i < sc->sc_num_cq; i++) {
5947 				if (sc->sc_cq[i].cq_n == cq) {
5948 					mcx_process_cq(sc, &sc->sc_cq[i]);
5949 					break;
5950 				}
5951 			}
5952 			break;
5953 
5954 		case MCX_EVENT_TYPE_LAST_WQE:
5955 			/* printf("%s: last wqe reached?\n", DEVNAME(sc)); */
5956 			break;
5957 
5958 		case MCX_EVENT_TYPE_CQ_ERROR:
5959 			/* printf("%s: cq error\n", DEVNAME(sc)); */
5960 			break;
5961 
5962 		case MCX_EVENT_TYPE_CMD_COMPLETION:
5963 			/* wakeup probably */
5964 			break;
5965 
5966 		case MCX_EVENT_TYPE_PORT_CHANGE:
5967 			workqueue_enqueue(sc->sc_workq, &sc->sc_port_change, NULL);
5968 			break;
5969 
5970 		default:
5971 			/* printf("%s: something happened\n", DEVNAME(sc)); */
5972 			break;
5973 		}
5974 	}
5975 	mcx_arm_eq(sc);
5976 	return (1);
5977 }
5978 
5979 static void
5980 mcx_free_slots(struct mcx_softc *sc, struct mcx_slot *slots, int allocated,
5981     int total)
5982 {
5983 	struct mcx_slot *ms;
5984 
5985 	int i = allocated;
5986 	while (i-- > 0) {
5987 		ms = &slots[i];
5988 		if (ms->ms_m != NULL) {
5989 			/* the map is still loaded; unload before destroy */
5990 			bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
5991 			m_freem(ms->ms_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
5991 	}
5992 	kmem_free(slots, total * sizeof(*ms));
5993 }
5994 
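/*
 * bring the interface up: allocate slots and dma maps, create the
 * queue objects in dependency order (cq, then tis/sq and rq/tir),
 * build the flow table with its promisc, allmulti and mac address
 * groups, install the unicast/broadcast/multicast entries, and
 * finally move the queues to ready state and fill the rx ring.
 */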
5995 static int
5996 mcx_init(struct ifnet *ifp)
5997 {
5998 	struct mcx_softc *sc = ifp->if_softc;
5999 	struct mcx_slot *ms;
6000 	int i, start;
6001 	struct mcx_flow_match match_crit;
6002 
6003 	if (ISSET(ifp->if_flags, IFF_RUNNING))
6004 		mcx_stop(ifp, 0);
6005 
6006 	sc->sc_rx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_RQ_SIZE),
6007 	    KM_SLEEP);
6008 
6009 	for (i = 0; i < (1 << MCX_LOG_RQ_SIZE); i++) {
6010 		ms = &sc->sc_rx_slots[i];
6011 		if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu, 1,
6012 		    sc->sc_hardmtu, 0,
6013 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
6014 		    &ms->ms_map) != 0) {
6015 			printf("%s: failed to allocate rx dma maps\n",
6016 			    DEVNAME(sc));
6017 			goto destroy_rx_slots;
6018 		}
6019 	}
6020 
6021 	sc->sc_tx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_SQ_SIZE),
6022 	    KM_SLEEP);
6023 
6024 	for (i = 0; i < (1 << MCX_LOG_SQ_SIZE); i++) {
6025 		ms = &sc->sc_tx_slots[i];
6026 		if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu,
6027 		    MCX_SQ_MAX_SEGMENTS, sc->sc_hardmtu, 0,
6028 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
6029 		    &ms->ms_map) != 0) {
6030 			printf("%s: failed to allocate tx dma maps\n",
6031 			    DEVNAME(sc));
6032 			goto destroy_tx_slots;
6033 		}
6034 	}
6035 
6036 	if (mcx_create_cq(sc, sc->sc_eqn) != 0)
6037 		goto down;
6038 
6039 	/* send queue */
6040 	if (mcx_create_tis(sc) != 0)
6041 		goto down;
6042 
6043 	if (mcx_create_sq(sc, sc->sc_cq[0].cq_n) != 0)
6044 		goto down;
6045 
6046 	/* receive queue */
6047 	if (mcx_create_rq(sc, sc->sc_cq[0].cq_n) != 0)
6048 		goto down;
6049 
6050 	if (mcx_create_tir(sc) != 0)
6051 		goto down;
6052 
6053 	if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE) != 0)
6054 		goto down;
6055 
6056 	/* promisc flow group */
6057 	start = 0;
6058 	memset(&match_crit, 0, sizeof(match_crit));
6059 	if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_PROMISC, start, 1,
6060 	    0, &match_crit) != 0)
6061 		goto down;
6062 	sc->sc_promisc_flow_enabled = 0;
6063 	start++;
6064 
6065 	/* all multicast flow group */
6066 	match_crit.mc_dest_mac[0] = 0x01;
6067 	if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_ALLMULTI, start, 1,
6068 	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
6069 		goto down;
6070 	sc->sc_allmulti_flow_enabled = 0;
6071 	start++;
6072 
6073 	/* mac address matching flow group */
6074 	memset(&match_crit.mc_dest_mac, 0xff, sizeof(match_crit.mc_dest_mac));
6075 	if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_MAC, start,
6076 	    (1 << MCX_LOG_FLOW_TABLE_SIZE) - start,
6077 	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
6078 		goto down;
6079 
6080 	/* flow table entries for unicast and broadcast */
6081 	start = 0;
6082 	if (mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, start,
6083 	    LLADDR(satosdl(ifp->if_dl->ifa_addr))) != 0)
6084 		goto down;
6085 	start++;
6086 
6087 	if (mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, start,
6088 	    etherbroadcastaddr) != 0)
6089 		goto down;
6090 	start++;
6091 
6092 	/* multicast entries go after that */
6093 	sc->sc_mcast_flow_base = start;
6094 
6095 	/* re-add any existing multicast flows */
6096 	for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6097 		if (sc->sc_mcast_flows[i][0] != 0) {
6098 			mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
6099 			    sc->sc_mcast_flow_base + i,
6100 			    sc->sc_mcast_flows[i]);
6101 		}
6102 	}
6103 
6104 	if (mcx_set_flow_table_root(sc) != 0)
6105 		goto down;
6106 
6107 	/* start the queues */
6108 	if (mcx_ready_sq(sc) != 0)
6109 		goto down;
6110 
6111 	if (mcx_ready_rq(sc) != 0)
6112 		goto down;
6113 
6114 	mcx_rxr_init(&sc->sc_rxr, 1, (1 << MCX_LOG_RQ_SIZE));
6115 	sc->sc_rx_prod = 0;
6116 	mcx_rx_fill(sc);
6117 
6118 	mcx_calibrate_first(sc);
6119 
6120 	SET(ifp->if_flags, IFF_RUNNING);
6121 
6122 	sc->sc_tx_cons = 0;
6123 	sc->sc_tx_prod = 0;
6124 	CLR(ifp->if_flags, IFF_OACTIVE);
6125 	if_schedule_deferred_start(ifp);
6126 
6127 	return 0;
6128 destroy_tx_slots:
6129 	mcx_free_slots(sc, sc->sc_tx_slots, i, (1 << MCX_LOG_SQ_SIZE));
6130 	sc->sc_tx_slots = NULL;
6131 
6132 	i = (1 << MCX_LOG_RQ_SIZE);
6133 destroy_rx_slots:
6134 	mcx_free_slots(sc, sc->sc_rx_slots, i, (1 << MCX_LOG_RQ_SIZE));
6135 	sc->sc_rx_slots = NULL;
6136 down:
6137 	mcx_stop(ifp, 0);
6138 	return EIO;
6139 }
6140 
6141 static void
6142 mcx_stop(struct ifnet *ifp, int disable)
6143 {
6144 	struct mcx_softc *sc = ifp->if_softc;
6145 	int group, i;
6146 
6147 	CLR(ifp->if_flags, IFF_RUNNING);
6148 
6149 	/*
6150 	 * delete flow table entries first, so no packets can arrive
6151 	 * after the barriers
6152 	 */
6153 	if (sc->sc_promisc_flow_enabled)
6154 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
6155 	if (sc->sc_allmulti_flow_enabled)
6156 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
6157 	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 0);
6158 	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 1);
6159 	for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6160 		if (sc->sc_mcast_flows[i][0] != 0) {
6161 			mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
6162 			    sc->sc_mcast_flow_base + i);
6163 		}
6164 	}
6165 
6166 	callout_halt(&sc->sc_calibrate, NULL);
6167 
6168 	for (group = 0; group < MCX_NUM_FLOW_GROUPS; group++) {
6169 		if (sc->sc_flow_group_id[group] != -1)
6170 			mcx_destroy_flow_group(sc,
6171 			    sc->sc_flow_group_id[group]);
6172 	}
6173 
6174 	if (sc->sc_flow_table_id != -1)
6175 		mcx_destroy_flow_table(sc);
6176 
6177 	if (sc->sc_tirn != 0)
6178 		mcx_destroy_tir(sc);
6179 	if (sc->sc_rqn != 0)
6180 		mcx_destroy_rq(sc);
6181 
6182 	if (sc->sc_sqn != 0)
6183 		mcx_destroy_sq(sc);
6184 	if (sc->sc_tisn != 0)
6185 		mcx_destroy_tis(sc);
6186 
6187 	for (i = 0; i < sc->sc_num_cq; i++)
6188 		mcx_destroy_cq(sc, i);
6189 	sc->sc_num_cq = 0;
6190 
6191 	if (sc->sc_tx_slots != NULL) {
6192 		mcx_free_slots(sc, sc->sc_tx_slots, (1 << MCX_LOG_SQ_SIZE),
6193 		    (1 << MCX_LOG_SQ_SIZE));
6194 		sc->sc_tx_slots = NULL;
6195 	}
6196 	if (sc->sc_rx_slots != NULL) {
6197 		mcx_free_slots(sc, sc->sc_rx_slots, (1 << MCX_LOG_RQ_SIZE),
6198 		    (1 << MCX_LOG_RQ_SIZE));
6199 		sc->sc_rx_slots = NULL;
6200 	}
6201 }
6202 
6203 static int
6204 mcx_ioctl(struct ifnet *ifp, u_long cmd, void *data)
6205 {
6206 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6207 	struct ifreq *ifr = (struct ifreq *)data;
6208 	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
6209 	int s, i, error = 0;
6210 
6211 	s = splnet();
6212 	switch (cmd) {
6213 
6214 	case SIOCADDMULTI:
6215 		if (ether_addmulti(ifreq_getaddr(cmd, ifr), &sc->sc_ec) == ENETRESET) {
6216 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
6217 			if (error != 0)
6218 				break;	/* splx() before returning */
6219 
6220 			for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6221 				if (sc->sc_mcast_flows[i][0] == 0) {
6222 					memcpy(sc->sc_mcast_flows[i], addrlo,
6223 					    ETHER_ADDR_LEN);
6224 					if (ISSET(ifp->if_flags, IFF_RUNNING)) {
6225 						mcx_set_flow_table_entry(sc,
6226 						    MCX_FLOW_GROUP_MAC,
6227 						    sc->sc_mcast_flow_base + i,
6228 						    sc->sc_mcast_flows[i]);
6229 					}
6230 					break;
6231 				}
6232 			}
6233 
6234 			if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
6235 				if (i == MCX_NUM_MCAST_FLOWS) {
6236 					SET(ifp->if_flags, IFF_ALLMULTI);
6237 					sc->sc_extra_mcast++;
6238 					error = ENETRESET;
6239 				}
6240 
				/* ranges can't be matched by flow table entries */
6241 				if (sc->sc_ec.ec_multirangecnt > 0) {
6242 					SET(ifp->if_flags, IFF_ALLMULTI);
6243 					error = ENETRESET;
6244 				}
6245 			}
6246 		}
6247 		break;
6248 
6249 	case SIOCDELMULTI:
6250 		if (ether_delmulti(ifreq_getaddr(cmd, ifr), &sc->sc_ec) == ENETRESET) {
6251 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
6252 			if (error != 0)
6253 				break;	/* splx() before returning */
6254 
6255 			for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6256 				if (memcmp(sc->sc_mcast_flows[i], addrlo,
6257 				    ETHER_ADDR_LEN) == 0) {
6258 					if (ISSET(ifp->if_flags, IFF_RUNNING)) {
6259 						mcx_delete_flow_table_entry(sc,
6260 						    MCX_FLOW_GROUP_MAC,
6261 						    sc->sc_mcast_flow_base + i);
6262 					}
6263 					sc->sc_mcast_flows[i][0] = 0;
6264 					break;
6265 				}
6266 			}
6267 
6268 			if (i == MCX_NUM_MCAST_FLOWS)
6269 				sc->sc_extra_mcast--;
6270 
6271 			if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
6272 			    (sc->sc_extra_mcast == 0) &&
6273 			    (sc->sc_ec.ec_multirangecnt == 0)) {
6274 				CLR(ifp->if_flags, IFF_ALLMULTI);
6275 				error = ENETRESET;
6276 			}
6277 		}
6278 		break;
6279 
6280 	default:
6281 		error = ether_ioctl(ifp, cmd, data);
6282 	}
6283 
6284 	if (error == ENETRESET) {
6285 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6286 		    (IFF_UP | IFF_RUNNING))
6287 			mcx_iff(sc);
6288 		error = 0;
6289 	}
6290 	splx(s);
6291 
6292 	return (error);
6293 }
6294 
6295 #if 0
6296 static int
6297 mcx_get_sffpage(struct ifnet *ifp, struct if_sffpage *sff)
6298 {
6299 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6300 	struct mcx_reg_mcia mcia;
6301 	struct mcx_reg_pmlp pmlp;
6302 	int offset, error;
6303 
6304 	/* get module number */
6305 	memset(&pmlp, 0, sizeof(pmlp));
6306 	pmlp.rp_local_port = 1;
6307 	error = mcx_access_hca_reg(sc, MCX_REG_PMLP, MCX_REG_OP_READ, &pmlp,
6308 	    sizeof(pmlp));
6309 	if (error != 0) {
6310 		printf("%s: unable to get eeprom module number\n",
6311 		    DEVNAME(sc));
6312 		return error;
6313 	}
6314 
6315 	for (offset = 0; offset < 256; offset += MCX_MCIA_EEPROM_BYTES) {
6316 		memset(&mcia, 0, sizeof(mcia));
6317 		mcia.rm_l = 0;
6318 		mcia.rm_module = be32toh(pmlp.rp_lane0_mapping) &
6319 		    MCX_PMLP_MODULE_NUM_MASK;
6320 		mcia.rm_i2c_addr = sff->sff_addr / 2;	/* apparently */
6321 		mcia.rm_page_num = sff->sff_page;
6322 		mcia.rm_dev_addr = htobe16(offset);
6323 		mcia.rm_size = htobe16(MCX_MCIA_EEPROM_BYTES);
6324 
6325 		error = mcx_access_hca_reg(sc, MCX_REG_MCIA, MCX_REG_OP_READ,
6326 		    &mcia, sizeof(mcia));
6327 		if (error != 0) {
6328 			printf("%s: unable to read eeprom at %x\n",
6329 			    DEVNAME(sc), offset);
6330 			return error;
6331 		}
6332 
6333 		memcpy(sff->sff_data + offset, mcia.rm_data,
6334 		    MCX_MCIA_EEPROM_BYTES);
6335 	}
6336 
6337 	return 0;
6338 }
6339 #endif
6340 
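/*
 * load an mbuf chain for transmit.  if it has more segments than the
 * dma map allows (EFBIG), compact it with m_defrag() and retry once
 * before giving up.
 */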
6341 static int
6342 mcx_load_mbuf(struct mcx_softc *sc, struct mcx_slot *ms, struct mbuf *m)
6343 {
6344 	switch (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
6345 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
6346 	case 0:
6347 		break;
6348 
6349 	case EFBIG:
6350 		if (m_defrag(m, M_DONTWAIT) == 0 &&
6351 		    bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
6352 		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
6353 			break;
6354 
6355 		/* FALLTHROUGH */
6356 	default:
6357 		return (1);
6358 	}
6359 
6360 	ms->ms_m = m;
6361 	return (0);
6362 }
6363 
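/*
 * transmit start.  each packet takes at least one sq slot, holding
 * the ctrl segment, an eth segment with MCX_SQ_INLINE_SIZE bytes of
 * inlined headers, and the first data segment; remaining data
 * segments spill into following slots, MCX_SQ_SEGS_PER_SLOT at a
 * time.  after posting, the doorbell record is updated and the first
 * 64 bits of the last wqe are written to the "blue flame" buffer in
 * the uar to kick the hardware.
 */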
6364 static void
6365 mcx_start(struct ifnet *ifp)
6366 {
6367 	struct mcx_softc *sc = ifp->if_softc;
6368 	struct mcx_sq_entry *sq, *sqe;
6369 	struct mcx_sq_entry_seg *sqs;
6370 	struct mcx_slot *ms;
6371 	bus_dmamap_t map;
6372 	struct mbuf *m;
6373 	u_int idx, free, used;
6374 	uint64_t *bf;
6375 	size_t bf_base;
6376 	int i, seg, nseg;
6377 
6378 	bf_base = (sc->sc_uar * MCX_PAGE_SIZE) + MCX_UAR_BF;
6379 
6380 	idx = sc->sc_tx_prod % (1 << MCX_LOG_SQ_SIZE);
6381 	free = (sc->sc_tx_cons + (1 << MCX_LOG_SQ_SIZE)) - sc->sc_tx_prod;
6382 
6383 	used = 0;
6384 	bf = NULL;
6385 	sq = (struct mcx_sq_entry *)MCX_DMA_KVA(&sc->sc_sq_mem);
6386 
6387 	for (;;) {
6388 		if (used + MCX_SQ_ENTRY_MAX_SLOTS >= free) {
6389 			SET(ifp->if_flags, IFF_OACTIVE);
6390 			break;
6391 		}
6392 
6393 		IFQ_DEQUEUE(&ifp->if_snd, m);
6394 		if (m == NULL) {
6395 			break;
6396 		}
6397 
6398 		sqe = sq + idx;
6399 		ms = &sc->sc_tx_slots[idx];
6400 		memset(sqe, 0, sizeof(*sqe));
6401 
6402 		/* ctrl segment */
6403 		sqe->sqe_opcode_index = htobe32(MCX_SQE_WQE_OPCODE_SEND |
6404 		    ((sc->sc_tx_prod & 0xffff) << MCX_SQE_WQE_INDEX_SHIFT));
6405 		/* always generate a completion event */
6406 		sqe->sqe_signature = htobe32(MCX_SQE_CE_CQE_ALWAYS);
6407 
6408 		/* eth segment */
6409 		sqe->sqe_inline_header_size = htobe16(MCX_SQ_INLINE_SIZE);
6410 		m_copydata(m, 0, MCX_SQ_INLINE_SIZE, sqe->sqe_inline_headers);
6411 		m_adj(m, MCX_SQ_INLINE_SIZE);
6412 
6413 		if (mcx_load_mbuf(sc, ms, m) != 0) {
6414 			m_freem(m);
6415 			ifp->if_oerrors++;
6416 			continue;
6417 		}
6418 		bf = (uint64_t *)sqe;
6419 
6420 		if (ifp->if_bpf != NULL)
6421 			bpf_mtap2(ifp->if_bpf, sqe->sqe_inline_headers,
6422 			    MCX_SQ_INLINE_SIZE, m, BPF_D_OUT);
6423 
6424 		map = ms->ms_map;
6425 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
6426 		    BUS_DMASYNC_PREWRITE);
6427 
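		/*
		 * the descriptor count is one unit for the ctrl
		 * segment, presumably two for the eth segment with its
		 * inline headers, plus one per data segment
		 */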
6428 		sqe->sqe_ds_sq_num =
6429 		    htobe32((sc->sc_sqn << MCX_SQE_SQ_NUM_SHIFT) |
6430 		    (map->dm_nsegs + 3));
6431 
6432 		/* data segment - first wqe has one segment */
6433 		sqs = sqe->sqe_segs;
6434 		seg = 0;
6435 		nseg = 1;
6436 		for (i = 0; i < map->dm_nsegs; i++) {
6437 			if (seg == nseg) {
6438 				/* next slot */
6439 				idx++;
6440 				if (idx == (1 << MCX_LOG_SQ_SIZE))
6441 					idx = 0;
6442 				sc->sc_tx_prod++;
6443 				used++;
6444 
6445 				sqs = (struct mcx_sq_entry_seg *)(sq + idx);
6446 				seg = 0;
6447 				nseg = MCX_SQ_SEGS_PER_SLOT;
6448 			}
6449 			sqs[seg].sqs_byte_count =
6450 			    htobe32(map->dm_segs[i].ds_len);
6451 			sqs[seg].sqs_lkey = htobe32(sc->sc_lkey);
6452 			sqs[seg].sqs_addr = htobe64(map->dm_segs[i].ds_addr);
6453 			seg++;
6454 		}
6455 
6456 		idx++;
6457 		if (idx == (1 << MCX_LOG_SQ_SIZE))
6458 			idx = 0;
6459 		sc->sc_tx_prod++;
6460 		used++;
6461 	}
6462 
6463 	if (used) {
6464 		*sc->sc_tx_doorbell = htobe32(sc->sc_tx_prod & MCX_WQ_DOORBELL_MASK);
6465 
6466 		membar_sync();
6467 
6468 		/*
6469 		 * write the first 64 bits of the last sqe we produced
6470 		 * to the blue flame buffer
6471 		 */
6472 		bus_space_write_8(sc->sc_memt, sc->sc_memh,
6473 		    bf_base + sc->sc_bf_offset, *bf);
6474 		/* next write goes to the other buffer */
6475 		sc->sc_bf_offset ^= sc->sc_bf_size;
6476 
6477 		membar_sync();
6478 	}
6479 }
6480 
6481 static void
6482 mcx_watchdog(struct ifnet *ifp)
6483 {
6484 }
6485 
6486 static void
6487 mcx_media_add_types(struct mcx_softc *sc)
6488 {
6489 	struct mcx_reg_ptys ptys;
6490 	int i;
6491 	uint32_t proto_cap;
6492 
6493 	memset(&ptys, 0, sizeof(ptys));
6494 	ptys.rp_local_port = 1;
6495 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6496 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
6497 	    sizeof(ptys)) != 0) {
6498 		printf("%s: unable to read port type/speed\n", DEVNAME(sc));
6499 		return;
6500 	}
6501 
6502 	proto_cap = be32toh(ptys.rp_eth_proto_cap);
6503 	for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6504 		if ((proto_cap & (1U << i)) && (mcx_eth_cap_map[i] != 0))
6505 			ifmedia_add(&sc->sc_media, IFM_ETHER |
6506 			    mcx_eth_cap_map[i], 0, NULL);
6507 	}
6508 }
6509 
6510 static void
6511 mcx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
6512 {
6513 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6514 	struct mcx_reg_ptys ptys;
6515 	int i;
6516 	uint32_t /* proto_cap, */ proto_oper;
6517 	uint64_t media_oper;
6518 
6519 	memset(&ptys, 0, sizeof(ptys));
6520 	ptys.rp_local_port = 1;
6521 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6522 
6523 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
6524 	    sizeof(ptys)) != 0) {
6525 		printf("%s: unable to read port type/speed\n", DEVNAME(sc));
6526 		return;
6527 	}
6528 
6529 	/* proto_cap = be32toh(ptys.rp_eth_proto_cap); */
6530 	proto_oper = be32toh(ptys.rp_eth_proto_oper);
6531 
6532 	media_oper = 0;
6533 	for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6534 		if (proto_oper & (1U << i)) {
6535 			media_oper = mcx_eth_cap_map[i];
6536 		}
6537 	}
6538 
6539 	ifmr->ifm_status = IFM_AVALID;
6540 	/* not sure if this is the right thing to check, maybe paos? */
6541 	if (proto_oper != 0) {
6542 		ifmr->ifm_status |= IFM_ACTIVE;
6543 		ifmr->ifm_active = IFM_ETHER | IFM_AUTO | media_oper;
6544 		/* txpause, rxpause, duplex? */
6545 	}
6546 }
6547 
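/*
 * change media: take the port down, write the requested protocol set
 * (or, for autoselect, everything the port supports) into PTYS, then
 * bring the port back up to restart negotiation.
 */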
6548 static int
6549 mcx_media_change(struct ifnet *ifp)
6550 {
6551 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6552 	struct mcx_reg_ptys ptys;
6553 	struct mcx_reg_paos paos;
6554 	uint32_t media;
6555 	int i, error;
6556 
6557 	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
6558 		return EINVAL;
6559 
6560 	error = 0;
6561 
6562 	if (IFM_SUBTYPE(sc->sc_media.ifm_media) == IFM_AUTO) {
6563 		/* read ptys to get supported media */
6564 		memset(&ptys, 0, sizeof(ptys));
6565 		ptys.rp_local_port = 1;
6566 		ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6567 		if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ,
6568 		    &ptys, sizeof(ptys)) != 0) {
6569 			printf("%s: unable to read port type/speed\n",
6570 			    DEVNAME(sc));
6571 			return EIO;
6572 		}
6573 
6574 		media = be32toh(ptys.rp_eth_proto_cap);
6575 	} else {
6576 		/* map media type */
6577 		media = 0;
6578 		for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6579 			if (mcx_eth_cap_map[i] ==
6580 			    IFM_SUBTYPE(sc->sc_media.ifm_media)) {
6581 				media = (1U << i);
6582 				break;
6583 			}
6584 		}
6585 	}
6586 
6587 	/* disable the port */
6588 	memset(&paos, 0, sizeof(paos));
6589 	paos.rp_local_port = 1;
6590 	paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_DOWN;
6591 	paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
6592 	if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
6593 	    sizeof(paos)) != 0) {
6594 		printf("%s: unable to set port state to down\n", DEVNAME(sc));
6595 		return EIO;
6596 	}
6597 
6598 	memset(&ptys, 0, sizeof(ptys));
6599 	ptys.rp_local_port = 1;
6600 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6601 	ptys.rp_eth_proto_admin = htobe32(media);
6602 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_WRITE, &ptys,
6603 	    sizeof(ptys)) != 0) {
6604 		printf("%s: unable to set port media type/speed\n",
6605 		    DEVNAME(sc));
6606 		error = EIO;
6607 	}
6608 
6609 	/* re-enable the port to start negotiation */
6610 	memset(&paos, 0, sizeof(paos));
6611 	paos.rp_local_port = 1;
6612 	paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_UP;
6613 	paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
6614 	if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
6615 	    sizeof(paos)) != 0) {
6616 		printf("%s: unable to set port state to up\n", DEVNAME(sc));
6617 		error = EIO;
6618 	}
6619 
6620 	return error;
6621 }
6622 
6623 static void
6624 mcx_port_change(struct work *wk, void *xsc)
6625 {
6626 	struct mcx_softc *sc = xsc;
6627 	struct ifnet *ifp = &sc->sc_ec.ec_if;
6628 	struct mcx_reg_paos paos;
6629 	int link_state = LINK_STATE_DOWN;
6630 
6631 	memset(&paos, 0, sizeof(paos));
6632 	paos.rp_local_port = 1;
6633 	if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_READ, &paos,
6634 	    sizeof(paos)) == 0) {
6635 		if (paos.rp_oper_status == MCX_REG_PAOS_OPER_STATUS_UP)
6636 			link_state = LINK_STATE_UP;
6637 	}
6638 
6639 	if (link_state != ifp->if_link_state) {
6640 		if_link_state_change(ifp, link_state);
6641 	}
6642 }
6643 
6644 
6645 static inline uint32_t
6646 mcx_rd(struct mcx_softc *sc, bus_size_t r)
6647 {
6648 	uint32_t word;
6649 
6650 	word = bus_space_read_4(sc->sc_memt, sc->sc_memh, r);
6651 
6652 	return (be32toh(word));
6653 }
6654 
6655 static inline void
6656 mcx_wr(struct mcx_softc *sc, bus_size_t r, uint32_t v)
6657 {
6658 	bus_space_write_4(sc->sc_memt, sc->sc_memh, r, htobe32(v));
6659 }
6660 
6661 static inline void
6662 mcx_bar(struct mcx_softc *sc, bus_size_t r, bus_size_t l, int f)
6663 {
6664 	bus_space_barrier(sc->sc_memt, sc->sc_memh, r, l, f);
6665 }
6666 
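/*
 * read the 64-bit free-running internal timer from its two 32-bit
 * halves, re-reading the high word until it is stable around the low
 * word read so a carry between the reads can't produce a torn value.
 */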
6667 static uint64_t
6668 mcx_timer(struct mcx_softc *sc)
6669 {
6670 	uint32_t hi, lo, ni;
6671 
6672 	hi = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
6673 	for (;;) {
6674 		lo = mcx_rd(sc, MCX_INTERNAL_TIMER_L);
6675 		mcx_bar(sc, MCX_INTERNAL_TIMER_L, 8, BUS_SPACE_BARRIER_READ);
6676 		ni = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
6677 
6678 		if (ni == hi)
6679 			break;
6680 
6681 		hi = ni;
6682 	}
6683 
6684 	return (((uint64_t)hi << 32) | (uint64_t)lo);
6685 }
6686 
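/*
 * allocate a single physically contiguous dma buffer and map it into
 * kernel virtual space, unwinding on failure.
 */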
6687 static int
6688 mcx_dmamem_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
6689     bus_size_t size, u_int align)
6690 {
6691 	mxm->mxm_size = size;
6692 
6693 	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
6694 	    mxm->mxm_size, 0,
6695 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
6696 	    &mxm->mxm_map) != 0)
6697 		return (1);
6698 	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
6699 	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
6700 	    BUS_DMA_WAITOK) != 0)
6701 		goto destroy;
6702 	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
6703 	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
6704 		goto free;
6705 	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
6706 	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
6707 		goto unmap;
6708 
6709 	mcx_dmamem_zero(mxm);
6710 
6711 	return (0);
6712 unmap:
6713 	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
6714 free:
6715 	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
6716 destroy:
6717 	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
6718 	return (1);
6719 }
6720 
6721 static void
6722 mcx_dmamem_zero(struct mcx_dmamem *mxm)
6723 {
6724 	memset(MCX_DMA_KVA(mxm), 0, MCX_DMA_LEN(mxm));
6725 }
6726 
6727 static void
6728 mcx_dmamem_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
6729 {
6730 	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
6731 	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
6732 	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
6733 	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
6734 }
6735 
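/*
 * allocate pages to hand to the firmware.  the allocation may come
 * back in fewer (larger) segments than the pages requested, in which
 * case the segment array is trimmed to the count actually returned
 * before the map is created and loaded.
 */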
6736 static int
6737 mcx_hwmem_alloc(struct mcx_softc *sc, struct mcx_hwmem *mhm, unsigned int pages)
6738 {
6739 	bus_dma_segment_t *segs;
6740 	bus_size_t len = pages * MCX_PAGE_SIZE;
6741 	size_t seglen;
6742 
6743 	segs = kmem_alloc(sizeof(*segs) * pages, KM_SLEEP);
6744 	seglen = sizeof(*segs) * pages;
6745 
6746 	if (bus_dmamem_alloc(sc->sc_dmat, len, MCX_PAGE_SIZE, 0,
6747 	    segs, pages, &mhm->mhm_seg_count, BUS_DMA_NOWAIT) != 0)
6748 		goto free_segs;
6749 
6750 	if (mhm->mhm_seg_count < pages) {
6751 		size_t nseglen;
6752 
6753 		mhm->mhm_segs = kmem_alloc(
6754 		    sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count, KM_SLEEP);
6755 
6756 		nseglen = sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count;
6757 
6758 		memcpy(mhm->mhm_segs, segs, nseglen);
6759 
6760 		kmem_free(segs, seglen);
6761 
6762 		segs = mhm->mhm_segs;
6763 		seglen = nseglen;
6764 	} else
6765 		mhm->mhm_segs = segs;
6766 
6767 	if (bus_dmamap_create(sc->sc_dmat, len, pages, MCX_PAGE_SIZE,
6768 	    MCX_PAGE_SIZE, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW /*|BUS_DMA_64BIT*/,
6769 	    &mhm->mhm_map) != 0)
6770 		goto free_dmamem;
6771 
6772 	if (bus_dmamap_load_raw(sc->sc_dmat, mhm->mhm_map,
6773 	    mhm->mhm_segs, mhm->mhm_seg_count, len, BUS_DMA_NOWAIT) != 0)
6774 		goto destroy;
6775 
6776 	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
6777 	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_PRERW);
6778 
6779 	mhm->mhm_npages = pages;
6780 
6781 	return (0);
6782 
6783 destroy:
6784 	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
6785 free_dmamem:
6786 	bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
6787 free_segs:
6788 	kmem_free(segs, seglen);
6789 	mhm->mhm_segs = NULL;
6790 
6791 	return (-1);
6792 }
6793 
6794 static void
6795 mcx_hwmem_free(struct mcx_softc *sc, struct mcx_hwmem *mhm)
6796 {
6797 	if (mhm->mhm_npages == 0)
6798 		return;
6799 
6800 	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
6801 	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_POSTRW);
6802 
6803 	bus_dmamap_unload(sc->sc_dmat, mhm->mhm_map);
6804 	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
6805 	bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
6806 	kmem_free(mhm->mhm_segs, sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count);
6807 
6808 	mhm->mhm_npages = 0;
6809 }
6810