xref: /openbsd-src/sys/dev/pci/if_mcx.c (revision 7350f337b9e3eb4461d99580e625c7ef148d107c)
1 /*	$OpenBSD: if_mcx.c,v 1.31 2019/06/22 08:36:55 jmatthew Exp $ */
2 
3 /*
4  * Copyright (c) 2017 David Gwynne <dlg@openbsd.org>
5  * Copyright (c) 2019 Jonathan Matthew <jmatthew@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "bpfilter.h"
21 
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/sockio.h>
25 #include <sys/mbuf.h>
26 #include <sys/kernel.h>
27 #include <sys/socket.h>
28 #include <sys/device.h>
29 #include <sys/pool.h>
30 #include <sys/queue.h>
31 #include <sys/timeout.h>
32 #include <sys/task.h>
33 #include <sys/atomic.h>
34 
35 #include <machine/bus.h>
36 #include <machine/intr.h>
37 
38 #include <net/if.h>
39 #include <net/if_dl.h>
40 #include <net/if_media.h>
41 
42 #if NBPFILTER > 0
43 #include <net/bpf.h>
44 #endif
45 
46 #include <netinet/in.h>
47 #include <netinet/if_ether.h>
48 
49 #include <dev/pci/pcireg.h>
50 #include <dev/pci/pcivar.h>
51 #include <dev/pci/pcidevs.h>
52 
53 #define BUS_DMASYNC_PRERW	(BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
54 #define BUS_DMASYNC_POSTRW	(BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)
55 
56 #define MCX_HCA_BAR	PCI_MAPREG_START /* BAR 0 */
57 
/* init segment registers, read from the start of BAR 0 */
58 #define MCX_FW_VER	 	  0x0000
59 #define  MCX_FW_VER_MAJOR(_v)		((_v) & 0xffff)
60 #define  MCX_FW_VER_MINOR(_v)		((_v) >> 16)
61 #define MCX_CMDIF_FW_SUBVER	  0x0004
62 #define  MCX_FW_VER_SUBMINOR(_v)	((_v) & 0xffff)
63 #define  MCX_CMDIF(_v)			((_v) >> 16)
64 
65 #define MCX_ISSI		 1 /* as per the PRM */
66 #define MCX_CMD_IF_SUPPORTED	 5
67 
68 #define MCX_HARDMTU		 9500
69 
70 #define MCX_MAX_CQS		 2		/* rq, sq */
71 
72 /* queue sizes */
73 #define MCX_LOG_EQ_SIZE		 6		/* one page */
74 #define MCX_LOG_CQ_SIZE		 11
75 #define MCX_LOG_RQ_SIZE		 10
76 #define MCX_LOG_SQ_SIZE		 11
77 
78 /* completion event moderation - about 10khz, or 90% of the cq */
79 #define MCX_CQ_MOD_PERIOD	50
80 #define MCX_CQ_MOD_COUNTER	(((1 << (MCX_LOG_CQ_SIZE - 1)) * 9) / 10)
81 
82 #define MCX_LOG_SQ_ENTRY_SIZE	 6
83 #define MCX_SQ_ENTRY_MAX_SLOTS	 4
84 #define MCX_SQ_SEGS_PER_SLOT	 \
85 	(sizeof(struct mcx_sq_entry) / sizeof(struct mcx_sq_entry_seg))
86 #define MCX_SQ_MAX_SEGMENTS	 \
87 	1 + ((MCX_SQ_ENTRY_MAX_SLOTS-1) * MCX_SQ_SEGS_PER_SLOT)
88 
89 #define MCX_LOG_FLOW_TABLE_SIZE	 5
90 #define MCX_NUM_STATIC_FLOWS	 4	/* promisc, allmulti, ucast, bcast */
91 #define MCX_NUM_MCAST_FLOWS 	\
92 	((1 << MCX_LOG_FLOW_TABLE_SIZE) - MCX_NUM_STATIC_FLOWS)
93 
94 #define MCX_SQ_INLINE_SIZE	 18
95 
96 /* doorbell offsets */
97 #define MCX_CQ_DOORBELL_OFFSET	 0
98 #define MCX_CQ_DOORBELL_SIZE	 16
99 #define MCX_RQ_DOORBELL_OFFSET	 64
100 #define MCX_SQ_DOORBELL_OFFSET	 64
101 
102 /* uar registers */
103 #define MCX_UAR_CQ_DOORBELL	 0x20
104 #define MCX_UAR_EQ_DOORBELL_ARM	 0x40
105 #define MCX_UAR_EQ_DOORBELL	 0x48
106 #define MCX_UAR_BF		 0x800
107 
/* command queue location/control words in the init segment */
108 #define MCX_CMDQ_ADDR_HI		 0x0010
109 #define MCX_CMDQ_ADDR_LO		 0x0014
110 #define MCX_CMDQ_ADDR_NMASK		0xfff
111 #define MCX_CMDQ_LOG_SIZE(_v)		((_v) >> 4 & 0xf)
112 #define MCX_CMDQ_LOG_STRIDE(_v)		((_v) >> 0 & 0xf)
113 #define MCX_CMDQ_INTERFACE_MASK		(0x3 << 8)
114 #define MCX_CMDQ_INTERFACE_FULL_DRIVER	(0x0 << 8)
115 #define MCX_CMDQ_INTERFACE_DISABLED	(0x1 << 8)
116 
117 #define MCX_CMDQ_DOORBELL		0x0018
118 
119 #define MCX_STATE		0x01fc
120 #define MCX_STATE_MASK			(1 << 31)
121 #define MCX_STATE_INITIALIZING		(1 << 31)
122 #define MCX_STATE_READY			(0 << 31)
123 #define MCX_STATE_INTERFACE_MASK	(0x3 << 24)
124 #define MCX_STATE_INTERFACE_FULL_DRIVER	(0x0 << 24)
125 #define MCX_STATE_INTERFACE_DISABLED	(0x1 << 24)
126 
/* NOTE(review): _TIMER and _TIMER_H share 0x1000 — H is the high word
 * at the base offset, L the low word at +4; presumably intentional. */
127 #define MCX_INTERNAL_TIMER	0x1000
128 #define MCX_INTERNAL_TIMER_H	0x1000
129 #define MCX_INTERNAL_TIMER_L	0x1004
130 
131 #define MCX_CLEAR_INT		0x100c
132 
/* op_mod values for MCX_CMD_ACCESS_REG */
133 #define MCX_REG_OP_WRITE	0
134 #define MCX_REG_OP_READ		1
135 
/* port register ids accessed via MCX_CMD_ACCESS_REG */
136 #define MCX_REG_PMLP		0x5002
137 #define MCX_REG_PMTU		0x5003
138 #define MCX_REG_PTYS		0x5004
139 #define MCX_REG_PAOS		0x5006
140 #define MCX_REG_PFCC		0x5007
141 #define MCX_REG_PPCNT		0x5008
142 #define MCX_REG_MCIA		0x9014
143 
/* ethernet protocol capability bits (PTYS eth_proto_* fields) */
144 #define MCX_ETHER_CAP_SGMII	(1 << 0)
145 #define MCX_ETHER_CAP_1000_KX	(1 << 1)
146 #define MCX_ETHER_CAP_10G_CX4	(1 << 2)
147 #define MCX_ETHER_CAP_10G_KX4	(1 << 3)
148 #define MCX_ETHER_CAP_10G_KR	(1 << 4)
149 #define MCX_ETHER_CAP_40G_CR4	(1 << 6)
150 #define MCX_ETHER_CAP_40G_KR4	(1 << 7)
151 #define MCX_ETHER_CAP_10G_CR	(1 << 12)
152 #define MCX_ETHER_CAP_10G_SR	(1 << 13)
153 #define MCX_ETHER_CAP_10G_LR	(1 << 14)
154 #define MCX_ETHER_CAP_40G_SR4	(1 << 15)
155 #define MCX_ETHER_CAP_40G_LR4	(1 << 16)
156 #define MCX_ETHER_CAP_50G_SR2	(1 << 18)
157 #define MCX_ETHER_CAP_100G_CR4	(1 << 20)
158 #define MCX_ETHER_CAP_100G_SR4	(1 << 21)
159 #define MCX_ETHER_CAP_100G_KR4	(1 << 22)
160 #define MCX_ETHER_CAP_25G_CR	(1 << 27)
161 #define MCX_ETHER_CAP_25G_KR	(1 << 28)
162 #define MCX_ETHER_CAP_25G_SR	(1 << 29)
163 #define MCX_ETHER_CAP_50G_CR2	(1 << 30)
164 #define MCX_ETHER_CAP_50G_KR2	(1 << 31)
165 
166 #define MCX_PAGE_SHIFT		12
167 #define MCX_PAGE_SIZE		(1 << MCX_PAGE_SHIFT)
168 #define MCX_MAX_CQE		32
169 
/* command queue opcodes */
170 #define MCX_CMD_QUERY_HCA_CAP	0x100
171 #define MCX_CMD_QUERY_ADAPTER	0x101
172 #define MCX_CMD_INIT_HCA	0x102
173 #define MCX_CMD_TEARDOWN_HCA	0x103
174 #define MCX_CMD_ENABLE_HCA	0x104
175 #define MCX_CMD_DISABLE_HCA	0x105
176 #define MCX_CMD_QUERY_PAGES	0x107
177 #define MCX_CMD_MANAGE_PAGES	0x108
178 #define MCX_CMD_SET_HCA_CAP	0x109
179 #define MCX_CMD_QUERY_ISSI	0x10a
180 #define MCX_CMD_SET_ISSI	0x10b
181 #define MCX_CMD_SET_DRIVER_VERSION \
182 				0x10d
183 #define MCX_CMD_QUERY_SPECIAL_CONTEXTS \
184 				0x203
185 #define MCX_CMD_CREATE_EQ	0x301
186 #define MCX_CMD_DESTROY_EQ	0x302
187 #define MCX_CMD_CREATE_CQ	0x400
188 #define MCX_CMD_DESTROY_CQ	0x401
189 #define MCX_CMD_QUERY_NIC_VPORT_CONTEXT \
190 				0x754
191 #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT \
192 				0x755
193 #define MCX_CMD_QUERY_VPORT_COUNTERS \
194 				0x770
195 #define MCX_CMD_ALLOC_PD	0x800
196 #define MCX_CMD_ALLOC_UAR	0x802
197 #define MCX_CMD_ACCESS_REG	0x805
198 #define MCX_CMD_ALLOC_TRANSPORT_DOMAIN \
199 				0x816
200 #define MCX_CMD_CREATE_TIR	0x900
201 #define MCX_CMD_DESTROY_TIR	0x902
202 #define MCX_CMD_CREATE_SQ	0x904
203 #define MCX_CMD_MODIFY_SQ	0x905
204 #define MCX_CMD_DESTROY_SQ	0x906
205 #define MCX_CMD_QUERY_SQ	0x907
206 #define MCX_CMD_CREATE_RQ	0x908
207 #define MCX_CMD_MODIFY_RQ	0x909
208 #define MCX_CMD_DESTROY_RQ	0x90a
209 #define MCX_CMD_QUERY_RQ	0x90b
210 #define MCX_CMD_CREATE_TIS	0x912
211 #define MCX_CMD_DESTROY_TIS	0x914
212 #define MCX_CMD_SET_FLOW_TABLE_ROOT \
213 				0x92f
214 #define MCX_CMD_CREATE_FLOW_TABLE \
215 				0x930
216 #define MCX_CMD_DESTROY_FLOW_TABLE \
217 				0x931
218 #define MCX_CMD_QUERY_FLOW_TABLE \
219 				0x932
220 #define MCX_CMD_CREATE_FLOW_GROUP \
221 				0x933
222 #define MCX_CMD_DESTROY_FLOW_GROUP \
223 				0x934
224 #define MCX_CMD_QUERY_FLOW_GROUP \
225 				0x935
226 #define MCX_CMD_SET_FLOW_TABLE_ENTRY \
227 				0x936
228 #define MCX_CMD_QUERY_FLOW_TABLE_ENTRY \
229 				0x937
230 #define MCX_CMD_DELETE_FLOW_TABLE_ENTRY \
231 				0x938
232 #define MCX_CMD_ALLOC_FLOW_COUNTER \
233 				0x939
234 #define MCX_CMD_QUERY_FLOW_COUNTER \
235 				0x93b
236 
/* rq/sq queue states */
237 #define MCX_QUEUE_STATE_RST	0
238 #define MCX_QUEUE_STATE_RDY	1
239 #define MCX_QUEUE_STATE_ERR	3
240 
241 #define MCX_FLOW_TABLE_TYPE_RX	0
242 #define MCX_FLOW_TABLE_TYPE_TX	1
243 
244 #define MCX_CMDQ_INLINE_DATASIZE 16
245 
/*
 * One command queue entry.  Commands with more than
 * MCX_CMDQ_INLINE_DATASIZE bytes of input or output chain mailboxes
 * through cq_input_ptr/cq_output_ptr; smaller payloads go inline.
 * cq_status carries both the completion status (upper bits) and the
 * ownership bit (bit 0) handed back and forth with the hardware.
 */
246 struct mcx_cmdq_entry {
247 	uint8_t			cq_type;
248 #define MCX_CMDQ_TYPE_PCIE		0x7
249 	uint8_t			cq_reserved0[3];
250 
251 	uint32_t		cq_input_length;
252 	uint64_t		cq_input_ptr;
253 	uint8_t			cq_input_data[MCX_CMDQ_INLINE_DATASIZE];
254 
255 	uint8_t			cq_output_data[MCX_CMDQ_INLINE_DATASIZE];
256 	uint64_t		cq_output_ptr;
257 	uint32_t		cq_output_length;
258 
259 	uint8_t			cq_token;
260 	uint8_t			cq_signature;
261 	uint8_t			cq_reserved1[1];
262 	uint8_t			cq_status;
263 #define MCX_CQ_STATUS_SHIFT		1
264 #define MCX_CQ_STATUS_MASK		(0x7f << MCX_CQ_STATUS_SHIFT)
265 #define MCX_CQ_STATUS_OK		(0x00 << MCX_CQ_STATUS_SHIFT)
266 #define MCX_CQ_STATUS_INT_ERR		(0x01 << MCX_CQ_STATUS_SHIFT)
267 #define MCX_CQ_STATUS_BAD_OPCODE	(0x02 << MCX_CQ_STATUS_SHIFT)
268 #define MCX_CQ_STATUS_BAD_PARAM		(0x03 << MCX_CQ_STATUS_SHIFT)
269 #define MCX_CQ_STATUS_BAD_SYS_STATE	(0x04 << MCX_CQ_STATUS_SHIFT)
270 #define MCX_CQ_STATUS_BAD_RESOURCE	(0x05 << MCX_CQ_STATUS_SHIFT)
271 #define MCX_CQ_STATUS_RESOURCE_BUSY	(0x06 << MCX_CQ_STATUS_SHIFT)
272 #define MCX_CQ_STATUS_EXCEED_LIM	(0x08 << MCX_CQ_STATUS_SHIFT)
273 #define MCX_CQ_STATUS_BAD_RES_STATE	(0x09 << MCX_CQ_STATUS_SHIFT)
274 #define MCX_CQ_STATUS_BAD_INDEX		(0x0a << MCX_CQ_STATUS_SHIFT)
275 #define MCX_CQ_STATUS_NO_RESOURCES	(0x0f << MCX_CQ_STATUS_SHIFT)
276 #define MCX_CQ_STATUS_BAD_INPUT_LEN	(0x50 << MCX_CQ_STATUS_SHIFT)
277 #define MCX_CQ_STATUS_BAD_OUTPUT_LEN	(0x51 << MCX_CQ_STATUS_SHIFT)
278 #define MCX_CQ_STATUS_BAD_RESOURCE_STATE \
279 					(0x10 << MCX_CQ_STATUS_SHIFT)
280 #define MCX_CQ_STATUS_BAD_SIZE		(0x40 << MCX_CQ_STATUS_SHIFT)
281 #define MCX_CQ_STATUS_OWN_MASK		0x1
282 #define MCX_CQ_STATUS_OWN_SW		0x0
283 #define MCX_CQ_STATUS_OWN_HW		0x1
284 } __packed __aligned(8);
285 
286 #define MCX_CMDQ_MAILBOX_DATASIZE	512
287 
/*
 * One command mailbox: 512 bytes of payload followed by the chaining
 * pointer to the next mailbox and its block number, so long command
 * input/output is carried as a linked list of these.
 */
288 struct mcx_cmdq_mailbox {
289 	uint8_t			mb_data[MCX_CMDQ_MAILBOX_DATASIZE];
290 	uint8_t			mb_reserved0[48];
291 	uint64_t		mb_next_ptr;
292 	uint32_t		mb_block_number;
293 	uint8_t			mb_reserved1[1];
294 	uint8_t			mb_token;
295 	uint8_t			mb_ctrl_signature;
296 	uint8_t			mb_signature;
297 } __packed __aligned(8);
298 
299 #define MCX_CMDQ_MAILBOX_ALIGN	(1 << 10)
300 #define MCX_CMDQ_MAILBOX_SIZE	roundup(sizeof(struct mcx_cmdq_mailbox), \
301 				    MCX_CMDQ_MAILBOX_ALIGN)
302 /*
303  * command mailbox structures
304  */
305 
/* ENABLE_HCA command input/output layouts */
306 struct mcx_cmd_enable_hca_in {
307 	uint16_t		cmd_opcode;
308 	uint8_t			cmd_reserved0[4];
309 	uint16_t		cmd_op_mod;
310 	uint8_t			cmd_reserved1[2];
311 	uint16_t		cmd_function_id;
312 	uint8_t			cmd_reserved2[4];
313 } __packed __aligned(4);
314 
315 struct mcx_cmd_enable_hca_out {
316 	uint8_t			cmd_status;
317 	uint8_t			cmd_reserved0[3];
318 	uint32_t		cmd_syndrome;
319 	uint8_t			cmd_reserved1[4];
320 } __packed __aligned(4);
321 
/* INIT_HCA command input/output layouts */
322 struct mcx_cmd_init_hca_in {
323 	uint16_t		cmd_opcode;
324 	uint8_t			cmd_reserved0[4];
325 	uint16_t		cmd_op_mod;
326 	uint8_t			cmd_reserved1[8];
327 } __packed __aligned(4);
328 
329 struct mcx_cmd_init_hca_out {
330 	uint8_t			cmd_status;
331 	uint8_t			cmd_reserved0[3];
332 	uint32_t		cmd_syndrome;
333 	uint8_t			cmd_reserved1[8];
334 } __packed __aligned(4);
335 
/* TEARDOWN_HCA command input/output layouts */
336 struct mcx_cmd_teardown_hca_in {
337 	uint16_t		cmd_opcode;
338 	uint8_t			cmd_reserved0[4];
339 	uint16_t		cmd_op_mod;
340 	uint8_t			cmd_reserved1[2];
341 #define MCX_CMD_TEARDOWN_HCA_GRACEFUL	0x0
342 #define MCX_CMD_TEARDOWN_HCA_PANIC	0x1
343 	uint16_t		cmd_profile;
344 	uint8_t			cmd_reserved2[4];
345 } __packed __aligned(4);
346 
347 struct mcx_cmd_teardown_hca_out {
348 	uint8_t			cmd_status;
349 	uint8_t			cmd_reserved0[3];
350 	uint32_t		cmd_syndrome;
351 	uint8_t			cmd_reserved1[8];
352 } __packed __aligned(4);
353 
/*
 * ACCESS_REG command: reads or writes a port register (MCX_REG_*)
 * selected by cmd_register_id; direction comes from cmd_op_mod
 * (MCX_REG_OP_READ/MCX_REG_OP_WRITE).  The register payload itself is
 * carried in a mailbox.
 */
354 struct mcx_cmd_access_reg_in {
355 	uint16_t		cmd_opcode;
356 	uint8_t			cmd_reserved0[4];
357 	uint16_t		cmd_op_mod;
358 	uint8_t			cmd_reserved1[2];
359 	uint16_t		cmd_register_id;
360 	uint32_t		cmd_argument;
361 } __packed __aligned(4);
362 
363 struct mcx_cmd_access_reg_out {
364 	uint8_t			cmd_status;
365 	uint8_t			cmd_reserved0[3];
366 	uint32_t		cmd_syndrome;
367 	uint8_t			cmd_reserved1[8];
368 } __packed __aligned(4);
369 
/* PMTU: port MTU register (max/admin/oper MTU per local port) */
370 struct mcx_reg_pmtu {
371 	uint8_t			rp_reserved1;
372 	uint8_t			rp_local_port;
373 	uint8_t			rp_reserved2[2];
374 	uint16_t		rp_max_mtu;
375 	uint8_t			rp_reserved3[2];
376 	uint16_t		rp_admin_mtu;
377 	uint8_t			rp_reserved4[2];
378 	uint16_t		rp_oper_mtu;
379 	uint8_t			rp_reserved5[2];
380 } __packed __aligned(4);
381 
/* PTYS: port type and speed register (MCX_ETHER_CAP_* bitmaps) */
382 struct mcx_reg_ptys {
383 	uint8_t			rp_reserved1;
384 	uint8_t			rp_local_port;
385 	uint8_t			rp_reserved2;
386 	uint8_t			rp_proto_mask;
387 #define MCX_REG_PTYS_PROTO_MASK_ETH		(1 << 2)
388 	uint8_t			rp_reserved3[8];
389 	uint32_t		rp_eth_proto_cap;
390 	uint8_t			rp_reserved4[8];
391 	uint32_t		rp_eth_proto_admin;
392 	uint8_t			rp_reserved5[8];
393 	uint32_t		rp_eth_proto_oper;
394 	uint8_t			rp_reserved6[24];
395 } __packed __aligned(4);
396 
/* PAOS: port administrative and operational status register */
397 struct mcx_reg_paos {
398 	uint8_t			rp_reserved1;
399 	uint8_t			rp_local_port;
400 	uint8_t			rp_admin_status;
401 #define MCX_REG_PAOS_ADMIN_STATUS_UP		1
402 #define MCX_REG_PAOS_ADMIN_STATUS_DOWN		2
403 #define MCX_REG_PAOS_ADMIN_STATUS_UP_ONCE	3
404 #define MCX_REG_PAOS_ADMIN_STATUS_DISABLED	4
405 	uint8_t			rp_oper_status;
406 #define MCX_REG_PAOS_OPER_STATUS_UP		1
407 #define MCX_REG_PAOS_OPER_STATUS_DOWN		2
408 #define MCX_REG_PAOS_OPER_STATUS_FAILED		4
409 	uint8_t			rp_admin_state_update;
410 #define MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN	(1 << 7)
411 	uint8_t			rp_reserved2[11];
412 } __packed __aligned(4);
413 
/* PFCC: port flow control configuration register */
414 struct mcx_reg_pfcc {
415 	uint8_t			rp_reserved1;
416 	uint8_t			rp_local_port;
417 	uint8_t			rp_reserved2[3];
418 	uint8_t			rp_prio_mask_tx;
419 	uint8_t			rp_reserved3;
420 	uint8_t			rp_prio_mask_rx;
421 	uint8_t			rp_pptx_aptx;
422 	uint8_t			rp_pfctx;
423 	uint8_t			rp_fctx_dis;
424 	uint8_t			rp_reserved4;
425 	uint8_t			rp_pprx_aprx;
426 	uint8_t			rp_pfcrx;
427 	uint8_t			rp_reserved5[2];
428 	uint16_t		rp_dev_stall_min;
429 	uint16_t		rp_dev_stall_crit;
430 	uint8_t			rp_reserved6[12];
431 } __packed __aligned(4);
432 
/* PMLP: port module-to-lane mapping register */
433 #define MCX_PMLP_MODULE_NUM_MASK	0xff
434 struct mcx_reg_pmlp {
435 	uint8_t			rp_rxtx;
436 	uint8_t			rp_local_port;
437 	uint8_t			rp_reserved0;
438 	uint8_t			rp_width;
439 	uint32_t		rp_lane0_mapping;
440 	uint32_t		rp_lane1_mapping;
441 	uint32_t		rp_lane2_mapping;
442 	uint32_t		rp_lane3_mapping;
443 	uint8_t			rp_reserved1[44];
444 } __packed __aligned(4);
445 
/* MCIA: cable/module EEPROM access register (SFP/QSFP i2c reads) */
446 #define MCX_MCIA_EEPROM_BYTES	32
447 struct mcx_reg_mcia {
448 	uint8_t			rm_l;
449 	uint8_t			rm_module;
450 	uint8_t			rm_reserved0;
451 	uint8_t			rm_status;
452 	uint8_t			rm_i2c_addr;
453 	uint8_t			rm_page_num;
454 	uint16_t		rm_dev_addr;
455 	uint16_t		rm_reserved1;
456 	uint16_t		rm_size;
457 	uint32_t		rm_reserved2;
458 	uint8_t			rm_data[48];
459 } __packed __aligned(4);
460 
/*
 * QUERY_ISSI / SET_ISSI: negotiate the issue (interface) level with the
 * firmware.  The query's inline output carries the current ISSI; the
 * supported-level bitmap comes back in a mailbox.
 */
461 struct mcx_cmd_query_issi_in {
462 	uint16_t		cmd_opcode;
463 	uint8_t			cmd_reserved0[4];
464 	uint16_t		cmd_op_mod;
465 	uint8_t			cmd_reserved1[8];
466 } __packed __aligned(4);
467 
468 struct mcx_cmd_query_issi_il_out {
469 	uint8_t			cmd_status;
470 	uint8_t			cmd_reserved0[3];
471 	uint32_t		cmd_syndrome;
472 	uint8_t			cmd_reserved1[2];
473 	uint16_t		cmd_current_issi;
474 	uint8_t			cmd_reserved2[4];
475 } __packed __aligned(4);
476 
477 CTASSERT(sizeof(struct mcx_cmd_query_issi_il_out) == MCX_CMDQ_INLINE_DATASIZE);
478 
479 struct mcx_cmd_query_issi_mb_out {
480 	uint8_t			cmd_reserved2[16];
481 	uint8_t			cmd_supported_issi[80]; /* very big endian */
482 } __packed __aligned(4);
483 
484 CTASSERT(sizeof(struct mcx_cmd_query_issi_mb_out) <= MCX_CMDQ_MAILBOX_DATASIZE);
485 
486 struct mcx_cmd_set_issi_in {
487 	uint16_t		cmd_opcode;
488 	uint8_t			cmd_reserved0[4];
489 	uint16_t		cmd_op_mod;
490 	uint8_t			cmd_reserved1[2];
491 	uint16_t		cmd_current_issi;
492 	uint8_t			cmd_reserved2[4];
493 } __packed __aligned(4);
494 
495 CTASSERT(sizeof(struct mcx_cmd_set_issi_in) <= MCX_CMDQ_INLINE_DATASIZE);
496 
497 struct mcx_cmd_set_issi_out {
498 	uint8_t			cmd_status;
499 	uint8_t			cmd_reserved0[3];
500 	uint32_t		cmd_syndrome;
501 	uint8_t			cmd_reserved1[8];
502 } __packed __aligned(4);
503 
504 CTASSERT(sizeof(struct mcx_cmd_set_issi_out) <= MCX_CMDQ_INLINE_DATASIZE);
505 
/*
 * QUERY_PAGES / MANAGE_PAGES: the firmware asks the driver for host
 * memory pages (boot/init/regular phases); MANAGE_PAGES supplies or
 * reclaims them.  Page addresses follow the input in mailboxes.
 */
506 struct mcx_cmd_query_pages_in {
507 	uint16_t		cmd_opcode;
508 	uint8_t			cmd_reserved0[4];
509 	uint16_t		cmd_op_mod;
510 #define MCX_CMD_QUERY_PAGES_BOOT	0x01
511 #define MCX_CMD_QUERY_PAGES_INIT	0x02
512 #define MCX_CMD_QUERY_PAGES_REGULAR	0x03
513 	uint8_t			cmd_reserved1[8];
514 } __packed __aligned(4);
515 
516 struct mcx_cmd_query_pages_out {
517 	uint8_t			cmd_status;
518 	uint8_t			cmd_reserved0[3];
519 	uint32_t		cmd_syndrome;
520 	uint8_t			cmd_reserved1[2];
521 	uint16_t		cmd_func_id;
522 	uint32_t		cmd_num_pages;
523 } __packed __aligned(4);
524 
525 struct mcx_cmd_manage_pages_in {
526 	uint16_t		cmd_opcode;
527 	uint8_t			cmd_reserved0[4];
528 	uint16_t		cmd_op_mod;
529 #define MCX_CMD_MANAGE_PAGES_ALLOC_FAIL \
530 					0x00
531 #define MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS \
532 					0x01
533 #define MCX_CMD_MANAGE_PAGES_HCA_RETURN_PAGES \
534 					0x02
535 	uint8_t			cmd_reserved1[2];
536 	uint16_t		cmd_func_id;
537 	uint32_t		cmd_input_num_entries;
538 } __packed __aligned(4);
539 
540 CTASSERT(sizeof(struct mcx_cmd_manage_pages_in) == MCX_CMDQ_INLINE_DATASIZE);
541 
542 struct mcx_cmd_manage_pages_out {
543 	uint8_t			cmd_status;
544 	uint8_t			cmd_reserved0[3];
545 	uint32_t		cmd_syndrome;
546 	uint32_t		cmd_output_num_entries;
547 	uint8_t			cmd_reserved1[4];
548 } __packed __aligned(4);
549 
550 CTASSERT(sizeof(struct mcx_cmd_manage_pages_out) == MCX_CMDQ_INLINE_DATASIZE);
551 
/*
 * QUERY_HCA_CAP: op_mod selects which capability page to read
 * (device/offload/flow) and whether max or currently-enabled values
 * are returned; the capability data itself arrives in mailboxes.
 */
552 struct mcx_cmd_query_hca_cap_in {
553 	uint16_t		cmd_opcode;
554 	uint8_t			cmd_reserved0[4];
555 	uint16_t		cmd_op_mod;
556 #define MCX_CMD_QUERY_HCA_CAP_MAX	(0x0 << 0)
557 #define MCX_CMD_QUERY_HCA_CAP_CURRENT	(0x1 << 0)
558 #define MCX_CMD_QUERY_HCA_CAP_DEVICE	(0x0 << 1)
559 #define MCX_CMD_QUERY_HCA_CAP_OFFLOAD	(0x1 << 1)
560 #define MCX_CMD_QUERY_HCA_CAP_FLOW	(0x7 << 1)
561 	uint8_t			cmd_reserved1[8];
562 } __packed __aligned(4);
563 
564 struct mcx_cmd_query_hca_cap_out {
565 	uint8_t			cmd_status;
566 	uint8_t			cmd_reserved0[3];
567 	uint32_t		cmd_syndrome;
568 	uint8_t			cmd_reserved1[8];
569 } __packed __aligned(4);
570 
571 #define MCX_HCA_CAP_LEN			0x1000
572 #define MCX_HCA_CAP_NMAILBOXES		\
573 	(MCX_HCA_CAP_LEN / MCX_CMDQ_MAILBOX_DATASIZE)
574 
/*
 * __reserved__ expands to a uniquely-named field (via __COUNTER__ when
 * available, __LINE__ otherwise) so struct mcx_cap_device below can
 * declare many anonymous padding members without name clashes.
 */
575 #if __GNUC_PREREQ__(4, 3)
576 #define __counter__		__COUNTER__
577 #else
578 #define __counter__		__LINE__
579 #endif
580 
581 #define __token(_tok, _num)	_tok##_num
582 #define _token(_tok, _num)	__token(_tok, _num)
583 #define __reserved__		_token(__reserved, __counter__)
584 
/*
 * Device capability page returned by QUERY_HCA_CAP (device group).
 * Bit-width notes on fields and the MCX_CAP_DEVICE_* masks describe
 * how the packed values are extracted; the CTASSERTs below pin the
 * offsets this driver depends on.
 *
 * NOTE(review): a duplicated, byte-identical definition of
 * MCX_CAP_DEVICE_DRAIN_SIGERR (previously defined twice with value
 * 0x00010000) has been removed.  The misspelled MCX_CAP_DEVICe_* names
 * are kept as-is since other code may reference them.
 */
585 struct mcx_cap_device {
586 	uint8_t			reserved0[16];
587 
588 	uint8_t			log_max_srq_sz;
589 	uint8_t			log_max_qp_sz;
590 	uint8_t			__reserved__[1];
591 	uint8_t			log_max_qp; /* 5 bits */
592 #define MCX_CAP_DEVICE_LOG_MAX_QP	0x1f
593 
594 	uint8_t			__reserved__[1];
595 	uint8_t			log_max_srq; /* 5 bits */
596 #define MCX_CAP_DEVICE_LOG_MAX_SRQ	0x1f
597 	uint8_t			__reserved__[2];
598 
599 	uint8_t			__reserved__[1];
600 	uint8_t			log_max_cq_sz;
601 	uint8_t			__reserved__[1];
602 	uint8_t			log_max_cq; /* 5 bits */
603 #define MCX_CAP_DEVICE_LOG_MAX_CQ	0x1f
604 
605 	uint8_t			log_max_eq_sz;
606 	uint8_t			log_max_mkey; /* 6 bits */
607 #define MCX_CAP_DEVICE_LOG_MAX_MKEY	0x3f
608 	uint8_t			__reserved__[1];
609 	uint8_t			log_max_eq; /* 4 bits */
610 #define MCX_CAP_DEVICE_LOG_MAX_EQ	0x0f
611 
612 	uint8_t			max_indirection;
613 	uint8_t			log_max_mrw_sz; /* 7 bits */
614 #define MCX_CAP_DEVICE_LOG_MAX_MRW_SZ	0x7f
615 	uint8_t			teardown_log_max_msf_list_size;
616 #define MCX_CAP_DEVICE_FORCE_TEARDOWN	0x80
617 #define MCX_CAP_DEVICE_LOG_MAX_MSF_LIST_SIZE \
618 					0x3f
619 	uint8_t			log_max_klm_list_size; /* 6 bits */
620 #define MCX_CAP_DEVICE_LOG_MAX_KLM_LIST_SIZE \
621 					0x3f
622 
623 	uint8_t			__reserved__[1];
624 	uint8_t			log_max_ra_req_dc; /* 6 bits */
625 #define MCX_CAP_DEVICE_LOG_MAX_REQ_DC	0x3f
626 	uint8_t			__reserved__[1];
627 	uint8_t			log_max_ra_res_dc; /* 6 bits */
628 #define MCX_CAP_DEVICE_LOG_MAX_RA_RES_DC \
629 					0x3f
630 
631 	uint8_t			__reserved__[1];
632 	uint8_t			log_max_ra_req_qp; /* 6 bits */
633 #define MCX_CAP_DEVICE_LOG_MAX_RA_REQ_QP \
634 					0x3f
635 	uint8_t			__reserved__[1];
636 	uint8_t			log_max_ra_res_qp; /* 6 bits */
637 #define MCX_CAP_DEVICE_LOG_MAX_RA_RES_QP \
638 					0x3f
639 
640 	uint8_t			flags1;
641 #define MCX_CAP_DEVICE_END_PAD		0x80
642 #define MCX_CAP_DEVICE_CC_QUERY_ALLOWED	0x40
643 #define MCX_CAP_DEVICE_CC_MODIFY_ALLOWED \
644 					0x20
645 #define MCX_CAP_DEVICE_START_PAD	0x10
646 #define MCX_CAP_DEVICE_128BYTE_CACHELINE \
647 					0x08
648 	uint8_t			__reserved__[1];
649 	uint16_t		gid_table_size;
650 
651 	uint16_t		flags2;
652 #define MCX_CAP_DEVICE_OUT_OF_SEQ_CNT	0x8000
653 #define MCX_CAP_DEVICE_VPORT_COUNTERS	0x4000
654 #define MCX_CAP_DEVICE_RETRANSMISSION_Q_COUNTERS \
655 					0x2000
656 #define MCX_CAP_DEVICE_DEBUG		0x1000
657 #define MCX_CAP_DEVICE_MODIFY_RQ_COUNTERS_SET_ID \
658 					0x8000
659 #define MCX_CAP_DEVICE_RQ_DELAY_DROP	0x4000
660 #define MCX_CAP_DEVICe_MAX_QP_CNT_MASK	0x03ff
661 	uint16_t		pkey_table_size;
662 
663 	uint8_t			flags3;
664 #define MCX_CAP_DEVICE_VPORT_GROUP_MANAGER \
665 					0x80
666 #define MCX_CAP_DEVICE_VHCA_GROUP_MANAGER \
667 					0x40
668 #define MCX_CAP_DEVICE_IB_VIRTUAL	0x20
669 #define MCX_CAP_DEVICE_ETH_VIRTUAL	0x10
670 #define MCX_CAP_DEVICE_ETS		0x04
671 #define MCX_CAP_DEVICE_NIC_FLOW_TABLE	0x02
672 #define MCX_CAP_DEVICE_ESWITCH_FLOW_TABLE \
673 					0x01
674 	uint8_t			local_ca_ack_delay; /* 5 bits */
675 #define MCX_CAP_DEVICE_LOCAL_CA_ACK_DELAY \
676 					0x1f
677 	uint8_t			port_type;
678 #define MCX_CAP_DEVICE_PORT_MODULE_EVENT \
679 					0x80
680 #define MCX_CAP_DEVICE_PORT_TYPE	0x03
681 	uint8_t			num_ports;
682 
683 	uint8_t			snapshot_log_max_msg;
684 #define MCX_CAP_DEVICE_SNAPSHOT		0x80
685 #define MCX_CAP_DEVICE_LOG_MAX_MSG	0x1f
686 	uint8_t			max_tc; /* 4 bits */
687 #define MCX_CAP_DEVICE_MAX_TC		0x0f
688 	uint8_t			flags4;
689 #define MCX_CAP_DEVICE_TEMP_WARN_EVENT	0x80
690 #define MCX_CAP_DEVICE_DCBX		0x40
691 #define MCX_CAP_DEVICE_ROL_S		0x02
692 #define MCX_CAP_DEVICE_ROL_G		0x01
693 	uint8_t			wol;
694 #define MCX_CAP_DEVICE_WOL_S		0x40
695 #define MCX_CAP_DEVICE_WOL_G		0x20
696 #define MCX_CAP_DEVICE_WOL_A		0x10
697 #define MCX_CAP_DEVICE_WOL_B		0x08
698 #define MCX_CAP_DEVICE_WOL_M		0x04
699 #define MCX_CAP_DEVICE_WOL_U		0x02
700 #define MCX_CAP_DEVICE_WOL_P		0x01
701 
702 	uint16_t		stat_rate_support;
703 	uint8_t			__reserved__[1];
704 	uint8_t			cqe_version; /* 4 bits */
705 #define MCX_CAP_DEVICE_CQE_VERSION	0x0f
706 
707 	uint32_t		flags5;
708 #define MCX_CAP_DEVICE_COMPACT_ADDRESS_VECTOR \
709 					0x80000000
710 #define MCX_CAP_DEVICE_STRIDING_RQ	0x40000000
711 #define MCX_CAP_DEVICE_IPOIP_ENHANCED_OFFLOADS \
712 					0x10000000
713 #define MCX_CAP_DEVICE_IPOIP_IPOIP_OFFLOADS \
714 					0x08000000
715 #define MCX_CAP_DEVICE_DC_CONNECT_CP	0x00040000
716 #define MCX_CAP_DEVICE_DC_CNAK_DRACE	0x00020000
717 #define MCX_CAP_DEVICE_DRAIN_SIGERR	0x00010000
719 #define MCX_CAP_DEVICE_CMDIF_CHECKSUM	0x0000c000
720 #define MCX_CAP_DEVICE_SIGERR_QCE	0x00002000
721 #define MCX_CAP_DEVICE_WQ_SIGNATURE	0x00000800
722 #define MCX_CAP_DEVICE_SCTR_DATA_CQE	0x00000400
723 #define MCX_CAP_DEVICE_SHO		0x00000100
724 #define MCX_CAP_DEVICE_TPH		0x00000080
725 #define MCX_CAP_DEVICE_RF		0x00000040
726 #define MCX_CAP_DEVICE_DCT		0x00000020
727 #define MCX_CAP_DEVICE_QOS		0x00000010
728 #define MCX_CAP_DEVICe_ETH_NET_OFFLOADS	0x00000008
729 #define MCX_CAP_DEVICE_ROCE		0x00000004
730 #define MCX_CAP_DEVICE_ATOMIC		0x00000002
731 
732 	uint32_t		flags6;
733 #define MCX_CAP_DEVICE_CQ_OI		0x80000000
734 #define MCX_CAP_DEVICE_CQ_RESIZE	0x40000000
735 #define MCX_CAP_DEVICE_CQ_MODERATION	0x20000000
736 #define MCX_CAP_DEVICE_CQ_PERIOD_MODE_MODIFY \
737 					0x10000000
738 #define MCX_CAP_DEVICE_CQ_INVALIDATE	0x08000000
739 #define MCX_CAP_DEVICE_RESERVED_AT_255	0x04000000
740 #define MCX_CAP_DEVICE_CQ_EQ_REMAP	0x02000000
741 #define MCX_CAP_DEVICE_PG		0x01000000
742 #define MCX_CAP_DEVICE_BLOCK_LB_MC	0x00800000
743 #define MCX_CAP_DEVICE_EXPONENTIAL_BACKOFF \
744 					0x00400000
745 #define MCX_CAP_DEVICE_SCQE_BREAK_MODERATION \
746 					0x00200000
747 #define MCX_CAP_DEVICE_CQ_PERIOD_START_FROM_CQE \
748 					0x00100000
749 #define MCX_CAP_DEVICE_CD		0x00080000
750 #define MCX_CAP_DEVICE_ATM		0x00040000
751 #define MCX_CAP_DEVICE_APM		0x00020000
752 #define MCX_CAP_DEVICE_IMAICL		0x00010000
753 #define MCX_CAP_DEVICE_QKV		0x00000200
754 #define MCX_CAP_DEVICE_PKV		0x00000100
755 #define MCX_CAP_DEVICE_SET_DETH_SQPN	0x00000080
756 #define MCX_CAP_DEVICE_XRC		0x00000008
757 #define MCX_CAP_DEVICE_UD		0x00000004
758 #define MCX_CAP_DEVICE_UC		0x00000002
759 #define MCX_CAP_DEVICE_RC		0x00000001
760 
761 	uint8_t			uar_flags;
762 #define MCX_CAP_DEVICE_UAR_4K		0x80
763 	uint8_t			uar_sz;	/* 6 bits */
764 #define MCX_CAP_DEVICE_UAR_SZ		0x3f
765 	uint8_t			__reserved__[1];
766 	uint8_t			log_pg_sz;
767 
768 	uint8_t			flags7;
769 #define MCX_CAP_DEVICE_BF		0x80
770 #define MCX_CAP_DEVICE_DRIVER_VERSION	0x40
771 #define MCX_CAP_DEVICE_PAD_TX_ETH_PACKET \
772 					0x20
773 	uint8_t			log_bf_reg_size; /* 5 bits */
774 #define MCX_CAP_DEVICE_LOG_BF_REG_SIZE	0x1f
775 	uint8_t			__reserved__[2];
776 
777 	uint16_t		num_of_diagnostic_counters;
778 	uint16_t		max_wqe_sz_sq;
779 
780 	uint8_t			__reserved__[2];
781 	uint16_t		max_wqe_sz_rq;
782 
783 	uint8_t			__reserved__[2];
784 	uint16_t		max_wqe_sz_sq_dc;
785 
786 	uint32_t		max_qp_mcg; /* 25 bits */
787 #define MCX_CAP_DEVICE_MAX_QP_MCG	0x1ffffff
788 
789 	uint8_t			__reserved__[3];
790 	uint8_t			log_max_mcq;
791 
792 	uint8_t			log_max_transport_domain; /* 5 bits */
793 #define MCX_CAP_DEVICE_LOG_MAX_TRANSORT_DOMAIN \
794 					0x1f
795 	uint8_t			log_max_pd; /* 5 bits */
796 #define MCX_CAP_DEVICE_LOG_MAX_PD	0x1f
797 	uint8_t			__reserved__[1];
798 	uint8_t			log_max_xrcd; /* 5 bits */
799 #define MCX_CAP_DEVICE_LOG_MAX_XRCD	0x1f
800 
801 	uint8_t			__reserved__[2];
802 	uint16_t		max_flow_counter;
803 
804 	uint8_t			log_max_rq; /* 5 bits */
805 #define MCX_CAP_DEVICE_LOG_MAX_RQ	0x1f
806 	uint8_t			log_max_sq; /* 5 bits */
807 #define MCX_CAP_DEVICE_LOG_MAX_SQ	0x1f
808 	uint8_t			log_max_tir; /* 5 bits */
809 #define MCX_CAP_DEVICE_LOG_MAX_TIR	0x1f
810 	uint8_t			log_max_tis; /* 5 bits */
811 #define MCX_CAP_DEVICE_LOG_MAX_TIS	0x1f
812 
813 	uint8_t 		flags8;
814 #define MCX_CAP_DEVICE_BASIC_CYCLIC_RCV_WQE \
815 					0x80
816 #define MCX_CAP_DEVICE_LOG_MAX_RMP	0x1f
817 	uint8_t			log_max_rqt; /* 5 bits */
818 #define MCX_CAP_DEVICE_LOG_MAX_RQT	0x1f
819 	uint8_t			log_max_rqt_size; /* 5 bits */
820 #define MCX_CAP_DEVICE_LOG_MAX_RQT_SIZE	0x1f
821 	uint8_t			log_max_tis_per_sq; /* 5 bits */
822 #define MCX_CAP_DEVICE_LOG_MAX_TIS_PER_SQ \
823 					0x1f
824 } __packed __aligned(8);
825 
826 CTASSERT(offsetof(struct mcx_cap_device, max_indirection) == 0x20);
827 CTASSERT(offsetof(struct mcx_cap_device, flags1) == 0x2c);
828 CTASSERT(offsetof(struct mcx_cap_device, flags2) == 0x30);
829 CTASSERT(offsetof(struct mcx_cap_device, snapshot_log_max_msg) == 0x38);
830 CTASSERT(offsetof(struct mcx_cap_device, flags5) == 0x40);
831 CTASSERT(offsetof(struct mcx_cap_device, flags7) == 0x4c);
832 CTASSERT(sizeof(struct mcx_cap_device) <= MCX_CMDQ_MAILBOX_DATASIZE);
833 
/*
 * SET_DRIVER_VERSION: reports a driver-version string to the firmware.
 * The 64-byte string travels in a mailbox (struct below); the inline
 * command carries only the header.
 */
834 struct mcx_cmd_set_driver_version_in {
835 	uint16_t		cmd_opcode;
836 	uint8_t			cmd_reserved0[4];
837 	uint16_t		cmd_op_mod;
838 	uint8_t			cmd_reserved1[8];
839 } __packed __aligned(4);
840 
841 struct mcx_cmd_set_driver_version_out {
842 	uint8_t			cmd_status;
843 	uint8_t			cmd_reserved0[3];
844 	uint32_t		cmd_syndrome;
845 	uint8_t			cmd_reserved1[8];
846 } __packed __aligned(4);
847 
848 struct mcx_cmd_set_driver_version {
849 	uint8_t			cmd_driver_version[64];
850 } __packed __aligned(8);
851 
/*
 * QUERY/MODIFY_NIC_VPORT_CONTEXT commands and the vport context layout
 * they carry in mailboxes.  cmd_field_select chooses which context
 * fields a MODIFY actually updates (MAC address, promisc, MTU).
 */
852 struct mcx_cmd_modify_nic_vport_context_in {
853 	uint16_t		cmd_opcode;
854 	uint8_t			cmd_reserved0[4];
855 	uint16_t		cmd_op_mod;
856 	uint8_t			cmd_reserved1[4];
857 	uint32_t		cmd_field_select;
858 #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_ADDR	0x04
859 #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC	0x10
860 #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU	0x40
861 } __packed __aligned(4);
862 
863 struct mcx_cmd_modify_nic_vport_context_out {
864 	uint8_t			cmd_status;
865 	uint8_t			cmd_reserved0[3];
866 	uint32_t		cmd_syndrome;
867 	uint8_t			cmd_reserved1[8];
868 } __packed __aligned(4);
869 
870 struct mcx_cmd_query_nic_vport_context_in {
871 	uint16_t		cmd_opcode;
872 	uint8_t			cmd_reserved0[4];
873 	uint16_t		cmd_op_mod;
874 	uint8_t			cmd_reserved1[4];
875 	uint8_t			cmd_allowed_list_type;
876 	uint8_t			cmd_reserved2[3];
877 } __packed __aligned(4);
878 
879 struct mcx_cmd_query_nic_vport_context_out {
880 	uint8_t			cmd_status;
881 	uint8_t			cmd_reserved0[3];
882 	uint32_t		cmd_syndrome;
883 	uint8_t			cmd_reserved1[8];
884 } __packed __aligned(4);
885 
/*
 * NOTE(review): the LIST_* values (<< 24) exceed the 16-bit vp_flags
 * field — presumably they are applied to the surrounding 32-bit word
 * before byte-swapping; confirm against the users of this struct.
 */
886 struct mcx_nic_vport_ctx {
887 	uint32_t		vp_min_wqe_inline_mode;
888 	uint8_t			vp_reserved0[32];
889 	uint32_t		vp_mtu;
890 	uint8_t			vp_reserved1[200];
891 	uint16_t		vp_flags;
892 #define MCX_NIC_VPORT_CTX_LIST_UC_MAC			(0)
893 #define MCX_NIC_VPORT_CTX_LIST_MC_MAC			(1 << 24)
894 #define MCX_NIC_VPORT_CTX_LIST_VLAN			(2 << 24)
895 #define MCX_NIC_VPORT_CTX_PROMISC_ALL			(1 << 13)
896 #define MCX_NIC_VPORT_CTX_PROMISC_MCAST			(1 << 14)
897 #define MCX_NIC_VPORT_CTX_PROMISC_UCAST			(1 << 15)
898 	uint16_t		vp_allowed_list_size;
899 	uint64_t		vp_perm_addr;
900 	uint8_t			vp_reserved2[4];
901 	/* allowed list follows */
902 } __packed __aligned(4);
903 
/* one packet/octet counter pair as returned by QUERY_VPORT_COUNTERS */
904 struct mcx_counter {
905 	uint64_t		packets;
906 	uint64_t		octets;
907 } __packed __aligned(4);
908 
/* vport counter block; hex comments give byte offsets in the reply */
909 struct mcx_nic_vport_counters {
910 	struct mcx_counter	rx_err;
911 	struct mcx_counter	tx_err;
912 	uint8_t			reserved0[64]; /* 0x30 */
913 	struct mcx_counter	rx_bcast;
914 	struct mcx_counter	tx_bcast;
915 	struct mcx_counter	rx_ucast;
916 	struct mcx_counter	tx_ucast;
917 	struct mcx_counter	rx_mcast;
918 	struct mcx_counter	tx_mcast;
919 	uint8_t			reserved1[0x210 - 0xd0];
920 } __packed __aligned(4);
921 
/*
 * QUERY_VPORT_COUNTERS / QUERY_FLOW_COUNTER command layouts.  The
 * *_mb_in structures are the mailbox portion of the input; cmd_clear
 * resets the counters as part of the read.
 */
922 struct mcx_cmd_query_vport_counters_in {
923 	uint16_t		cmd_opcode;
924 	uint8_t			cmd_reserved0[4];
925 	uint16_t		cmd_op_mod;
926 	uint8_t			cmd_reserved1[8];
927 } __packed __aligned(4);
928 
929 struct mcx_cmd_query_vport_counters_mb_in {
930 	uint8_t			cmd_reserved0[8];
931 	uint8_t			cmd_clear;
932 	uint8_t			cmd_reserved1[7];
933 } __packed __aligned(4);
934 
935 struct mcx_cmd_query_vport_counters_out {
936 	uint8_t			cmd_status;
937 	uint8_t			cmd_reserved0[3];
938 	uint32_t		cmd_syndrome;
939 	uint8_t			cmd_reserved1[8];
940 } __packed __aligned(4);
941 
942 struct mcx_cmd_query_flow_counter_in {
943 	uint16_t		cmd_opcode;
944 	uint8_t			cmd_reserved0[4];
945 	uint16_t		cmd_op_mod;
946 	uint8_t			cmd_reserved1[8];
947 } __packed __aligned(4);
948 
949 struct mcx_cmd_query_flow_counter_mb_in {
950 	uint8_t			cmd_reserved0[8];
951 	uint8_t			cmd_clear;
952 	uint8_t			cmd_reserved1[5];
953 	uint16_t		cmd_flow_counter_id;
954 } __packed __aligned(4);
955 
956 struct mcx_cmd_query_flow_counter_out {
957 	uint8_t			cmd_status;
958 	uint8_t			cmd_reserved0[3];
959 	uint32_t		cmd_syndrome;
960 	uint8_t			cmd_reserved1[8];
961 } __packed __aligned(4);
962 
/* ALLOC_UAR: allocates a user access region; index returned in cmd_uar */
963 struct mcx_cmd_alloc_uar_in {
964 	uint16_t		cmd_opcode;
965 	uint8_t			cmd_reserved0[4];
966 	uint16_t		cmd_op_mod;
967 	uint8_t			cmd_reserved1[8];
968 } __packed __aligned(4);
969 
970 struct mcx_cmd_alloc_uar_out {
971 	uint8_t			cmd_status;
972 	uint8_t			cmd_reserved0[3];
973 	uint32_t		cmd_syndrome;
974 	uint32_t		cmd_uar;
975 	uint8_t			cmd_reserved1[4];
976 } __packed __aligned(4);
977 
/* QUERY_SPECIAL_CONTEXTS: returns the reserved lkey in cmd_resd_lkey */
978 struct mcx_cmd_query_special_ctx_in {
979 	uint16_t		cmd_opcode;
980 	uint8_t			cmd_reserved0[4];
981 	uint16_t		cmd_op_mod;
982 	uint8_t			cmd_reserved1[8];
983 } __packed __aligned(4);
984 
985 struct mcx_cmd_query_special_ctx_out {
986 	uint8_t			cmd_status;
987 	uint8_t			cmd_reserved0[3];
988 	uint32_t		cmd_syndrome;
989 	uint8_t			cmd_reserved1[4];
990 	uint32_t		cmd_resd_lkey;
991 } __packed __aligned(4);
992 
/* event queue hardware context (CREATE_EQ mailbox) */
struct mcx_eq_ctx {
	uint32_t		eq_status;
#define MCX_EQ_CTX_ST_SHIFT		8
#define MCX_EQ_CTX_ST_MASK		(0xf << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_ST_ARMED		(0x9 << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_ST_FIRED		(0xa << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_OI_SHIFT		17
#define MCX_EQ_CTX_OI			(1 << MCX_EQ_CTX_OI_SHIFT)
#define MCX_EQ_CTX_EC_SHIFT		18
#define MCX_EQ_CTX_EC			(1 << MCX_EQ_CTX_EC_SHIFT)
#define MCX_EQ_CTX_STATUS_SHIFT		28
#define MCX_EQ_CTX_STATUS_MASK		(0xf << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_OK		(0x0 << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE \
					(0xa << MCX_EQ_CTX_STATUS_SHIFT)
	uint32_t		eq_reserved1;
	uint32_t		eq_page_offset;
#define MCX_EQ_CTX_PAGE_OFFSET_SHIFT	5
	uint32_t		eq_uar_size;
#define MCX_EQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT	24
	uint32_t		eq_reserved2;
	uint8_t			eq_reserved3[3];
	uint8_t			eq_intr;
	uint32_t		eq_log_page_size;
#define MCX_EQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		eq_reserved4[3];
	uint32_t		eq_consumer_counter;
	uint32_t		eq_producer_counter;
#define MCX_EQ_CTX_COUNTER_MASK		0xffffff
	uint32_t		eq_reserved5[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_ctx) == 64);

/* CREATE_EQ command input */
struct mcx_cmd_create_eq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* CREATE_EQ mailbox input: EQ context plus the event-type subscription mask */
struct mcx_cmd_create_eq_mb_in {
	struct mcx_eq_ctx	cmd_eq_ctx;
	uint8_t			cmd_reserved0[8];
	uint64_t		cmd_event_bitmask;
#define MCX_EVENT_TYPE_COMPLETION	0x00
#define MCX_EVENT_TYPE_CQ_ERROR		0x04
#define MCX_EVENT_TYPE_INTERNAL_ERROR	0x08
#define MCX_EVENT_TYPE_PORT_CHANGE	0x09
#define MCX_EVENT_TYPE_CMD_COMPLETION	0x0a
#define MCX_EVENT_TYPE_PAGE_REQUEST	0x0b
#define MCX_EVENT_TYPE_LAST_WQE		0x13
	uint8_t			cmd_reserved1[176];
} __packed __aligned(4);

/* CREATE_EQ command output: returns the event queue number */
struct mcx_cmd_create_eq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_eqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* one 64-byte entry in the event queue ring */
struct mcx_eq_entry {
	uint8_t			eq_reserved1;
	uint8_t			eq_event_type;
	uint8_t			eq_reserved2;
	uint8_t			eq_event_sub_type;

	uint8_t			eq_reserved3[28];
	uint32_t		eq_event_data[7];
	uint8_t			eq_reserved4[2];
	uint8_t			eq_signature;
	uint8_t			eq_owner;
#define MCX_EQ_ENTRY_OWNER_INIT			1
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_entry) == 64);
1072 
/* ALLOC_PD command input */
struct mcx_cmd_alloc_pd_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* ALLOC_PD command output: returns the protection domain number */
struct mcx_cmd_alloc_pd_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_pd;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* ALLOC_TRANSPORT_DOMAIN command input */
struct mcx_cmd_alloc_td_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* ALLOC_TRANSPORT_DOMAIN command output: returns the transport domain */
struct mcx_cmd_alloc_td_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tdomain;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* CREATE_TIR command input */
struct mcx_cmd_create_tir_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* CREATE_TIR mailbox input: rx steering target (TIR) configuration */
struct mcx_cmd_create_tir_mb_in {
	uint8_t			cmd_reserved0[20];
	uint32_t		cmd_disp_type;
#define MCX_TIR_CTX_DISP_TYPE_SHIFT	28
	uint8_t			cmd_reserved1[8];
	uint32_t		cmd_lro;
	uint8_t			cmd_reserved2[8];
	uint32_t		cmd_inline_rqn;
	uint32_t		cmd_indir_table;
	uint32_t		cmd_tdomain;
	uint8_t			cmd_rx_hash_key[40];
	uint32_t		cmd_rx_hash_sel_outer;
	uint32_t		cmd_rx_hash_sel_inner;
	uint8_t			cmd_reserved3[152];
} __packed __aligned(4);

/* CREATE_TIR command output: returns the TIR number */
struct mcx_cmd_create_tir_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tirn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* DESTROY_TIR command input */
struct mcx_cmd_destroy_tir_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_tirn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* DESTROY_TIR command output */
struct mcx_cmd_destroy_tir_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* CREATE_TIS command input */
struct mcx_cmd_create_tis_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* CREATE_TIS mailbox input: tx steering (TIS) configuration */
struct mcx_cmd_create_tis_mb_in {
	uint8_t			cmd_reserved[16];
	uint32_t		cmd_prio;
	uint8_t			cmd_reserved1[32];
	uint32_t		cmd_tdomain;
	uint8_t			cmd_reserved2[120];
} __packed __aligned(4);

/* CREATE_TIS command output: returns the TIS number */
struct mcx_cmd_create_tis_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tisn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* DESTROY_TIS command input */
struct mcx_cmd_destroy_tis_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_tisn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* DESTROY_TIS command output */
struct mcx_cmd_destroy_tis_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);
1186 
/* completion queue hardware context (CREATE_CQ mailbox) */
struct mcx_cq_ctx {
	uint32_t		cq_status;
	uint32_t		cq_reserved1;
	uint32_t		cq_page_offset;
	uint32_t		cq_uar_size;
#define MCX_CQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT	24
	uint32_t		cq_period_max_count;
#define MCX_CQ_CTX_PERIOD_SHIFT		16
	uint32_t		cq_eqn;
	uint32_t		cq_log_page_size;
#define MCX_CQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		cq_reserved2;
	uint32_t		cq_last_notified;
	uint32_t		cq_last_solicit;
	uint32_t		cq_consumer_counter;
	uint32_t		cq_producer_counter;
	uint8_t			cq_reserved3[8];
	uint64_t		cq_doorbell;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_ctx) == 64);

/* CREATE_CQ command input */
struct mcx_cmd_create_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* CREATE_CQ mailbox input */
struct mcx_cmd_create_cq_mb_in {
	struct mcx_cq_ctx	cmd_cq_ctx;
	uint8_t			cmd_reserved1[192];
} __packed __aligned(4);

/* CREATE_CQ command output: returns the completion queue number */
struct mcx_cmd_create_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* DESTROY_CQ command input */
struct mcx_cmd_destroy_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* DESTROY_CQ command output */
struct mcx_cmd_destroy_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/*
 * one 64-byte completion queue entry.
 * NOTE(review): the member name __reserved__ appears several times below;
 * presumably a macro defined earlier in this file expands it to a unique
 * identifier per use (duplicate members would not otherwise compile) —
 * confirm against the full file.
 */
struct mcx_cq_entry {
	uint32_t		__reserved__;
	uint32_t		cq_lro;
	uint32_t		cq_lro_ack_seq_num;
	uint32_t		cq_rx_hash;
	uint8_t			cq_rx_hash_type;
	uint8_t			cq_ml_path;
	uint16_t		__reserved__;
	uint32_t		cq_checksum;
	uint32_t		__reserved__;
	uint32_t		cq_flags;
	uint32_t		cq_lro_srqn;
	uint32_t		__reserved__[2];
	uint32_t		cq_byte_cnt;
	uint64_t		cq_timestamp;
	uint8_t			cq_rx_drops;
	uint8_t			cq_flow_tag[3];
	uint16_t		cq_wqe_count;
	uint8_t			cq_signature;
	uint8_t			cq_opcode_owner;
#define MCX_CQ_ENTRY_FLAG_OWNER			(1 << 0)
#define MCX_CQ_ENTRY_FLAG_SE			(1 << 1)
#define MCX_CQ_ENTRY_FORMAT_SHIFT		2
#define MCX_CQ_ENTRY_OPCODE_SHIFT		4

#define MCX_CQ_ENTRY_FORMAT_NO_INLINE		0
#define MCX_CQ_ENTRY_FORMAT_INLINE_32		1
#define MCX_CQ_ENTRY_FORMAT_INLINE_64		2
#define MCX_CQ_ENTRY_FORMAT_COMPRESSED		3

#define MCX_CQ_ENTRY_OPCODE_REQ			0
#define MCX_CQ_ENTRY_OPCODE_SEND		2
#define MCX_CQ_ENTRY_OPCODE_REQ_ERR		13
#define MCX_CQ_ENTRY_OPCODE_SEND_ERR		14
#define MCX_CQ_ENTRY_OPCODE_INVALID		15

} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_entry) == 64);

/* completion queue doorbell record (written by driver, read by device) */
struct mcx_cq_doorbell {
	uint32_t		 db_update_ci;
	uint32_t		 db_arm_ci;
#define MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT	28
#define MCX_CQ_DOORBELL_ARM_CMD			(1 << 24)
#define MCX_CQ_DOORBELL_ARM_CI_MASK		(0xffffff)
} __packed __aligned(8);
1292 
/* work queue hardware context, embedded in both SQ and RQ contexts */
struct mcx_wq_ctx {
	uint8_t			 wq_type;
#define MCX_WQ_CTX_TYPE_CYCLIC			(1 << 4)
#define MCX_WQ_CTX_TYPE_SIGNATURE		(1 << 3)
	uint8_t			 wq_reserved0[5];
	uint16_t		 wq_lwm;
	uint32_t		 wq_pd;
	uint32_t		 wq_uar_page;
	uint64_t		 wq_doorbell;
	uint32_t		 wq_hw_counter;
	uint32_t		 wq_sw_counter;
	uint16_t		 wq_log_stride;
	uint8_t			 wq_log_page_sz;
	uint8_t			 wq_log_size;
	uint8_t			 wq_reserved1[156];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_wq_ctx) == 0xC0);
1311 
1312 struct mcx_sq_ctx {
1313 	uint32_t		sq_flags;
1314 #define MCX_SQ_CTX_RLKEY			(1 << 31)
1315 #define MCX_SQ_CTX_FRE_SHIFT			(1 << 29)
1316 #define MCX_SQ_CTX_FLUSH_IN_ERROR		(1 << 28)
1317 #define MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT		24
1318 #define MCX_SQ_CTX_STATE_SHIFT			20
1319 	uint32_t		sq_user_index;
1320 	uint32_t		sq_cqn;
1321 	uint32_t		sq_reserved1[5];
1322 	uint32_t		sq_tis_lst_sz;
1323 #define MCX_SQ_CTX_TIS_LST_SZ_SHIFT		16
1324 	uint32_t		sq_reserved2[2];
1325 	uint32_t		sq_tis_num;
1326 	struct mcx_wq_ctx	sq_wq;
1327 } __packed __aligned(4);
1328 
1329 struct mcx_sq_entry_seg {
1330 	uint32_t		sqs_byte_count;
1331 	uint32_t		sqs_lkey;
1332 	uint64_t		sqs_addr;
1333 } __packed __aligned(4);
1334 
1335 struct mcx_sq_entry {
1336 	/* control segment */
1337 	uint32_t		sqe_opcode_index;
1338 #define MCX_SQE_WQE_INDEX_SHIFT			8
1339 #define MCX_SQE_WQE_OPCODE_NOP			0x00
1340 #define MCX_SQE_WQE_OPCODE_SEND			0x0a
1341 	uint32_t		sqe_ds_sq_num;
1342 #define MCX_SQE_SQ_NUM_SHIFT			8
1343 	uint32_t		sqe_signature;
1344 #define MCX_SQE_SIGNATURE_SHIFT			24
1345 #define MCX_SQE_SOLICITED_EVENT			0x02
1346 #define MCX_SQE_CE_CQE_ON_ERR			0x00
1347 #define MCX_SQE_CE_CQE_FIRST_ERR		0x04
1348 #define MCX_SQE_CE_CQE_ALWAYS			0x08
1349 #define MCX_SQE_CE_CQE_SOLICIT			0x0C
1350 #define MCX_SQE_FM_NO_FENCE			0x00
1351 #define MCX_SQE_FM_SMALL_FENCE			0x40
1352 	uint32_t		sqe_mkey;
1353 
1354 	/* ethernet segment */
1355 	uint32_t		sqe_reserved1;
1356 	uint32_t		sqe_mss_csum;
1357 #define MCX_SQE_L4_CSUM				(1 << 31)
1358 #define MCX_SQE_L3_CSUM				(1 << 30)
1359 	uint32_t		sqe_reserved2;
1360 	uint16_t		sqe_inline_header_size;
1361 	uint16_t		sqe_inline_headers[9];
1362 
1363 	/* data segment */
1364 	struct mcx_sq_entry_seg sqe_segs[1];
1365 } __packed __aligned(64);
1366 
1367 CTASSERT(sizeof(struct mcx_sq_entry) == 64);
1368 
/* CREATE_SQ command input */
struct mcx_cmd_create_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* CREATE_SQ command output: returns the send queue number */
struct mcx_cmd_create_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_sqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* MODIFY_SQ command input: state transition is encoded in cmd_sq_state */
struct mcx_cmd_modify_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_sq_state;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* MODIFY_SQ mailbox input: modify mask plus replacement SQ context */
struct mcx_cmd_modify_sq_mb_in {
	uint32_t		cmd_modify_hi;
	uint32_t		cmd_modify_lo;
	uint8_t			cmd_reserved0[8];
	struct mcx_sq_ctx	cmd_sq_ctx;
} __packed __aligned(4);

/* MODIFY_SQ command output */
struct mcx_cmd_modify_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* DESTROY_SQ command input */
struct mcx_cmd_destroy_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_sqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* DESTROY_SQ command output */
struct mcx_cmd_destroy_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);
1420 
1421 
1422 struct mcx_rq_ctx {
1423 	uint32_t		rq_flags;
1424 #define MCX_RQ_CTX_RLKEY			(1 << 31)
1425 #define MCX_RQ_CTX_VLAN_STRIP_DIS		(1 << 28)
1426 #define MCX_RQ_CTX_MEM_RQ_TYPE_SHIFT		24
1427 #define MCX_RQ_CTX_STATE_SHIFT			20
1428 #define MCX_RQ_CTX_FLUSH_IN_ERROR		(1 << 18)
1429 	uint32_t		rq_user_index;
1430 	uint32_t		rq_cqn;
1431 	uint32_t		rq_reserved1;
1432 	uint32_t		rq_rmpn;
1433 	uint32_t		rq_reserved2[7];
1434 	struct mcx_wq_ctx	rq_wq;
1435 } __packed __aligned(4);
1436 
1437 struct mcx_rq_entry {
1438 	uint32_t		rqe_byte_count;
1439 	uint32_t		rqe_lkey;
1440 	uint64_t		rqe_addr;
1441 } __packed __aligned(16);
1442 
/* CREATE_RQ command input */
struct mcx_cmd_create_rq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* CREATE_RQ command output: returns the receive queue number */
struct mcx_cmd_create_rq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_rqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* MODIFY_RQ command input: state transition is encoded in cmd_rq_state */
struct mcx_cmd_modify_rq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_rq_state;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* MODIFY_RQ mailbox input: modify mask plus replacement RQ context */
struct mcx_cmd_modify_rq_mb_in {
	uint32_t		cmd_modify_hi;
	uint32_t		cmd_modify_lo;
	uint8_t			cmd_reserved0[8];
	struct mcx_rq_ctx	cmd_rq_ctx;
} __packed __aligned(4);

/* MODIFY_RQ command output */
struct mcx_cmd_modify_rq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* DESTROY_RQ command input */
struct mcx_cmd_destroy_rq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_rqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* DESTROY_RQ command output */
struct mcx_cmd_destroy_rq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);
1494 
/* CREATE_FLOW_TABLE command input */
struct mcx_cmd_create_flow_table_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* flow table hardware context: level, log2 size and miss behaviour */
struct mcx_flow_table_ctx {
	uint8_t			ft_miss_action;
	uint8_t			ft_level;
	uint8_t			ft_reserved0;
	uint8_t			ft_log_size;
	uint32_t		ft_table_miss_id;
	uint8_t			ft_reserved1[28];
} __packed __aligned(4);

/* CREATE_FLOW_TABLE mailbox input */
struct mcx_cmd_create_flow_table_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[7];
	struct mcx_flow_table_ctx cmd_ctx;
} __packed __aligned(4);

/* CREATE_FLOW_TABLE command output: returns the table id */
struct mcx_cmd_create_flow_table_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* DESTROY_FLOW_TABLE command input */
struct mcx_cmd_destroy_flow_table_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* DESTROY_FLOW_TABLE mailbox input */
struct mcx_cmd_destroy_flow_table_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[40];
} __packed __aligned(4);

/* DESTROY_FLOW_TABLE command output */
struct mcx_cmd_destroy_flow_table_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* SET_FLOW_TABLE_ROOT command input */
struct mcx_cmd_set_flow_table_root_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* SET_FLOW_TABLE_ROOT mailbox input: selects the root table for a type */
struct mcx_cmd_set_flow_table_root_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[56];
} __packed __aligned(4);

/* SET_FLOW_TABLE_ROOT command output */
struct mcx_cmd_set_flow_table_root_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);
1566 
/*
 * flow match criteria/value layout, used both as the match mask in flow
 * groups and as the match value in flow table entries.
 */
struct mcx_flow_match {
	/* outer headers */
	uint8_t			mc_src_mac[6];
	uint16_t		mc_ethertype;
	uint8_t			mc_dest_mac[6];
	uint16_t		mc_first_vlan;
	uint8_t			mc_ip_proto;
	uint8_t			mc_ip_dscp_ecn;
	uint8_t			mc_vlan_flags;
	uint8_t			mc_tcp_flags;
	uint16_t		mc_tcp_sport;
	uint16_t		mc_tcp_dport;
	uint32_t		mc_reserved0;
	uint16_t		mc_udp_sport;
	uint16_t		mc_udp_dport;
	uint8_t			mc_src_ip[16];
	uint8_t			mc_dest_ip[16];

	/* misc parameters */
	uint8_t			mc_reserved1[8];
	uint16_t		mc_second_vlan;
	uint8_t			mc_reserved2[2];
	uint8_t			mc_second_vlan_flags;
	uint8_t			mc_reserved3[15];
	uint32_t		mc_outer_ipv6_flow_label;
	uint8_t			mc_reserved4[32];

	uint8_t			mc_reserved[384];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_flow_match) == 512);
1598 
/* CREATE_FLOW_GROUP command input */
struct mcx_cmd_create_flow_group_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* CREATE_FLOW_GROUP mailbox input: index range and match criteria mask */
struct mcx_cmd_create_flow_group_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_start_flow_index;
	uint8_t			cmd_reserved2[4];
	uint32_t		cmd_end_flow_index;
	uint8_t			cmd_reserved3[23];
	uint8_t			cmd_match_criteria_enable;
#define MCX_CREATE_FLOW_GROUP_CRIT_OUTER	(1 << 0)
#define MCX_CREATE_FLOW_GROUP_CRIT_MISC		(1 << 1)
#define MCX_CREATE_FLOW_GROUP_CRIT_INNER	(1 << 2)
	struct mcx_flow_match	cmd_match_criteria;
	uint8_t			cmd_reserved4[448];
} __packed __aligned(4);

/* CREATE_FLOW_GROUP command output: returns the group id */
struct mcx_cmd_create_flow_group_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_group_id;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* flow table entry context: action plus the match value for the entry */
struct mcx_flow_ctx {
	uint8_t			fc_reserved0[4];
	uint32_t		fc_group_id;
	uint32_t		fc_flow_tag;
	uint32_t		fc_action;
#define MCX_FLOW_CONTEXT_ACTION_ALLOW		(1 << 0)
#define MCX_FLOW_CONTEXT_ACTION_DROP		(1 << 1)
#define MCX_FLOW_CONTEXT_ACTION_FORWARD		(1 << 2)
#define MCX_FLOW_CONTEXT_ACTION_COUNT		(1 << 3)
	uint32_t		fc_dest_list_size;
	uint32_t		fc_counter_list_size;
	uint8_t			fc_reserved1[40];
	struct mcx_flow_match	fc_match_value;
	uint8_t			fc_reserved2[192];
} __packed __aligned(4);

/* destination types for the flow context destination list */
#define MCX_FLOW_CONTEXT_DEST_TYPE_TABLE	(1 << 24)
#define MCX_FLOW_CONTEXT_DEST_TYPE_TIR		(2 << 24)

/* DESTROY_FLOW_GROUP command input */
struct mcx_cmd_destroy_flow_group_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* DESTROY_FLOW_GROUP mailbox input */
struct mcx_cmd_destroy_flow_group_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint32_t		cmd_group_id;
	uint8_t			cmd_reserved1[36];
} __packed __aligned(4);

/* DESTROY_FLOW_GROUP command output */
struct mcx_cmd_destroy_flow_group_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* SET_FLOW_TABLE_ENTRY command input */
struct mcx_cmd_set_flow_table_entry_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* SET_FLOW_TABLE_ENTRY mailbox input: table/index plus entry context */
struct mcx_cmd_set_flow_table_entry_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint32_t		cmd_modify_enable_mask;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_flow_index;
	uint8_t			cmd_reserved2[28];
	struct mcx_flow_ctx	cmd_flow_ctx;
} __packed __aligned(4);

/* SET_FLOW_TABLE_ENTRY command output */
struct mcx_cmd_set_flow_table_entry_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);
1696 
/* QUERY_FLOW_TABLE_ENTRY command input */
struct mcx_cmd_query_flow_table_entry_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* QUERY_FLOW_TABLE_ENTRY mailbox input */
struct mcx_cmd_query_flow_table_entry_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[8];
	uint32_t		cmd_flow_index;
	uint8_t			cmd_reserved2[28];
} __packed __aligned(4);

/* QUERY_FLOW_TABLE_ENTRY command output header */
struct mcx_cmd_query_flow_table_entry_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* QUERY_FLOW_TABLE_ENTRY mailbox output: the entry's flow context */
struct mcx_cmd_query_flow_table_entry_mb_out {
	uint8_t			cmd_reserved0[48];
	struct mcx_flow_ctx	cmd_flow_ctx;
} __packed __aligned(4);

/* DELETE_FLOW_TABLE_ENTRY command input */
struct mcx_cmd_delete_flow_table_entry_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* DELETE_FLOW_TABLE_ENTRY mailbox input */
struct mcx_cmd_delete_flow_table_entry_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[8];
	uint32_t		cmd_flow_index;
	uint8_t			cmd_reserved2[28];
} __packed __aligned(4);

/* DELETE_FLOW_TABLE_ENTRY command output */
struct mcx_cmd_delete_flow_table_entry_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* QUERY_FLOW_GROUP command input */
struct mcx_cmd_query_flow_group_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* QUERY_FLOW_GROUP mailbox input */
struct mcx_cmd_query_flow_group_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint32_t		cmd_group_id;
	uint8_t			cmd_reserved1[36];
} __packed __aligned(4);

/* QUERY_FLOW_GROUP command output header */
struct mcx_cmd_query_flow_group_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* QUERY_FLOW_GROUP mailbox output: mirrors the create-group parameters */
struct mcx_cmd_query_flow_group_mb_out {
	uint8_t			cmd_reserved0[12];
	uint32_t		cmd_start_flow_index;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_end_flow_index;
	uint8_t			cmd_reserved2[20];
	uint32_t		cmd_match_criteria_enable;
	uint8_t			cmd_match_criteria[512];
	uint8_t			cmd_reserved4[448];
} __packed __aligned(4);
1780 
/* QUERY_FLOW_TABLE command input */
struct mcx_cmd_query_flow_table_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* QUERY_FLOW_TABLE mailbox input */
struct mcx_cmd_query_flow_table_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[40];
} __packed __aligned(4);

/* QUERY_FLOW_TABLE command output header */
struct mcx_cmd_query_flow_table_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* QUERY_FLOW_TABLE mailbox output: the table's context */
struct mcx_cmd_query_flow_table_mb_out {
	uint8_t			cmd_reserved0[4];
	struct mcx_flow_table_ctx cmd_ctx;
} __packed __aligned(4);

/* ALLOC_FLOW_COUNTER command input */
struct mcx_cmd_alloc_flow_counter_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);
1813 
1814 struct mcx_cmd_query_rq_in {
1815 	uint16_t		cmd_opcode;
1816 	uint8_t			cmd_reserved0[4];
1817 	uint16_t		cmd_op_mod;
1818 	uint32_t		cmd_rqn;
1819 	uint8_t			cmd_reserved1[4];
1820 } __packed __aligned(4);
1821 
1822 struct mcx_cmd_query_rq_out {
1823 	uint8_t			cmd_status;
1824 	uint8_t			cmd_reserved0[3];
1825 	uint32_t		cmd_syndrome;
1826 	uint8_t			cmd_reserved1[8];
1827 } __packed __aligned(4);
1828 
1829 struct mcx_cmd_query_rq_mb_out {
1830 	uint8_t			cmd_reserved0[16];
1831 	struct mcx_rq_ctx	cmd_ctx;
1832 };
1833 
1834 struct mcx_cmd_query_sq_in {
1835 	uint16_t		cmd_opcode;
1836 	uint8_t			cmd_reserved0[4];
1837 	uint16_t		cmd_op_mod;
1838 	uint32_t		cmd_sqn;
1839 	uint8_t			cmd_reserved1[4];
1840 } __packed __aligned(4);
1841 
1842 struct mcx_cmd_query_sq_out {
1843 	uint8_t			cmd_status;
1844 	uint8_t			cmd_reserved0[3];
1845 	uint32_t		cmd_syndrome;
1846 	uint8_t			cmd_reserved1[8];
1847 } __packed __aligned(4);
1848 
1849 struct mcx_cmd_query_sq_mb_out {
1850 	uint8_t			cmd_reserved0[16];
1851 	struct mcx_sq_ctx	cmd_ctx;
1852 };
1853 
/* ALLOC_FLOW_COUNTER command output: returns the allocated counter id */
struct mcx_cmd_alloc_flow_counter_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_flow_counter_id;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

/* work queue doorbell record (receive/send consumer counters) */
struct mcx_wq_doorbell {
	uint32_t		 db_recv_counter;
	uint32_t		 db_send_counter;
} __packed __aligned(8);
1867 
/* a single-segment DMA-able memory allocation and its mapping */
struct mcx_dmamem {
	bus_dmamap_t		 mxm_map;
	bus_dma_segment_t	 mxm_seg;
	int			 mxm_nsegs;
	size_t			 mxm_size;
	caddr_t			 mxm_kva;
};
/* accessors for the map, device address, kernel address and length */
#define MCX_DMA_MAP(_mxm)	((_mxm)->mxm_map)
#define MCX_DMA_DVA(_mxm)	((_mxm)->mxm_map->dm_segs[0].ds_addr)
#define MCX_DMA_KVA(_mxm)	((void *)(_mxm)->mxm_kva)
#define MCX_DMA_LEN(_mxm)	((_mxm)->mxm_size)

/* multi-segment page memory handed to the firmware (boot/init/regular) */
struct mcx_hwmem {
	bus_dmamap_t		 mhm_map;
	bus_dma_segment_t	*mhm_segs;
	unsigned int		 mhm_seg_count;
	unsigned int		 mhm_npages;
};

/* per-descriptor rx/tx slot: DMA map plus the mbuf it currently holds */
struct mcx_slot {
	bus_dmamap_t		 ms_map;
	struct mbuf		*ms_m;
};
1891 
/* driver-side completion queue state */
struct mcx_cq {
	int			 cq_n;		/* hardware CQ number */
	struct mcx_dmamem	 cq_mem;	/* CQ entry ring */
	uint32_t		*cq_doorbell;
	int			 cq_cons;	/* consumer index */
	int			 cq_count;
};

/* samples used to convert chip timestamps to kernel time */
struct mcx_calibration {
	uint64_t		 c_timestamp;	/* previous mcx chip time */
	uint64_t		 c_uptime;	/* previous kernel nanouptime */
	uint64_t		 c_tbase;	/* mcx chip time */
	uint64_t		 c_ubase;	/* kernel nanouptime */
	uint64_t		 c_tdiff;
	uint64_t		 c_udiff;
};

/* calibration intervals in seconds: short at first, then steady-state */
#define MCX_CALIBRATE_FIRST    2
#define MCX_CALIBRATE_NORMAL   30
1911 
/* per-device driver state */
struct mcx_softc {
	struct device		 sc_dev;
	struct arpcom		 sc_ac;
	struct ifmedia		 sc_media;
	uint64_t		 sc_media_status;
	uint64_t		 sc_media_active;

	/* PCI attachment */
	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	void			*sc_ihc;
	pcitag_t		 sc_tag;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;

	/* firmware command queue */
	struct mcx_dmamem	 sc_cmdq_mem;
	unsigned int		 sc_cmdq_mask;
	unsigned int		 sc_cmdq_size;

	unsigned int		 sc_cmdq_token;

	/* pages given to the firmware at each init stage */
	struct mcx_hwmem	 sc_boot_pages;
	struct mcx_hwmem	 sc_init_pages;
	struct mcx_hwmem	 sc_regular_pages;

	/* resources allocated from the device */
	int			 sc_uar;
	int			 sc_pd;
	int			 sc_tdomain;
	uint32_t		 sc_lkey;

	struct mcx_dmamem	 sc_doorbell_mem;

	/* event queue */
	int			 sc_eqn;
	int			 sc_eq_cons;
	struct mcx_dmamem	 sc_eq_mem;
	int			 sc_hardmtu;

	struct task		 sc_port_change;

	/* flow table and its groups (one group per filter class) */
	int			 sc_flow_table_id;
#define MCX_FLOW_GROUP_PROMISC	 0
#define MCX_FLOW_GROUP_ALLMULTI	 1
#define MCX_FLOW_GROUP_MAC	 2
#define MCX_NUM_FLOW_GROUPS	 3
	int			 sc_flow_group_id[MCX_NUM_FLOW_GROUPS];
	int			 sc_flow_group_size[MCX_NUM_FLOW_GROUPS];
	int			 sc_flow_group_start[MCX_NUM_FLOW_GROUPS];
	int			 sc_promisc_flow_enabled;
	int			 sc_allmulti_flow_enabled;
	int			 sc_mcast_flow_base;
	int			 sc_extra_mcast;
	uint8_t			 sc_mcast_flows[MCX_NUM_MCAST_FLOWS][ETHER_ADDR_LEN];

	/* double-buffered timestamp calibration, switched by generation */
	struct mcx_calibration	 sc_calibration[2];
	unsigned int		 sc_calibration_gen;
	struct timeout		 sc_calibrate;

	struct mcx_cq		 sc_cq[MCX_MAX_CQS];
	int			 sc_num_cq;

	/* rx */
	int			 sc_tirn;
	int			 sc_rqn;
	struct mcx_dmamem	 sc_rq_mem;
	struct mcx_slot		*sc_rx_slots;
	uint32_t		*sc_rx_doorbell;

	int			 sc_rx_cons;
	int			 sc_rx_prod;
	struct timeout		 sc_rx_refill;
	struct if_rxring	 sc_rxr;

	/* tx */
	int			 sc_tisn;
	int			 sc_sqn;
	struct mcx_dmamem	 sc_sq_mem;
	struct mcx_slot		*sc_tx_slots;
	uint32_t		*sc_tx_doorbell;
	int			 sc_bf_size;
	int			 sc_bf_offset;

	int			 sc_tx_cons;
	int			 sc_tx_prod;

	uint64_t		 sc_last_cq_db;
	uint64_t		 sc_last_srq_db;
};
#define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)
2002 
2003 static int	mcx_match(struct device *, void *, void *);
2004 static void	mcx_attach(struct device *, struct device *, void *);
2005 
2006 static int	mcx_version(struct mcx_softc *);
2007 static int	mcx_init_wait(struct mcx_softc *);
2008 static int	mcx_enable_hca(struct mcx_softc *);
2009 static int	mcx_teardown_hca(struct mcx_softc *, uint16_t);
2010 static int	mcx_access_hca_reg(struct mcx_softc *, uint16_t, int, void *,
2011 		    int);
2012 static int	mcx_issi(struct mcx_softc *);
2013 static int	mcx_pages(struct mcx_softc *, struct mcx_hwmem *, uint16_t);
2014 static int	mcx_hca_max_caps(struct mcx_softc *);
2015 static int	mcx_hca_set_caps(struct mcx_softc *);
2016 static int	mcx_init_hca(struct mcx_softc *);
2017 static int	mcx_set_driver_version(struct mcx_softc *);
2018 static int	mcx_iff(struct mcx_softc *);
2019 static int	mcx_alloc_uar(struct mcx_softc *);
2020 static int	mcx_alloc_pd(struct mcx_softc *);
2021 static int	mcx_alloc_tdomain(struct mcx_softc *);
2022 static int	mcx_create_eq(struct mcx_softc *);
2023 static int	mcx_query_nic_vport_context(struct mcx_softc *);
2024 static int	mcx_query_special_contexts(struct mcx_softc *);
2025 static int	mcx_set_port_mtu(struct mcx_softc *, int);
2026 static int	mcx_create_cq(struct mcx_softc *, int);
2027 static int	mcx_destroy_cq(struct mcx_softc *, int);
2028 static int	mcx_create_sq(struct mcx_softc *, int);
2029 static int	mcx_destroy_sq(struct mcx_softc *);
2030 static int	mcx_ready_sq(struct mcx_softc *);
2031 static int	mcx_create_rq(struct mcx_softc *, int);
2032 static int	mcx_destroy_rq(struct mcx_softc *);
2033 static int	mcx_ready_rq(struct mcx_softc *);
2034 static int	mcx_create_tir(struct mcx_softc *);
2035 static int	mcx_destroy_tir(struct mcx_softc *);
2036 static int	mcx_create_tis(struct mcx_softc *);
2037 static int	mcx_destroy_tis(struct mcx_softc *);
2038 static int	mcx_create_flow_table(struct mcx_softc *, int);
2039 static int	mcx_set_flow_table_root(struct mcx_softc *);
2040 static int	mcx_destroy_flow_table(struct mcx_softc *);
2041 static int	mcx_create_flow_group(struct mcx_softc *, int, int,
2042 		    int, int, struct mcx_flow_match *);
2043 static int	mcx_destroy_flow_group(struct mcx_softc *, int);
2044 static int	mcx_set_flow_table_entry(struct mcx_softc *, int, int,
2045 		    uint8_t *);
2046 static int	mcx_delete_flow_table_entry(struct mcx_softc *, int, int);
2047 
2048 #if 0
2049 static int	mcx_dump_flow_table(struct mcx_softc *);
2050 static int	mcx_dump_flow_table_entry(struct mcx_softc *, int);
2051 static int	mcx_dump_flow_group(struct mcx_softc *);
2052 static int	mcx_dump_rq(struct mcx_softc *);
2053 static int	mcx_dump_sq(struct mcx_softc *);
2054 #endif
2055 
2056 
2057 /*
2058 static void	mcx_cmdq_dump(const struct mcx_cmdq_entry *);
2059 static void	mcx_cmdq_mbox_dump(struct mcx_dmamem *, int);
2060 */
2061 static void	mcx_refill(void *);
2062 static int	mcx_process_rx(struct mcx_softc *, struct mcx_cq_entry *,
2063 		    struct mbuf_list *, const struct mcx_calibration *);
2064 static void	mcx_process_txeof(struct mcx_softc *, struct mcx_cq_entry *,
2065 		    int *);
2066 static void	mcx_process_cq(struct mcx_softc *, struct mcx_cq *);
2067 
2068 static void	mcx_arm_cq(struct mcx_softc *, struct mcx_cq *);
2069 static void	mcx_arm_eq(struct mcx_softc *);
2070 static int	mcx_intr(void *);
2071 
2072 static void	mcx_up(struct mcx_softc *);
2073 static void	mcx_down(struct mcx_softc *);
2074 static int	mcx_ioctl(struct ifnet *, u_long, caddr_t);
2075 static int	mcx_rxrinfo(struct mcx_softc *, struct if_rxrinfo *);
2076 static void	mcx_start(struct ifqueue *);
2077 static void	mcx_watchdog(struct ifnet *);
2078 static void	mcx_media_add_types(struct mcx_softc *);
2079 static void	mcx_media_status(struct ifnet *, struct ifmediareq *);
2080 static int	mcx_media_change(struct ifnet *);
2081 static int	mcx_get_sffpage(struct ifnet *, struct if_sffpage *);
2082 static void	mcx_port_change(void *);
2083 
2084 static void	mcx_calibrate_first(struct mcx_softc *);
2085 static void	mcx_calibrate(void *);
2086 
2087 static inline uint32_t
2088 		mcx_rd(struct mcx_softc *, bus_size_t);
2089 static inline void
2090 		mcx_wr(struct mcx_softc *, bus_size_t, uint32_t);
2091 static inline void
2092 		mcx_bar(struct mcx_softc *, bus_size_t, bus_size_t, int);
2093 
2094 static uint64_t	mcx_timer(struct mcx_softc *);
2095 
2096 static int	mcx_dmamem_alloc(struct mcx_softc *, struct mcx_dmamem *,
2097 		    bus_size_t, u_int align);
2098 static void	mcx_dmamem_zero(struct mcx_dmamem *);
2099 static void	mcx_dmamem_free(struct mcx_softc *, struct mcx_dmamem *);
2100 
2101 static int	mcx_hwmem_alloc(struct mcx_softc *, struct mcx_hwmem *,
2102 		    unsigned int);
2103 static void	mcx_hwmem_free(struct mcx_softc *, struct mcx_hwmem *);
2104 
/* autoconf(9) driver glue: no devices yet, "mcx" name, network class */
struct cfdriver mcx_cd = {
	NULL,
	"mcx",
	DV_IFNET,
};
2110 
/* autoconf(9) attach glue: softc size plus match/attach entry points */
struct cfattach mcx_ca = {
	sizeof(struct mcx_softc),
	mcx_match,
	mcx_attach,
};
2116 
/* PCI vendor/product pairs this driver attaches to (ConnectX-4/4Lx/5 family) */
static const struct pci_matchid mcx_devices[] = {
	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27700 },
	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27710 },
	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27800 },
	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28800 },
};
2123 
/*
 * Map from the hardware's ethernet protocol capability bit positions to
 * ifmedia(4) types; a zero entry means the bit has no corresponding
 * media type (or one we don't support yet).
 */
static const uint64_t mcx_eth_cap_map[] = {
	IFM_1000_SGMII,
	IFM_1000_KX,
	IFM_10G_CX4,
	IFM_10G_KX4,
	IFM_10G_KR,
	0,
	IFM_40G_CR4,
	IFM_40G_KR4,
	0,
	0,
	0,
	0,
	IFM_10G_SFP_CU,
	IFM_10G_SR,
	IFM_10G_LR,
	IFM_40G_SR4,
	IFM_40G_LR4,
	0,
	0, /* IFM_50G_SR2 */
	0,
	IFM_100G_CR4,
	IFM_100G_SR4,
	IFM_100G_KR4,
	0,
	0,
	0,
	0,
	IFM_25G_CR,
	IFM_25G_KR,
	IFM_25G_SR,
	IFM_50G_CR2,
	IFM_50G_KR2
};
2158 
2159 static int
2160 mcx_match(struct device *parent, void *match, void *aux)
2161 {
2162 	return (pci_matchbyid(aux, mcx_devices, nitems(mcx_devices)));
2163 }
2164 
2165 void
2166 mcx_attach(struct device *parent, struct device *self, void *aux)
2167 {
2168 	struct mcx_softc *sc = (struct mcx_softc *)self;
2169 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2170 	struct pci_attach_args *pa = aux;
2171 	pcireg_t memtype;
2172 	uint32_t r;
2173 	unsigned int cq_stride;
2174 	unsigned int cq_size;
2175 	const char *intrstr;
2176 	int i;
2177 
2178 	sc->sc_pc = pa->pa_pc;
2179 	sc->sc_tag = pa->pa_tag;
2180 	sc->sc_dmat = pa->pa_dmat;
2181 
2182 	/* Map the PCI memory space */
2183 	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MCX_HCA_BAR);
2184 	if (pci_mapreg_map(pa, MCX_HCA_BAR, memtype,
2185 	    BUS_SPACE_MAP_PREFETCHABLE, &sc->sc_memt, &sc->sc_memh,
2186 	    NULL, &sc->sc_mems, 0)) {
2187 		printf(": unable to map register memory\n");
2188 		return;
2189 	}
2190 
2191 	if (mcx_version(sc) != 0) {
2192 		/* error printed by mcx_version */
2193 		goto unmap;
2194 	}
2195 
2196 	r = mcx_rd(sc, MCX_CMDQ_ADDR_LO);
2197 	cq_stride = 1 << MCX_CMDQ_LOG_STRIDE(r); /* size of the entries */
2198 	cq_size = 1 << MCX_CMDQ_LOG_SIZE(r); /* number of entries */
2199 	if (cq_size > MCX_MAX_CQE) {
2200 		printf(", command queue size overflow %u\n", cq_size);
2201 		goto unmap;
2202 	}
2203 	if (cq_stride < sizeof(struct mcx_cmdq_entry)) {
2204 		printf(", command queue entry size underflow %u\n", cq_stride);
2205 		goto unmap;
2206 	}
2207 	if (cq_stride * cq_size > MCX_PAGE_SIZE) {
2208 		printf(", command queue page overflow\n");
2209 		goto unmap;
2210 	}
2211 
2212 	if (mcx_dmamem_alloc(sc, &sc->sc_doorbell_mem, MCX_PAGE_SIZE,
2213 	    MCX_PAGE_SIZE) != 0) {
2214 		printf(", unable to allocate doorbell memory\n");
2215 		goto unmap;
2216 	}
2217 
2218 	if (mcx_dmamem_alloc(sc, &sc->sc_cmdq_mem, MCX_PAGE_SIZE,
2219 	    MCX_PAGE_SIZE) != 0) {
2220 		printf(", unable to allocate command queue\n");
2221 		goto dbfree;
2222 	}
2223 
2224 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
2225 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint32_t), BUS_SPACE_BARRIER_WRITE);
2226 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem));
2227 	mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint32_t), BUS_SPACE_BARRIER_WRITE);
2228 
2229 	if (mcx_init_wait(sc) != 0) {
2230 		printf(", timeout waiting for init\n");
2231 		goto cqfree;
2232 	}
2233 
2234 	sc->sc_cmdq_mask = cq_size - 1;
2235 	sc->sc_cmdq_size = cq_stride;
2236 
2237 	if (mcx_enable_hca(sc) != 0) {
2238 		/* error printed by mcx_enable_hca */
2239 		goto cqfree;
2240 	}
2241 
2242 	if (mcx_issi(sc) != 0) {
2243 		/* error printed by mcx_issi */
2244 		goto teardown;
2245 	}
2246 
2247 	if (mcx_pages(sc, &sc->sc_boot_pages,
2248 	    htobe16(MCX_CMD_QUERY_PAGES_BOOT)) != 0) {
2249 		/* error printed by mcx_pages */
2250 		goto teardown;
2251 	}
2252 
2253 	if (mcx_hca_max_caps(sc) != 0) {
2254 		/* error printed by mcx_hca_max_caps */
2255 		goto teardown;
2256 	}
2257 
2258 	if (mcx_hca_set_caps(sc) != 0) {
2259 		/* error printed by mcx_hca_set_caps */
2260 		goto teardown;
2261 	}
2262 
2263 	if (mcx_pages(sc, &sc->sc_init_pages,
2264 	    htobe16(MCX_CMD_QUERY_PAGES_INIT)) != 0) {
2265 		/* error printed by mcx_pages */
2266 		goto teardown;
2267 	}
2268 
2269 	if (mcx_init_hca(sc) != 0) {
2270 		/* error printed by mcx_init_hca */
2271 		goto teardown;
2272 	}
2273 
2274 	if (mcx_pages(sc, &sc->sc_regular_pages,
2275 	    htobe16(MCX_CMD_QUERY_PAGES_REGULAR)) != 0) {
2276 		/* error printed by mcx_pages */
2277 		goto teardown;
2278 	}
2279 
2280 	/* apparently not necessary? */
2281 	if (mcx_set_driver_version(sc) != 0) {
2282 		/* error printed by mcx_set_driver_version */
2283 		goto teardown;
2284 	}
2285 
2286 	if (mcx_iff(sc) != 0) {	/* modify nic vport context */
2287 		/* error printed by mcx_iff? */
2288 		goto teardown;
2289 	}
2290 
2291 	if (mcx_alloc_uar(sc) != 0) {
2292 		/* error printed by mcx_alloc_uar */
2293 		goto teardown;
2294 	}
2295 
2296 	if (mcx_alloc_pd(sc) != 0) {
2297 		/* error printed by mcx_alloc_pd */
2298 		goto teardown;
2299 	}
2300 
2301 	if (mcx_alloc_tdomain(sc) != 0) {
2302 		/* error printed by mcx_alloc_tdomain */
2303 		goto teardown;
2304 	}
2305 
2306 	/*
2307 	 * PRM makes no mention of msi interrupts, just legacy and msi-x.
2308 	 * mellanox support tells me legacy interrupts are not supported,
2309 	 * so we're stuck with just msi-x.
2310 	 */
2311 	if (pci_intr_map_msix(pa, 0, &sc->sc_ih) != 0) {
2312 		printf(": unable to map interrupt\n");
2313 		goto teardown;
2314 	}
2315 	intrstr = pci_intr_string(sc->sc_pc, sc->sc_ih);
2316 	sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih,
2317 	    IPL_NET | IPL_MPSAFE, mcx_intr, sc, DEVNAME(sc));
2318 	if (sc->sc_ihc == NULL) {
2319 		printf(": unable to establish interrupt");
2320 		if (intrstr != NULL)
2321 			printf(" at %s", intrstr);
2322 		printf("\n");
2323 		goto teardown;
2324 	}
2325 
2326 	if (mcx_create_eq(sc) != 0) {
2327 		/* error printed by mcx_create_eq */
2328 		goto teardown;
2329 	}
2330 
2331 	if (mcx_query_nic_vport_context(sc) != 0) {
2332 		/* error printed by mcx_query_nic_vport_context */
2333 		goto teardown;
2334 	}
2335 
2336 	if (mcx_query_special_contexts(sc) != 0) {
2337 		/* error printed by mcx_query_special_contexts */
2338 		goto teardown;
2339 	}
2340 
2341 	if (mcx_set_port_mtu(sc, MCX_HARDMTU) != 0) {
2342 		/* error printed by mcx_set_port_mtu */
2343 		goto teardown;
2344 	}
2345 
2346 	printf(", %s, address %s\n", intrstr,
2347 	    ether_sprintf(sc->sc_ac.ac_enaddr));
2348 
2349 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
2350 	ifp->if_softc = sc;
2351 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
2352 	ifp->if_xflags = IFXF_MPSAFE;
2353 	ifp->if_ioctl = mcx_ioctl;
2354 	ifp->if_qstart = mcx_start;
2355 	ifp->if_watchdog = mcx_watchdog;
2356 	ifp->if_hardmtu = sc->sc_hardmtu;
2357 	ifp->if_capabilities = IFCAP_VLAN_MTU;
2358 	IFQ_SET_MAXLEN(&ifp->if_snd, 1024);
2359 
2360 	ifmedia_init(&sc->sc_media, IFM_IMASK, mcx_media_change,
2361 	    mcx_media_status);
2362 	mcx_media_add_types(sc);
2363 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
2364 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
2365 
2366 	if_attach(ifp);
2367 	ether_ifattach(ifp);
2368 
2369 	timeout_set(&sc->sc_rx_refill, mcx_refill, sc);
2370 	timeout_set(&sc->sc_calibrate, mcx_calibrate, sc);
2371 
2372 	task_set(&sc->sc_port_change, mcx_port_change, sc);
2373 	mcx_port_change(sc);
2374 
2375 	sc->sc_flow_table_id = -1;
2376 	for (i = 0; i < MCX_NUM_FLOW_GROUPS; i++) {
2377 		sc->sc_flow_group_id[i] = -1;
2378 		sc->sc_flow_group_size[i] = 0;
2379 		sc->sc_flow_group_start[i] = 0;
2380 	}
2381 	sc->sc_extra_mcast = 0;
2382 	memset(sc->sc_mcast_flows, 0, sizeof(sc->sc_mcast_flows));
2383 	return;
2384 
2385 teardown:
2386 	mcx_teardown_hca(sc, htobe16(MCX_CMD_TEARDOWN_HCA_GRACEFUL));
2387 	/* error printed by mcx_teardown_hca, and we're already unwinding */
2388 cqfree:
2389 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
2390 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
2391 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem) |
2392 	    MCX_CMDQ_INTERFACE_DISABLED);
2393 	mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
2394 
2395 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, 0);
2396 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
2397 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_CMDQ_INTERFACE_DISABLED);
2398 
2399 	mcx_dmamem_free(sc, &sc->sc_cmdq_mem);
2400 dbfree:
2401 	mcx_dmamem_free(sc, &sc->sc_doorbell_mem);
2402 unmap:
2403 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
2404 	sc->sc_mems = 0;
2405 }
2406 
2407 static int
2408 mcx_version(struct mcx_softc *sc)
2409 {
2410 	uint32_t fw0, fw1;
2411 	uint16_t cmdif;
2412 
2413 	fw0 = mcx_rd(sc, MCX_FW_VER);
2414 	fw1 = mcx_rd(sc, MCX_CMDIF_FW_SUBVER);
2415 
2416 	printf(": FW %u.%u.%04u", MCX_FW_VER_MAJOR(fw0),
2417 	    MCX_FW_VER_MINOR(fw0), MCX_FW_VER_SUBMINOR(fw1));
2418 
2419 	cmdif = MCX_CMDIF(fw1);
2420 	if (cmdif != MCX_CMD_IF_SUPPORTED) {
2421 		printf(", unsupported command interface %u\n", cmdif);
2422 		return (-1);
2423 	}
2424 
2425 	return (0);
2426 }
2427 
2428 static int
2429 mcx_init_wait(struct mcx_softc *sc)
2430 {
2431 	unsigned int i;
2432 	uint32_t r;
2433 
2434 	for (i = 0; i < 2000; i++) {
2435 		r = mcx_rd(sc, MCX_STATE);
2436 		if ((r & MCX_STATE_MASK) == MCX_STATE_READY)
2437 			return (0);
2438 
2439 		delay(1000);
2440 		mcx_bar(sc, MCX_STATE, sizeof(uint32_t),
2441 		    BUS_SPACE_BARRIER_READ);
2442 	}
2443 
2444 	return (-1);
2445 }
2446 
/*
 * Poll a command queue entry until the hardware hands ownership back to
 * software, syncing the command queue DMA memory before each check.
 * Returns 0 on completion, ETIMEDOUT after roughly msec milliseconds.
 */
static uint8_t
mcx_cmdq_poll(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
    unsigned int msec)
{
	unsigned int i;

	for (i = 0; i < msec; i++) {
		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
		    0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);

		if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
		    MCX_CQ_STATUS_OWN_SW) {
			/*
			 * NOTE(review): once the event queue exists
			 * (sc_eqn != 0) the interrupt handler is run by
			 * hand here — presumably to consume the command
			 * completion event; confirm against the EQ code.
			 */
			if (sc->sc_eqn != 0)
				mcx_intr(sc);
			return (0);
		}

		delay(1000);
	}

	return (ETIMEDOUT);
}
2469 
2470 static uint32_t
2471 mcx_mix_u64(uint32_t xor, uint64_t u64)
2472 {
2473 	xor ^= u64 >> 32;
2474 	xor ^= u64;
2475 
2476 	return (xor);
2477 }
2478 
2479 static uint32_t
2480 mcx_mix_u32(uint32_t xor, uint32_t u32)
2481 {
2482 	xor ^= u32;
2483 
2484 	return (xor);
2485 }
2486 
2487 static uint32_t
2488 mcx_mix_u8(uint32_t xor, uint8_t u8)
2489 {
2490 	xor ^= u8;
2491 
2492 	return (xor);
2493 }
2494 
2495 static uint8_t
2496 mcx_mix_done(uint32_t xor)
2497 {
2498 	xor ^= xor >> 16;
2499 	xor ^= xor >> 8;
2500 
2501 	return (xor);
2502 }
2503 
2504 static uint8_t
2505 mcx_xor(const void *buf, size_t len)
2506 {
2507 	const uint32_t *dwords = buf;
2508 	uint32_t xor = 0xff;
2509 	size_t i;
2510 
2511 	len /= sizeof(*dwords);
2512 
2513 	for (i = 0; i < len; i++)
2514 		xor ^= dwords[i];
2515 
2516 	return (mcx_mix_done(xor));
2517 }
2518 
2519 static uint8_t
2520 mcx_cmdq_token(struct mcx_softc *sc)
2521 {
2522 	uint8_t token;
2523 
2524 	do {
2525 		token = ++sc->sc_cmdq_token;
2526 	} while (token == 0);
2527 
2528 	return (token);
2529 }
2530 
/*
 * Initialise a command queue entry: zero the full entry stride, set the
 * PCIe transport type, big-endian input/output lengths and the caller's
 * token, and mark the entry as owned by hardware.
 */
static void
mcx_cmdq_init(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
    uint32_t ilen, uint32_t olen, uint8_t token)
{
	memset(cqe, 0, sc->sc_cmdq_size);

	cqe->cq_type = MCX_CMDQ_TYPE_PCIE;
	htobem32(&cqe->cq_input_length, ilen);
	htobem32(&cqe->cq_output_length, olen);
	cqe->cq_token = token;
	cqe->cq_status = MCX_CQ_STATUS_OWN_HW;
}
2543 
/* Store the entry's signature: the inverted xor checksum of the entry. */
static void
mcx_cmdq_sign(struct mcx_cmdq_entry *cqe)
{
	cqe->cq_signature = ~mcx_xor(cqe, sizeof(*cqe));
}
2549 
/*
 * Verify a completed command queue entry.  The checksum test is
 * currently disabled (see the commented-out line); always reports ok.
 */
static int
mcx_cmdq_verify(const struct mcx_cmdq_entry *cqe)
{
	/* return (mcx_xor(cqe, sizeof(*cqe)) ? -1 :  0); */
	return (0);
}
2556 
/* Accessor for the inline input data area of a command queue entry. */
static void *
mcx_cmdq_in(struct mcx_cmdq_entry *cqe)
{
	return (&cqe->cq_input_data);
}
2562 
/* Accessor for the inline output data area of a command queue entry. */
static void *
mcx_cmdq_out(struct mcx_cmdq_entry *cqe)
{
	return (&cqe->cq_output_data);
}
2568 
/*
 * Sign the entry, flush the command queue DMA memory, then ring the
 * doorbell for the given slot to hand the command to the firmware.
 */
static void
mcx_cmdq_post(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
    unsigned int slot)
{
	mcx_cmdq_sign(cqe);

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
	    0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_PRERW);

	mcx_wr(sc, MCX_CMDQ_DOORBELL, 1U << slot);
}
2580 
2581 static int
2582 mcx_enable_hca(struct mcx_softc *sc)
2583 {
2584 	struct mcx_cmdq_entry *cqe;
2585 	struct mcx_cmd_enable_hca_in *in;
2586 	struct mcx_cmd_enable_hca_out *out;
2587 	int error;
2588 	uint8_t status;
2589 
2590 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2591 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
2592 
2593 	in = mcx_cmdq_in(cqe);
2594 	in->cmd_opcode = htobe16(MCX_CMD_ENABLE_HCA);
2595 	in->cmd_op_mod = htobe16(0);
2596 	in->cmd_function_id = htobe16(0);
2597 
2598 	mcx_cmdq_post(sc, cqe, 0);
2599 
2600 	error = mcx_cmdq_poll(sc, cqe, 1000);
2601 	if (error != 0) {
2602 		printf(", hca enable timeout\n");
2603 		return (-1);
2604 	}
2605 	if (mcx_cmdq_verify(cqe) != 0) {
2606 		printf(", hca enable command corrupt\n");
2607 		return (-1);
2608 	}
2609 
2610 	status = cqe->cq_output_data[0];
2611 	if (status != MCX_CQ_STATUS_OK) {
2612 		printf(", hca enable failed (%x)\n", status);
2613 		return (-1);
2614 	}
2615 
2616 	return (0);
2617 }
2618 
2619 static int
2620 mcx_teardown_hca(struct mcx_softc *sc, uint16_t profile)
2621 {
2622 	struct mcx_cmdq_entry *cqe;
2623 	struct mcx_cmd_teardown_hca_in *in;
2624 	struct mcx_cmd_teardown_hca_out *out;
2625 	int error;
2626 	uint8_t status;
2627 
2628 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2629 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
2630 
2631 	in = mcx_cmdq_in(cqe);
2632 	in->cmd_opcode = htobe16(MCX_CMD_TEARDOWN_HCA);
2633 	in->cmd_op_mod = htobe16(0);
2634 	in->cmd_profile = profile;
2635 
2636 	mcx_cmdq_post(sc, cqe, 0);
2637 
2638 	error = mcx_cmdq_poll(sc, cqe, 1000);
2639 	if (error != 0) {
2640 		printf(", hca teardown timeout\n");
2641 		return (-1);
2642 	}
2643 	if (mcx_cmdq_verify(cqe) != 0) {
2644 		printf(", hca teardown command corrupt\n");
2645 		return (-1);
2646 	}
2647 
2648 	status = cqe->cq_output_data[0];
2649 	if (status != MCX_CQ_STATUS_OK) {
2650 		printf(", hca teardown failed (%x)\n", status);
2651 		return (-1);
2652 	}
2653 
2654 	return (0);
2655 }
2656 
2657 static int
2658 mcx_cmdq_mboxes_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
2659     unsigned int nmb, uint64_t *ptr, uint8_t token)
2660 {
2661 	caddr_t kva;
2662 	uint64_t dva;
2663 	int i;
2664 	int error;
2665 
2666 	error = mcx_dmamem_alloc(sc, mxm,
2667 	    nmb * MCX_CMDQ_MAILBOX_SIZE, MCX_CMDQ_MAILBOX_ALIGN);
2668 	if (error != 0)
2669 		return (error);
2670 
2671 	mcx_dmamem_zero(mxm);
2672 
2673 	dva = MCX_DMA_DVA(mxm);
2674 	kva = MCX_DMA_KVA(mxm);
2675 	for (i = 0; i < nmb; i++) {
2676 		struct mcx_cmdq_mailbox *mbox = (struct mcx_cmdq_mailbox *)kva;
2677 
2678 		/* patch the cqe or mbox pointing at this one */
2679 		htobem64(ptr, dva);
2680 
2681 		/* fill in this mbox */
2682 		htobem32(&mbox->mb_block_number, i);
2683 		mbox->mb_token = token;
2684 
2685 		/* move to the next one */
2686 		ptr = &mbox->mb_next_ptr;
2687 
2688 		dva += MCX_CMDQ_MAILBOX_SIZE;
2689 		kva += MCX_CMDQ_MAILBOX_SIZE;
2690 	}
2691 
2692 	return (0);
2693 }
2694 
2695 static uint32_t
2696 mcx_cmdq_mbox_ctrl_sig(const struct mcx_cmdq_mailbox *mb)
2697 {
2698 	uint32_t xor = 0xff;
2699 
2700 	/* only 3 fields get set, so mix them directly */
2701 	xor = mcx_mix_u64(xor, mb->mb_next_ptr);
2702 	xor = mcx_mix_u32(xor, mb->mb_block_number);
2703 	xor = mcx_mix_u8(xor, mb->mb_token);
2704 
2705 	return (mcx_mix_done(xor));
2706 }
2707 
2708 static void
2709 mcx_cmdq_mboxes_sign(struct mcx_dmamem *mxm, unsigned int nmb)
2710 {
2711 	caddr_t kva;
2712 	int i;
2713 
2714 	kva = MCX_DMA_KVA(mxm);
2715 
2716 	for (i = 0; i < nmb; i++) {
2717 		struct mcx_cmdq_mailbox *mb = (struct mcx_cmdq_mailbox *)kva;
2718 		uint8_t sig = mcx_cmdq_mbox_ctrl_sig(mb);
2719 		mb->mb_ctrl_signature = sig;
2720 		mb->mb_signature = sig ^
2721 		    mcx_xor(mb->mb_data, sizeof(mb->mb_data));
2722 
2723 		kva += MCX_CMDQ_MAILBOX_SIZE;
2724 	}
2725 }
2726 
/* Sync the whole mailbox DMA region with the given bus_dmamap_sync ops. */
static void
mcx_cmdq_mboxes_sync(struct mcx_softc *sc, struct mcx_dmamem *mxm, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(mxm),
	    0, MCX_DMA_LEN(mxm), ops);
}
2733 
2734 static struct mcx_cmdq_mailbox *
2735 mcx_cq_mbox(struct mcx_dmamem *mxm, unsigned int i)
2736 {
2737 	caddr_t kva;
2738 
2739 	kva = MCX_DMA_KVA(mxm);
2740 	kva += i * MCX_CMDQ_MAILBOX_SIZE;
2741 
2742 	return ((struct mcx_cmdq_mailbox *)kva);
2743 }
2744 
/* Accessor for a mailbox's data area. */
static inline void *
mcx_cq_mbox_data(struct mcx_cmdq_mailbox *mb)
{
	return (&mb->mb_data);
}
2750 
2751 static void
2752 mcx_cmdq_mboxes_copyin(struct mcx_dmamem *mxm, unsigned int nmb,
2753     void *b, size_t len)
2754 {
2755 	caddr_t buf = b;
2756 	struct mcx_cmdq_mailbox *mb;
2757 	int i;
2758 
2759 	mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
2760 	for (i = 0; i < nmb; i++) {
2761 
2762 		memcpy(mb->mb_data, buf, min(sizeof(mb->mb_data), len));
2763 
2764 		if (sizeof(mb->mb_data) >= len)
2765 			break;
2766 
2767 		buf += sizeof(mb->mb_data);
2768 		len -= sizeof(mb->mb_data);
2769 		mb++;
2770 	}
2771 }
2772 
2773 static void
2774 mcx_cmdq_mboxes_copyout(struct mcx_dmamem *mxm, int nmb, void *b, size_t len)
2775 {
2776 	caddr_t buf = b;
2777 	struct mcx_cmdq_mailbox *mb;
2778 	int i;
2779 
2780 	mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
2781 	for (i = 0; i < nmb; i++) {
2782 		memcpy(buf, mb->mb_data, min(sizeof(mb->mb_data), len));
2783 
2784 		if (sizeof(mb->mb_data) >= len)
2785 			break;
2786 
2787 		buf += sizeof(mb->mb_data);
2788 		len -= sizeof(mb->mb_data);
2789 		mb++;
2790 	}
2791 }
2792 
/* Release a mailbox chain's DMA memory. */
static void
mcx_cq_mboxes_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
{
	mcx_dmamem_free(sc, mxm);
}
2798 
/* Debug helpers, compiled out by default: hexdump a cqe and mailboxes. */
#if 0
static void
mcx_cmdq_dump(const struct mcx_cmdq_entry *cqe)
{
	unsigned int i;

	printf(" type %02x, ilen %u, iptr %016llx", cqe->cq_type,
	    bemtoh32(&cqe->cq_input_length), bemtoh64(&cqe->cq_input_ptr));

	printf(", idata ");
	for (i = 0; i < sizeof(cqe->cq_input_data); i++)
		printf("%02x", cqe->cq_input_data[i]);

	printf(", odata ");
	for (i = 0; i < sizeof(cqe->cq_output_data); i++)
		printf("%02x", cqe->cq_output_data[i]);

	printf(", optr %016llx, olen %u, token %02x, sig %02x, status %02x",
	    bemtoh64(&cqe->cq_output_ptr), bemtoh32(&cqe->cq_output_length),
	    cqe->cq_token, cqe->cq_signature, cqe->cq_status);
}

static void
mcx_cmdq_mbox_dump(struct mcx_dmamem *mboxes, int num)
{
	int i, j;
	uint8_t *d;

	for (i = 0; i < num; i++) {
		struct mcx_cmdq_mailbox *mbox;
		mbox = mcx_cq_mbox(mboxes, i);

		d = mcx_cq_mbox_data(mbox);
		for (j = 0; j < MCX_CMDQ_MAILBOX_DATASIZE; j++) {
			if (j != 0 && (j % 16 == 0))
				printf("\n");
			printf("%.2x ", d[j]);
		}
	}
}
#endif
2840 
2841 static int
2842 mcx_access_hca_reg(struct mcx_softc *sc, uint16_t reg, int op, void *data,
2843     int len)
2844 {
2845 	struct mcx_dmamem mxm;
2846 	struct mcx_cmdq_entry *cqe;
2847 	struct mcx_cmd_access_reg_in *in;
2848 	struct mcx_cmd_access_reg_out *out;
2849 	uint8_t token = mcx_cmdq_token(sc);
2850 	int error, nmb;
2851 
2852 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2853 	mcx_cmdq_init(sc, cqe, sizeof(*in) + len, sizeof(*out) + len,
2854 	    token);
2855 
2856 	in = mcx_cmdq_in(cqe);
2857 	in->cmd_opcode = htobe16(MCX_CMD_ACCESS_REG);
2858 	in->cmd_op_mod = htobe16(op);
2859 	in->cmd_register_id = htobe16(reg);
2860 
2861 	nmb = howmany(len, MCX_CMDQ_MAILBOX_DATASIZE);
2862 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb, &cqe->cq_output_ptr, token) != 0) {
2863 		printf(", unable to allocate access reg mailboxen\n");
2864 		return (-1);
2865 	}
2866 	cqe->cq_input_ptr = cqe->cq_output_ptr;
2867 	mcx_cmdq_mboxes_copyin(&mxm, nmb, data, len);
2868 	mcx_cmdq_mboxes_sign(&mxm, nmb);
2869 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
2870 
2871 	mcx_cmdq_post(sc, cqe, 0);
2872 	error = mcx_cmdq_poll(sc, cqe, 1000);
2873 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
2874 
2875 	if (error != 0) {
2876 		printf("%s: access reg (%s %x) timeout\n", DEVNAME(sc),
2877 		    (op == MCX_REG_OP_WRITE ? "write" : "read"), reg);
2878 		goto free;
2879 	}
2880 	error = mcx_cmdq_verify(cqe);
2881 	if (error != 0) {
2882 		printf("%s: access reg (%s %x) reply corrupt\n",
2883 		    (op == MCX_REG_OP_WRITE ? "write" : "read"), DEVNAME(sc),
2884 		    reg);
2885 		goto free;
2886 	}
2887 
2888 	out = mcx_cmdq_out(cqe);
2889 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
2890 		printf("%s: access reg (%s %x) failed (%x, %.6x)\n",
2891 		    DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
2892 		    reg, out->cmd_status, out->cmd_syndrome);
2893 		error = -1;
2894 		goto free;
2895 	}
2896 
2897 	mcx_cmdq_mboxes_copyout(&mxm, nmb, data, len);
2898 free:
2899 	mcx_dmamem_free(sc, &mxm);
2900 
2901 	return (error);
2902 }
2903 
2904 static int
2905 mcx_set_issi(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe, unsigned int slot)
2906 {
2907 	struct mcx_cmd_set_issi_in *in;
2908 	struct mcx_cmd_set_issi_out *out;
2909 	uint8_t status;
2910 
2911 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
2912 
2913 	in = mcx_cmdq_in(cqe);
2914 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_ISSI);
2915 	in->cmd_op_mod = htobe16(0);
2916 	in->cmd_current_issi = htobe16(MCX_ISSI);
2917 
2918 	mcx_cmdq_post(sc, cqe, slot);
2919 	if (mcx_cmdq_poll(sc, cqe, 1000) != 0)
2920 		return (-1);
2921 	if (mcx_cmdq_verify(cqe) != 0)
2922 		return (-1);
2923 
2924 	status = cqe->cq_output_data[0];
2925 	if (status != MCX_CQ_STATUS_OK)
2926 		return (-1);
2927 
2928 	return (0);
2929 }
2930 
/*
 * Negotiate the ISSI (interface step sequence id) with the firmware:
 * QUERY_ISSI first, then SET_ISSI if both sides support MCX_ISSI.
 * Old firmware rejects QUERY_ISSI with BAD_OPCODE, in which case
 * (and on any other soft failure) the driver stays on ISSI 0.
 */
static int
mcx_issi(struct mcx_softc *sc)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_issi_in *in;
	struct mcx_cmd_query_issi_il_out *out;
	struct mcx_cmd_query_issi_mb_out *mb;
	uint8_t token = mcx_cmdq_token(sc);
	uint8_t status;
	int error;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mb), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_ISSI);
	in->cmd_op_mod = htobe16(0);

	CTASSERT(sizeof(*mb) <= MCX_CMDQ_MAILBOX_DATASIZE);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query issi mailbox\n");
		return (-1);
	}
	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf(", query issi timeout\n");
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf(", query issi reply corrupt\n");
		goto free;
	}

	status = cqe->cq_output_data[0];
	switch (status) {
	case MCX_CQ_STATUS_OK:
		break;
	case MCX_CQ_STATUS_BAD_OPCODE:
		/* use ISSI 0 */
		goto free;
	default:
		printf(", query issi failed (%x)\n", status);
		error = -1;
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_current_issi == htobe16(MCX_ISSI)) {
		/* use ISSI 1 */
		goto free;
	}

	/* don't need to read cqe anymore, can be used for SET ISSI */

	/*
	 * cmd_supported_issi is a bitmask; byte 79 is its last byte,
	 * which holds the low-numbered ISSI bits (big-endian layout).
	 */
	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	CTASSERT(MCX_ISSI < NBBY);
	 /* XXX math is hard */
	if (!ISSET(mb->cmd_supported_issi[79], 1 << MCX_ISSI)) {
		/* use ISSI 0 */
		goto free;
	}

	if (mcx_set_issi(sc, cqe, 0) != 0) {
		/* ignore the error, just use ISSI 0 */
	} else {
		/* use ISSI 1 */
	}

free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}
3009 
3010 static int
3011 mcx_query_pages(struct mcx_softc *sc, uint16_t type,
3012     uint32_t *npages, uint16_t *func_id)
3013 {
3014 	struct mcx_cmdq_entry *cqe;
3015 	struct mcx_cmd_query_pages_in *in;
3016 	struct mcx_cmd_query_pages_out *out;
3017 
3018 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3019 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3020 
3021 	in = mcx_cmdq_in(cqe);
3022 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_PAGES);
3023 	in->cmd_op_mod = type;
3024 
3025 	mcx_cmdq_post(sc, cqe, 0);
3026 	if (mcx_cmdq_poll(sc, cqe, 1000) != 0) {
3027 		printf(", query pages timeout\n");
3028 		return (-1);
3029 	}
3030 	if (mcx_cmdq_verify(cqe) != 0) {
3031 		printf(", query pages reply corrupt\n");
3032 		return (-1);
3033 	}
3034 
3035 	out = mcx_cmdq_out(cqe);
3036 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3037 		printf(", query pages failed (%x)\n", out->cmd_status);
3038 		return (-1);
3039 	}
3040 
3041 	*func_id = out->cmd_func_id;
3042 	*npages = bemtoh32(&out->cmd_num_pages);
3043 
3044 	return (0);
3045 }
3046 
/* Cursor over a loaded DMA map's segment list. */
struct bus_dma_iter {
	bus_dmamap_t		i_map;
	bus_size_t		i_offset;	/* offset into current segment */
	unsigned int		i_index;	/* current segment index */
};
3052 
/* Start iterating at the beginning of the map's first segment. */
static void
bus_dma_iter_init(struct bus_dma_iter *i, bus_dmamap_t map)
{
	i->i_map = map;
	i->i_offset = 0;
	i->i_index = 0;
}
3060 
/* Device address at the iterator's current position. */
static bus_addr_t
bus_dma_iter_addr(struct bus_dma_iter *i)
{
	return (i->i_map->dm_segs[i->i_index].ds_addr + i->i_offset);
}
3066 
/*
 * Advance the iterator by size bytes, stepping into following segments
 * whenever the remainder of the current one is consumed.
 */
static void
bus_dma_iter_add(struct bus_dma_iter *i, bus_size_t size)
{
	bus_dma_segment_t *seg = i->i_map->dm_segs + i->i_index;
	bus_size_t diff;

	do {
		/* room left in the current segment */
		diff = seg->ds_len - i->i_offset;
		if (size < diff)
			break;

		size -= diff;

		seg++;

		/* continue from the start of the next segment */
		i->i_offset = 0;
		i->i_index++;
	} while (size > 0);

	i->i_offset += size;
}
3088 
/*
 * Hand the pages in mhm to the firmware with MANAGE_PAGES: the device
 * address of every page is packed, big-endian, into a chain of
 * mailboxes.  The mailboxes are freed again in all cases — once posted,
 * the firmware holds the page addresses itself.
 */
static int
mcx_add_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t func_id)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_manage_pages_in *in;
	struct mcx_cmd_manage_pages_out *out;
	unsigned int paslen, nmb, i, j, npages;
	struct bus_dma_iter iter;
	uint64_t *pas;
	uint8_t status;
	uint8_t token = mcx_cmdq_token(sc);
	int error;

	npages = mhm->mhm_npages;

	/* one 64-bit physical address entry per page */
	paslen = sizeof(*pas) * npages;
	nmb = howmany(paslen, MCX_CMDQ_MAILBOX_DATASIZE);

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + paslen, sizeof(*out), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_MANAGE_PAGES);
	in->cmd_op_mod = htobe16(MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS);
	in->cmd_func_id = func_id;
	htobem32(&in->cmd_input_num_entries, npages);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
	    &cqe->cq_input_ptr, token) != 0) {
		printf(", unable to allocate manage pages mailboxen\n");
		return (-1);
	}

	/* fill each mailbox with as many page addresses as fit */
	bus_dma_iter_init(&iter, mhm->mhm_map);
	for (i = 0; i < nmb; i++) {
		unsigned int lim;

		pas = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, i));
		lim = min(MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas), npages);

		for (j = 0; j < lim; j++) {
			htobem64(&pas[j], bus_dma_iter_addr(&iter));
			bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
		}

		npages -= lim;
	}

	mcx_cmdq_mboxes_sign(&mxm, nmb);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf(", manage pages timeout\n");
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf(", manage pages reply corrupt\n");
		goto free;
	}

	status = cqe->cq_output_data[0];
	if (status != MCX_CQ_STATUS_OK) {
		printf(", manage pages failed (%x)\n", status);
		error = -1;
		goto free;
	}

free:
	mcx_dmamem_free(sc, &mxm);

	return (error);
}
3164 
3165 static int
3166 mcx_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t type)
3167 {
3168 	uint32_t npages;
3169 	uint16_t func_id;
3170 
3171 	if (mcx_query_pages(sc, type, &npages, &func_id) != 0) {
3172 		/* error printed by mcx_query_pages */
3173 		return (-1);
3174 	}
3175 
3176 	if (npages == 0)
3177 		return (0);
3178 
3179 	if (mcx_hwmem_alloc(sc, mhm, npages) != 0) {
3180 		printf(", unable to allocate hwmem\n");
3181 		return (-1);
3182 	}
3183 
3184 	if (mcx_add_pages(sc, mhm, func_id) != 0) {
3185 		printf(", unable to add hwmem\n");
3186 		goto free;
3187 	}
3188 
3189 	return (0);
3190 
3191 free:
3192 	mcx_hwmem_free(sc, mhm);
3193 
3194 	return (-1);
3195 }
3196 
/*
 * Read the device's maximum capabilities (QUERY_HCA_CAP, "max" op_mod,
 * general device capability group) into a mailbox chain.  Rejects
 * devices whose minimum supported page shift exceeds the host's
 * PAGE_SHIFT, and records half the blueflame register size in
 * sc->sc_bf_size.
 *
 * Returns 0 on success, -1 or a command-poll error on failure.
 */
static int
mcx_hca_max_caps(struct mcx_softc *sc)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_hca_cap_in *in;
	struct mcx_cmd_query_hca_cap_out *out;
	struct mcx_cmdq_mailbox *mb;
	struct mcx_cap_device *hca;
	uint8_t status;
	uint8_t token = mcx_cmdq_token(sc);
	int error;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	/* the cap structure is too big for the inline output buffer */
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
	    token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
	in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_MAX |
	    MCX_CMD_QUERY_HCA_CAP_DEVICE);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query hca caps mailboxen\n");
		return (-1);
	}
	mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
	/* sync the mailboxes around the command so we read fresh output */
	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);

	if (error != 0) {
		printf(", query hca caps timeout\n");
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf(", query hca caps reply corrupt\n");
		goto free;
	}

	status = cqe->cq_output_data[0];
	if (status != MCX_CQ_STATUS_OK) {
		printf(", query hca caps failed (%x)\n", status);
		error = -1;
		goto free;
	}

	/* the device cap structure starts at the first mailbox's data */
	mb = mcx_cq_mbox(&mxm, 0);
	hca = mcx_cq_mbox_data(mb);

	if (hca->log_pg_sz > PAGE_SHIFT) {
		printf(", minimum system page shift %u is too large\n",
		    hca->log_pg_sz);
		error = -1;
		goto free;
	}
	/*
	 * blueflame register is split into two buffers, and we must alternate
	 * between the two of them.
	 */
	sc->sc_bf_size = (1 << hca->log_bf_reg_size) / 2;

free:
	mcx_dmamem_free(sc, &mxm);

	return (error);
}
3268 
3269 static int
3270 mcx_hca_set_caps(struct mcx_softc *sc)
3271 {
3272 	struct mcx_dmamem mxm;
3273 	struct mcx_cmdq_entry *cqe;
3274 	struct mcx_cmd_query_hca_cap_in *in;
3275 	struct mcx_cmd_query_hca_cap_out *out;
3276 	struct mcx_cmdq_mailbox *mb;
3277 	struct mcx_cap_device *hca;
3278 	uint8_t status;
3279 	uint8_t token = mcx_cmdq_token(sc);
3280 	int error;
3281 
3282 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3283 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3284 	    token);
3285 
3286 	in = mcx_cmdq_in(cqe);
3287 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
3288 	in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_CURRENT |
3289 	    MCX_CMD_QUERY_HCA_CAP_DEVICE);
3290 
3291 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
3292 	    &cqe->cq_output_ptr, token) != 0) {
3293 		printf(", unable to allocate manage pages mailboxen\n");
3294 		return (-1);
3295 	}
3296 	mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
3297 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3298 
3299 	mcx_cmdq_post(sc, cqe, 0);
3300 	error = mcx_cmdq_poll(sc, cqe, 1000);
3301 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3302 
3303 	if (error != 0) {
3304 		printf(", query hca caps timeout\n");
3305 		goto free;
3306 	}
3307 	error = mcx_cmdq_verify(cqe);
3308 	if (error != 0) {
3309 		printf(", query hca caps reply corrupt\n");
3310 		goto free;
3311 	}
3312 
3313 	status = cqe->cq_output_data[0];
3314 	if (status != MCX_CQ_STATUS_OK) {
3315 		printf(", query hca caps failed (%x)\n", status);
3316 		error = -1;
3317 		goto free;
3318 	}
3319 
3320 	mb = mcx_cq_mbox(&mxm, 0);
3321 	hca = mcx_cq_mbox_data(mb);
3322 
3323 	hca->log_pg_sz = PAGE_SHIFT;
3324 
3325 free:
3326 	mcx_dmamem_free(sc, &mxm);
3327 
3328 	return (error);
3329 }
3330 
3331 
3332 static int
3333 mcx_init_hca(struct mcx_softc *sc)
3334 {
3335 	struct mcx_cmdq_entry *cqe;
3336 	struct mcx_cmd_init_hca_in *in;
3337 	struct mcx_cmd_init_hca_out *out;
3338 	int error;
3339 	uint8_t status;
3340 
3341 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3342 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3343 
3344 	in = mcx_cmdq_in(cqe);
3345 	in->cmd_opcode = htobe16(MCX_CMD_INIT_HCA);
3346 	in->cmd_op_mod = htobe16(0);
3347 
3348 	mcx_cmdq_post(sc, cqe, 0);
3349 
3350 	error = mcx_cmdq_poll(sc, cqe, 1000);
3351 	if (error != 0) {
3352 		printf(", hca init timeout\n");
3353 		return (-1);
3354 	}
3355 	if (mcx_cmdq_verify(cqe) != 0) {
3356 		printf(", hca init command corrupt\n");
3357 		return (-1);
3358 	}
3359 
3360 	status = cqe->cq_output_data[0];
3361 	if (status != MCX_CQ_STATUS_OK) {
3362 		printf(", hca init failed (%x)\n", status);
3363 		return (-1);
3364 	}
3365 
3366 	return (0);
3367 }
3368 
3369 static int
3370 mcx_set_driver_version(struct mcx_softc *sc)
3371 {
3372 	struct mcx_dmamem mxm;
3373 	struct mcx_cmdq_entry *cqe;
3374 	struct mcx_cmd_set_driver_version_in *in;
3375 	struct mcx_cmd_set_driver_version_out *out;
3376 	int error;
3377 	int token;
3378 	uint8_t status;
3379 
3380 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3381 	token = mcx_cmdq_token(sc);
3382 	mcx_cmdq_init(sc, cqe, sizeof(*in) +
3383 	    sizeof(struct mcx_cmd_set_driver_version), sizeof(*out), token);
3384 
3385 	in = mcx_cmdq_in(cqe);
3386 	in->cmd_opcode = htobe16(MCX_CMD_SET_DRIVER_VERSION);
3387 	in->cmd_op_mod = htobe16(0);
3388 
3389 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
3390 	    &cqe->cq_input_ptr, token) != 0) {
3391 		printf(", unable to allocate set driver version mailboxen\n");
3392 		return (-1);
3393 	}
3394 	strlcpy(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)),
3395 	    "OpenBSD,mcx,1.000.000000", MCX_CMDQ_MAILBOX_DATASIZE);
3396 
3397 	mcx_cmdq_mboxes_sign(&mxm, 1);
3398 	mcx_cmdq_post(sc, cqe, 0);
3399 
3400 	error = mcx_cmdq_poll(sc, cqe, 1000);
3401 	if (error != 0) {
3402 		printf(", set driver version timeout\n");
3403 		goto free;
3404 	}
3405 	if (mcx_cmdq_verify(cqe) != 0) {
3406 		printf(", set driver version command corrupt\n");
3407 		goto free;
3408 	}
3409 
3410 	status = cqe->cq_output_data[0];
3411 	if (status != MCX_CQ_STATUS_OK) {
3412 		printf(", set driver version failed (%x)\n", status);
3413 		error = -1;
3414 		goto free;
3415 	}
3416 
3417 free:
3418 	mcx_dmamem_free(sc, &mxm);
3419 
3420 	return (error);
3421 }
3422 
3423 static int
3424 mcx_iff(struct mcx_softc *sc)
3425 {
3426 	struct ifnet *ifp = &sc->sc_ac.ac_if;
3427 	struct mcx_dmamem mxm;
3428 	struct mcx_cmdq_entry *cqe;
3429 	struct mcx_cmd_modify_nic_vport_context_in *in;
3430 	struct mcx_cmd_modify_nic_vport_context_out *out;
3431 	struct mcx_nic_vport_ctx *ctx;
3432 	int error;
3433 	int token;
3434 	int insize;
3435 
3436 	/* enable or disable the promisc flow */
3437 	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
3438 		if (sc->sc_promisc_flow_enabled == 0) {
3439 			mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC,
3440 			    0, NULL);
3441 			sc->sc_promisc_flow_enabled = 1;
3442 		}
3443 	} else if (sc->sc_promisc_flow_enabled != 0) {
3444 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
3445 		sc->sc_promisc_flow_enabled = 0;
3446 	}
3447 
3448 	/* enable or disable the all-multicast flow */
3449 	if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
3450 		if (sc->sc_allmulti_flow_enabled == 0) {
3451 			uint8_t mcast[ETHER_ADDR_LEN];
3452 
3453 			memset(mcast, 0, sizeof(mcast));
3454 			mcast[0] = 0x01;
3455 			mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI,
3456 			    0, mcast);
3457 			sc->sc_allmulti_flow_enabled = 1;
3458 		}
3459 	} else if (sc->sc_allmulti_flow_enabled != 0) {
3460 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
3461 		sc->sc_allmulti_flow_enabled = 0;
3462 	}
3463 
3464 	insize = sizeof(struct mcx_nic_vport_ctx) + 240;
3465 
3466 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3467 	token = mcx_cmdq_token(sc);
3468 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
3469 
3470 	in = mcx_cmdq_in(cqe);
3471 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_NIC_VPORT_CONTEXT);
3472 	in->cmd_op_mod = htobe16(0);
3473 	in->cmd_field_select = htobe32(
3474 	    MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC |
3475 	    MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU);
3476 
3477 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
3478 		printf(", unable to allocate modify nic vport context mailboxen\n");
3479 		return (-1);
3480 	}
3481 	ctx = (struct mcx_nic_vport_ctx *)
3482 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 240);
3483 	ctx->vp_mtu = htobe32(sc->sc_hardmtu);
3484 	/*
3485 	 * always leave promisc-all enabled on the vport since we can't give it
3486 	 * a vlan list, and we're already doing multicast filtering in the flow
3487 	 * table.
3488 	 */
3489 	ctx->vp_flags = htobe16(MCX_NIC_VPORT_CTX_PROMISC_ALL);
3490 
3491 	mcx_cmdq_mboxes_sign(&mxm, 1);
3492 	mcx_cmdq_post(sc, cqe, 0);
3493 
3494 	error = mcx_cmdq_poll(sc, cqe, 1000);
3495 	if (error != 0) {
3496 		printf(", modify nic vport context timeout\n");
3497 		goto free;
3498 	}
3499 	if (mcx_cmdq_verify(cqe) != 0) {
3500 		printf(", modify nic vport context command corrupt\n");
3501 		goto free;
3502 	}
3503 
3504 	out = mcx_cmdq_out(cqe);
3505 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3506 		printf(", modify nic vport context failed (%x, %x)\n",
3507 		    out->cmd_status, out->cmd_syndrome);
3508 		error = -1;
3509 		goto free;
3510 	}
3511 
3512 free:
3513 	mcx_dmamem_free(sc, &mxm);
3514 
3515 	return (error);
3516 }
3517 
3518 static int
3519 mcx_alloc_uar(struct mcx_softc *sc)
3520 {
3521 	struct mcx_cmdq_entry *cqe;
3522 	struct mcx_cmd_alloc_uar_in *in;
3523 	struct mcx_cmd_alloc_uar_out *out;
3524 	int error;
3525 
3526 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3527 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3528 
3529 	in = mcx_cmdq_in(cqe);
3530 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_UAR);
3531 	in->cmd_op_mod = htobe16(0);
3532 
3533 	mcx_cmdq_post(sc, cqe, 0);
3534 
3535 	error = mcx_cmdq_poll(sc, cqe, 1000);
3536 	if (error != 0) {
3537 		printf(", alloc uar timeout\n");
3538 		return (-1);
3539 	}
3540 	if (mcx_cmdq_verify(cqe) != 0) {
3541 		printf(", alloc uar command corrupt\n");
3542 		return (-1);
3543 	}
3544 
3545 	out = mcx_cmdq_out(cqe);
3546 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3547 		printf(", alloc uar failed (%x)\n", out->cmd_status);
3548 		return (-1);
3549 	}
3550 
3551 	sc->sc_uar = betoh32(out->cmd_uar);
3552 
3553 	return (0);
3554 }
3555 
3556 static int
3557 mcx_create_eq(struct mcx_softc *sc)
3558 {
3559 	struct mcx_cmdq_entry *cqe;
3560 	struct mcx_dmamem mxm;
3561 	struct mcx_cmd_create_eq_in *in;
3562 	struct mcx_cmd_create_eq_mb_in *mbin;
3563 	struct mcx_cmd_create_eq_out *out;
3564 	struct mcx_eq_entry *eqe;
3565 	int error;
3566 	uint64_t *pas;
3567 	int insize, npages, paslen, i, token;
3568 
3569 	sc->sc_eq_cons = 0;
3570 
3571 	npages = howmany((1 << MCX_LOG_EQ_SIZE) * sizeof(struct mcx_eq_entry),
3572 	    MCX_PAGE_SIZE);
3573 	paslen = npages * sizeof(*pas);
3574 	insize = sizeof(struct mcx_cmd_create_eq_mb_in) + paslen;
3575 
3576 	if (mcx_dmamem_alloc(sc, &sc->sc_eq_mem, npages * MCX_PAGE_SIZE,
3577 	    MCX_PAGE_SIZE) != 0) {
3578 		printf(", unable to allocate event queue memory\n");
3579 		return (-1);
3580 	}
3581 
3582 	eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&sc->sc_eq_mem);
3583 	for (i = 0; i < (1 << MCX_LOG_EQ_SIZE); i++) {
3584 		eqe[i].eq_owner = MCX_EQ_ENTRY_OWNER_INIT;
3585 	}
3586 
3587 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3588 	token = mcx_cmdq_token(sc);
3589 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
3590 
3591 	in = mcx_cmdq_in(cqe);
3592 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_EQ);
3593 	in->cmd_op_mod = htobe16(0);
3594 
3595 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
3596 	    &cqe->cq_input_ptr, token) != 0) {
3597 		printf(", unable to allocate create eq mailboxen\n");
3598 		return (-1);
3599 	}
3600 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3601 	mbin->cmd_eq_ctx.eq_uar_size = htobe32(
3602 	    (MCX_LOG_EQ_SIZE << MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT) | sc->sc_uar);
3603 	mbin->cmd_event_bitmask = htobe64(
3604 	    (1ull << MCX_EVENT_TYPE_INTERNAL_ERROR) |
3605 	    (1ull << MCX_EVENT_TYPE_PORT_CHANGE) |
3606 	    (1ull << MCX_EVENT_TYPE_CMD_COMPLETION) |
3607 	    (1ull << MCX_EVENT_TYPE_PAGE_REQUEST));
3608 
3609 	/* physical addresses follow the mailbox in data */
3610 	pas = (uint64_t *)(mbin + 1);
3611 	for (i = 0; i < npages; i++) {
3612 		pas[i] = htobe64(MCX_DMA_DVA(&sc->sc_eq_mem) +
3613 		    (i * MCX_PAGE_SIZE));
3614 	}
3615 	mcx_cmdq_mboxes_sign(&mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE));
3616 	mcx_cmdq_post(sc, cqe, 0);
3617 
3618 	error = mcx_cmdq_poll(sc, cqe, 1000);
3619 	if (error != 0) {
3620 		printf(", create eq timeout\n");
3621 		goto free;
3622 	}
3623 	if (mcx_cmdq_verify(cqe) != 0) {
3624 		printf(", create eq command corrupt\n");
3625 		goto free;
3626 	}
3627 
3628 	out = mcx_cmdq_out(cqe);
3629 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3630 		printf(", create eq failed (%x, %x)\n", out->cmd_status,
3631 		    betoh32(out->cmd_syndrome));
3632 		error = -1;
3633 		goto free;
3634 	}
3635 
3636 	sc->sc_eqn = betoh32(out->cmd_eqn);
3637 	mcx_arm_eq(sc);
3638 free:
3639 	mcx_dmamem_free(sc, &mxm);
3640 	return (error);
3641 }
3642 
3643 static int
3644 mcx_alloc_pd(struct mcx_softc *sc)
3645 {
3646 	struct mcx_cmdq_entry *cqe;
3647 	struct mcx_cmd_alloc_pd_in *in;
3648 	struct mcx_cmd_alloc_pd_out *out;
3649 	int error;
3650 
3651 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3652 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3653 
3654 	in = mcx_cmdq_in(cqe);
3655 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_PD);
3656 	in->cmd_op_mod = htobe16(0);
3657 
3658 	mcx_cmdq_post(sc, cqe, 0);
3659 
3660 	error = mcx_cmdq_poll(sc, cqe, 1000);
3661 	if (error != 0) {
3662 		printf(", alloc pd timeout\n");
3663 		return (-1);
3664 	}
3665 	if (mcx_cmdq_verify(cqe) != 0) {
3666 		printf(", alloc pd command corrupt\n");
3667 		return (-1);
3668 	}
3669 
3670 	out = mcx_cmdq_out(cqe);
3671 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3672 		printf(", alloc pd failed (%x)\n", out->cmd_status);
3673 		return (-1);
3674 	}
3675 
3676 	sc->sc_pd = betoh32(out->cmd_pd);
3677 	return (0);
3678 }
3679 
3680 static int
3681 mcx_alloc_tdomain(struct mcx_softc *sc)
3682 {
3683 	struct mcx_cmdq_entry *cqe;
3684 	struct mcx_cmd_alloc_td_in *in;
3685 	struct mcx_cmd_alloc_td_out *out;
3686 	int error;
3687 
3688 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3689 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3690 
3691 	in = mcx_cmdq_in(cqe);
3692 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_TRANSPORT_DOMAIN);
3693 	in->cmd_op_mod = htobe16(0);
3694 
3695 	mcx_cmdq_post(sc, cqe, 0);
3696 
3697 	error = mcx_cmdq_poll(sc, cqe, 1000);
3698 	if (error != 0) {
3699 		printf(", alloc transport domain timeout\n");
3700 		return (-1);
3701 	}
3702 	if (mcx_cmdq_verify(cqe) != 0) {
3703 		printf(", alloc transport domain command corrupt\n");
3704 		return (-1);
3705 	}
3706 
3707 	out = mcx_cmdq_out(cqe);
3708 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3709 		printf(", alloc transport domain failed (%x)\n",
3710 		    out->cmd_status);
3711 		return (-1);
3712 	}
3713 
3714 	sc->sc_tdomain = betoh32(out->cmd_tdomain);
3715 	return (0);
3716 }
3717 
3718 static int
3719 mcx_query_nic_vport_context(struct mcx_softc *sc)
3720 {
3721 	struct mcx_dmamem mxm;
3722 	struct mcx_cmdq_entry *cqe;
3723 	struct mcx_cmd_query_nic_vport_context_in *in;
3724 	struct mcx_cmd_query_nic_vport_context_out *out;
3725 	struct mcx_nic_vport_ctx *ctx;
3726 	uint8_t *addr;
3727 	int error, token, i;
3728 
3729 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3730 	token = mcx_cmdq_token(sc);
3731 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx), token);
3732 
3733 	in = mcx_cmdq_in(cqe);
3734 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_NIC_VPORT_CONTEXT);
3735 	in->cmd_op_mod = htobe16(0);
3736 	in->cmd_allowed_list_type = 0;
3737 
3738 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
3739 		printf(", unable to allocate query nic vport context mailboxen\n");
3740 		return (-1);
3741 	}
3742 	mcx_cmdq_mboxes_sign(&mxm, 1);
3743 	mcx_cmdq_post(sc, cqe, 0);
3744 
3745 	error = mcx_cmdq_poll(sc, cqe, 1000);
3746 	if (error != 0) {
3747 		printf(", query nic vport context timeout\n");
3748 		goto free;
3749 	}
3750 	if (mcx_cmdq_verify(cqe) != 0) {
3751 		printf(", query nic vport context command corrupt\n");
3752 		goto free;
3753 	}
3754 
3755 	out = mcx_cmdq_out(cqe);
3756 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3757 		printf(", query nic vport context failed (%x, %x)\n",
3758 		    out->cmd_status, out->cmd_syndrome);
3759 		error = -1;
3760 		goto free;
3761 	}
3762 
3763 	ctx = (struct mcx_nic_vport_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
3764 	addr = (uint8_t *)&ctx->vp_perm_addr;
3765 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
3766 		sc->sc_ac.ac_enaddr[i] = addr[i + 2];
3767 	}
3768 free:
3769 	mcx_dmamem_free(sc, &mxm);
3770 
3771 	return (error);
3772 }
3773 
3774 static int
3775 mcx_query_special_contexts(struct mcx_softc *sc)
3776 {
3777 	struct mcx_cmdq_entry *cqe;
3778 	struct mcx_cmd_query_special_ctx_in *in;
3779 	struct mcx_cmd_query_special_ctx_out *out;
3780 	int error;
3781 
3782 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3783 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3784 
3785 	in = mcx_cmdq_in(cqe);
3786 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_SPECIAL_CONTEXTS);
3787 	in->cmd_op_mod = htobe16(0);
3788 
3789 	mcx_cmdq_post(sc, cqe, 0);
3790 
3791 	error = mcx_cmdq_poll(sc, cqe, 1000);
3792 	if (error != 0) {
3793 		printf(", query special contexts timeout\n");
3794 		return (-1);
3795 	}
3796 	if (mcx_cmdq_verify(cqe) != 0) {
3797 		printf(", query special contexts command corrupt\n");
3798 		return (-1);
3799 	}
3800 
3801 	out = mcx_cmdq_out(cqe);
3802 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3803 		printf(", query special contexts failed (%x)\n",
3804 		    out->cmd_status);
3805 		return (-1);
3806 	}
3807 
3808 	sc->sc_lkey = betoh32(out->cmd_resd_lkey);
3809 	return (0);
3810 }
3811 
3812 static int
3813 mcx_set_port_mtu(struct mcx_softc *sc, int mtu)
3814 {
3815 	struct mcx_reg_pmtu pmtu;
3816 	int error;
3817 
3818 	/* read max mtu */
3819 	memset(&pmtu, 0, sizeof(pmtu));
3820 	pmtu.rp_local_port = 1;
3821 	error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_READ, &pmtu,
3822 	    sizeof(pmtu));
3823 	if (error != 0) {
3824 		printf(", unable to get port MTU\n");
3825 		return error;
3826 	}
3827 
3828 	mtu = min(mtu, betoh16(pmtu.rp_max_mtu));
3829 	pmtu.rp_admin_mtu = htobe16(mtu);
3830 	error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_WRITE, &pmtu,
3831 	    sizeof(pmtu));
3832 	if (error != 0) {
3833 		printf(", unable to set port MTU\n");
3834 		return error;
3835 	}
3836 
3837 	sc->sc_hardmtu = mtu;
3838 	return 0;
3839 }
3840 
3841 static int
3842 mcx_create_cq(struct mcx_softc *sc, int eqn)
3843 {
3844 	struct mcx_cmdq_entry *cmde;
3845 	struct mcx_cq_entry *cqe;
3846 	struct mcx_cq *cq;
3847 	struct mcx_dmamem mxm;
3848 	struct mcx_cmd_create_cq_in *in;
3849 	struct mcx_cmd_create_cq_mb_in *mbin;
3850 	struct mcx_cmd_create_cq_out *out;
3851 	int error;
3852 	uint64_t *pas;
3853 	int insize, npages, paslen, i, token;
3854 
3855 	if (sc->sc_num_cq >= MCX_MAX_CQS) {
3856 		printf("%s: tried to create too many cqs\n", DEVNAME(sc));
3857 		return (-1);
3858 	}
3859 	cq = &sc->sc_cq[sc->sc_num_cq];
3860 
3861 	npages = howmany((1 << MCX_LOG_CQ_SIZE) * sizeof(struct mcx_cq_entry),
3862 	    MCX_PAGE_SIZE);
3863 	paslen = npages * sizeof(*pas);
3864 	insize = sizeof(struct mcx_cmd_create_cq_mb_in) + paslen;
3865 
3866 	if (mcx_dmamem_alloc(sc, &cq->cq_mem, npages * MCX_PAGE_SIZE,
3867 	    MCX_PAGE_SIZE) != 0) {
3868 		printf("%s: unable to allocate completion queue memory\n",
3869 		    DEVNAME(sc));
3870 		return (-1);
3871 	}
3872 	cqe = MCX_DMA_KVA(&cq->cq_mem);
3873 	for (i = 0; i < (1 << MCX_LOG_CQ_SIZE); i++) {
3874 		cqe[i].cq_opcode_owner = MCX_CQ_ENTRY_FLAG_OWNER;
3875 	}
3876 
3877 	cmde = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3878 	token = mcx_cmdq_token(sc);
3879 	mcx_cmdq_init(sc, cmde, sizeof(*in) + insize, sizeof(*out), token);
3880 
3881 	in = mcx_cmdq_in(cmde);
3882 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_CQ);
3883 	in->cmd_op_mod = htobe16(0);
3884 
3885 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
3886 	    &cmde->cq_input_ptr, token) != 0) {
3887 		printf("%s: unable to allocate create cq mailboxen\n", DEVNAME(sc));
3888 		error = -1;
3889 		goto free;
3890 	}
3891 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3892 	mbin->cmd_cq_ctx.cq_uar_size = htobe32(
3893 	    (MCX_LOG_CQ_SIZE << MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT) | sc->sc_uar);
3894 	mbin->cmd_cq_ctx.cq_eqn = htobe32(eqn);
3895 	mbin->cmd_cq_ctx.cq_period_max_count = htobe32(
3896 	    (MCX_CQ_MOD_PERIOD << MCX_CQ_CTX_PERIOD_SHIFT) |
3897 	    MCX_CQ_MOD_COUNTER);
3898 	mbin->cmd_cq_ctx.cq_doorbell = htobe64(
3899 	    MCX_DMA_DVA(&sc->sc_doorbell_mem) +
3900 	    MCX_CQ_DOORBELL_OFFSET + (MCX_CQ_DOORBELL_SIZE * sc->sc_num_cq));
3901 
3902 	/* physical addresses follow the mailbox in data */
3903 	pas = (uint64_t *)(mbin + 1);
3904 	for (i = 0; i < npages; i++) {
3905 		pas[i] = htobe64(MCX_DMA_DVA(&cq->cq_mem) + (i * MCX_PAGE_SIZE));
3906 	}
3907 	mcx_cmdq_post(sc, cmde, 0);
3908 
3909 	error = mcx_cmdq_poll(sc, cmde, 1000);
3910 	if (error != 0) {
3911 		printf("%s: create cq timeout\n", DEVNAME(sc));
3912 		goto free;
3913 	}
3914 	if (mcx_cmdq_verify(cmde) != 0) {
3915 		printf("%s: create cq command corrupt\n", DEVNAME(sc));
3916 		goto free;
3917 	}
3918 
3919 	out = mcx_cmdq_out(cmde);
3920 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3921 		printf("%s: create cq failed (%x, %x)\n", DEVNAME(sc),
3922 		    out->cmd_status, betoh32(out->cmd_syndrome));
3923 		error = -1;
3924 		goto free;
3925 	}
3926 
3927 	cq->cq_n = betoh32(out->cmd_cqn);
3928 	cq->cq_cons = 0;
3929 	cq->cq_count = 0;
3930 	cq->cq_doorbell = MCX_DMA_KVA(&sc->sc_doorbell_mem) +
3931 	    MCX_CQ_DOORBELL_OFFSET + (MCX_CQ_DOORBELL_SIZE * sc->sc_num_cq);
3932 	mcx_arm_cq(sc, cq);
3933 	sc->sc_num_cq++;
3934 
3935 free:
3936 	mcx_dmamem_free(sc, &mxm);
3937 	return (error);
3938 }
3939 
3940 static int
3941 mcx_destroy_cq(struct mcx_softc *sc, int index)
3942 {
3943 	struct mcx_cmdq_entry *cqe;
3944 	struct mcx_cmd_destroy_cq_in *in;
3945 	struct mcx_cmd_destroy_cq_out *out;
3946 	int error;
3947 	int token;
3948 
3949 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3950 	token = mcx_cmdq_token(sc);
3951 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
3952 
3953 	in = mcx_cmdq_in(cqe);
3954 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_CQ);
3955 	in->cmd_op_mod = htobe16(0);
3956 	in->cmd_cqn = htobe32(sc->sc_cq[index].cq_n);
3957 
3958 	mcx_cmdq_post(sc, cqe, 0);
3959 	error = mcx_cmdq_poll(sc, cqe, 1000);
3960 	if (error != 0) {
3961 		printf("%s: destroy cq timeout\n", DEVNAME(sc));
3962 		return error;
3963 	}
3964 	if (mcx_cmdq_verify(cqe) != 0) {
3965 		printf("%s: destroy cq command corrupt\n", DEVNAME(sc));
3966 		return error;
3967 	}
3968 
3969 	out = mcx_cmdq_out(cqe);
3970 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3971 		printf("%s: destroy cq failed (%x, %x)\n", DEVNAME(sc),
3972 		    out->cmd_status, betoh32(out->cmd_syndrome));
3973 		return -1;
3974 	}
3975 
3976 	sc->sc_cq[index].cq_n = 0;
3977 	mcx_dmamem_free(sc, &sc->sc_cq[index].cq_mem);
3978 	sc->sc_cq[index].cq_cons = 0;
3979 	sc->sc_cq[index].cq_count = 0;
3980 	return 0;
3981 }
3982 
3983 static int
3984 mcx_create_rq(struct mcx_softc *sc, int cqn)
3985 {
3986 	struct mcx_cmdq_entry *cqe;
3987 	struct mcx_dmamem mxm;
3988 	struct mcx_cmd_create_rq_in *in;
3989 	struct mcx_cmd_create_rq_out *out;
3990 	struct mcx_rq_ctx *mbin;
3991 	int error;
3992 	uint64_t *pas;
3993 	uint8_t *doorbell;
3994 	int insize, npages, paslen, i, token;
3995 
3996 	npages = howmany((1 << MCX_LOG_RQ_SIZE) * sizeof(struct mcx_rq_entry),
3997 	    MCX_PAGE_SIZE);
3998 	paslen = npages * sizeof(*pas);
3999 	insize = 0x10 + sizeof(struct mcx_rq_ctx) + paslen;
4000 
4001 	if (mcx_dmamem_alloc(sc, &sc->sc_rq_mem, npages * MCX_PAGE_SIZE,
4002 	    MCX_PAGE_SIZE) != 0) {
4003 		printf("%s: unable to allocate receive queue memory\n",
4004 		    DEVNAME(sc));
4005 		return (-1);
4006 	}
4007 
4008 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4009 	token = mcx_cmdq_token(sc);
4010 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4011 
4012 	in = mcx_cmdq_in(cqe);
4013 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQ);
4014 	in->cmd_op_mod = htobe16(0);
4015 
4016 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4017 	    &cqe->cq_input_ptr, token) != 0) {
4018 		printf("%s: unable to allocate create rq mailboxen\n",
4019 		    DEVNAME(sc));
4020 		error = -1;
4021 		goto free;
4022 	}
4023 	mbin = (struct mcx_rq_ctx *)(((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4024 	mbin->rq_flags = htobe32(MCX_RQ_CTX_RLKEY | MCX_RQ_CTX_VLAN_STRIP_DIS);
4025 	mbin->rq_cqn = htobe32(cqn);
4026 	mbin->rq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4027 	mbin->rq_wq.wq_pd = htobe32(sc->sc_pd);
4028 	mbin->rq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4029 	    MCX_RQ_DOORBELL_OFFSET);
4030 	mbin->rq_wq.wq_log_stride = htobe16(4);
4031 	mbin->rq_wq.wq_log_size = MCX_LOG_RQ_SIZE;
4032 
4033 	/* physical addresses follow the mailbox in data */
4034 	pas = (uint64_t *)(mbin + 1);
4035 	for (i = 0; i < npages; i++) {
4036 		pas[i] = htobe64(MCX_DMA_DVA(&sc->sc_rq_mem) +
4037 		    (i * MCX_PAGE_SIZE));
4038 	}
4039 	mcx_cmdq_post(sc, cqe, 0);
4040 
4041 	error = mcx_cmdq_poll(sc, cqe, 1000);
4042 	if (error != 0) {
4043 		printf("%s: create rq timeout\n", DEVNAME(sc));
4044 		goto free;
4045 	}
4046 	if (mcx_cmdq_verify(cqe) != 0) {
4047 		printf("%s: create rq command corrupt\n", DEVNAME(sc));
4048 		goto free;
4049 	}
4050 
4051 	out = mcx_cmdq_out(cqe);
4052 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4053 		printf("%s: create rq failed (%x, %x)\n", DEVNAME(sc),
4054 		    out->cmd_status, betoh32(out->cmd_syndrome));
4055 		error = -1;
4056 		goto free;
4057 	}
4058 
4059 	sc->sc_rqn = betoh32(out->cmd_rqn);
4060 
4061 	doorbell = MCX_DMA_KVA(&sc->sc_doorbell_mem);
4062 	sc->sc_rx_doorbell = (uint32_t *)(doorbell + MCX_RQ_DOORBELL_OFFSET);
4063 
4064 free:
4065 	mcx_dmamem_free(sc, &mxm);
4066 	return (error);
4067 }
4068 
4069 static int
4070 mcx_ready_rq(struct mcx_softc *sc)
4071 {
4072 	struct mcx_cmdq_entry *cqe;
4073 	struct mcx_dmamem mxm;
4074 	struct mcx_cmd_modify_rq_in *in;
4075 	struct mcx_cmd_modify_rq_mb_in *mbin;
4076 	struct mcx_cmd_modify_rq_out *out;
4077 	int error;
4078 	int token;
4079 
4080 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4081 	token = mcx_cmdq_token(sc);
4082 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4083 
4084 	in = mcx_cmdq_in(cqe);
4085 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_RQ);
4086 	in->cmd_op_mod = htobe16(0);
4087 	in->cmd_rq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | sc->sc_rqn);
4088 
4089 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4090 		printf("%s: unable to allocate modify rq mailbox\n", DEVNAME(sc));
4091 		return (-1);
4092 	}
4093 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4094 	mbin->cmd_rq_ctx.rq_flags = htobe32(
4095 	    MCX_QUEUE_STATE_RDY << MCX_RQ_CTX_STATE_SHIFT);
4096 
4097 	mcx_cmdq_mboxes_sign(&mxm, 1);
4098 	mcx_cmdq_post(sc, cqe, 0);
4099 	error = mcx_cmdq_poll(sc, cqe, 1000);
4100 	if (error != 0) {
4101 		printf("%s: modify rq timeout\n", DEVNAME(sc));
4102 		goto free;
4103 	}
4104 	if (mcx_cmdq_verify(cqe) != 0) {
4105 		printf("%s: modify rq command corrupt\n", DEVNAME(sc));
4106 		goto free;
4107 	}
4108 
4109 	out = mcx_cmdq_out(cqe);
4110 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4111 		printf("%s: modify rq failed (%x, %x)\n", DEVNAME(sc),
4112 		    out->cmd_status, betoh32(out->cmd_syndrome));
4113 		error = -1;
4114 		goto free;
4115 	}
4116 
4117 free:
4118 	mcx_dmamem_free(sc, &mxm);
4119 	return (error);
4120 }
4121 
4122 static int
4123 mcx_destroy_rq(struct mcx_softc *sc)
4124 {
4125 	struct mcx_cmdq_entry *cqe;
4126 	struct mcx_cmd_destroy_rq_in *in;
4127 	struct mcx_cmd_destroy_rq_out *out;
4128 	int error;
4129 	int token;
4130 
4131 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4132 	token = mcx_cmdq_token(sc);
4133 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4134 
4135 	in = mcx_cmdq_in(cqe);
4136 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQ);
4137 	in->cmd_op_mod = htobe16(0);
4138 	in->cmd_rqn = htobe32(sc->sc_rqn);
4139 
4140 	mcx_cmdq_post(sc, cqe, 0);
4141 	error = mcx_cmdq_poll(sc, cqe, 1000);
4142 	if (error != 0) {
4143 		printf("%s: destroy rq timeout\n", DEVNAME(sc));
4144 		return error;
4145 	}
4146 	if (mcx_cmdq_verify(cqe) != 0) {
4147 		printf("%s: destroy rq command corrupt\n", DEVNAME(sc));
4148 		return error;
4149 	}
4150 
4151 	out = mcx_cmdq_out(cqe);
4152 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4153 		printf("%s: destroy rq failed (%x, %x)\n", DEVNAME(sc),
4154 		    out->cmd_status, betoh32(out->cmd_syndrome));
4155 		return -1;
4156 	}
4157 
4158 	sc->sc_rqn = 0;
4159 	return 0;
4160 }
4161 
4162 static int
4163 mcx_create_tir(struct mcx_softc *sc)
4164 {
4165 	struct mcx_cmdq_entry *cqe;
4166 	struct mcx_dmamem mxm;
4167 	struct mcx_cmd_create_tir_in *in;
4168 	struct mcx_cmd_create_tir_mb_in *mbin;
4169 	struct mcx_cmd_create_tir_out *out;
4170 	int error;
4171 	int token;
4172 
4173 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4174 	token = mcx_cmdq_token(sc);
4175 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4176 
4177 	in = mcx_cmdq_in(cqe);
4178 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
4179 	in->cmd_op_mod = htobe16(0);
4180 
4181 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4182 		printf("%s: unable to allocate create tir mailbox\n",
4183 		    DEVNAME(sc));
4184 		return (-1);
4185 	}
4186 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4187 	/* leave disp_type = 0, so packets get sent to the inline rqn */
4188 	mbin->cmd_inline_rqn = htobe32(sc->sc_rqn);
4189 	mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
4190 
4191 	mcx_cmdq_post(sc, cqe, 0);
4192 	error = mcx_cmdq_poll(sc, cqe, 1000);
4193 	if (error != 0) {
4194 		printf("%s: create tir timeout\n", DEVNAME(sc));
4195 		goto free;
4196 	}
4197 	if (mcx_cmdq_verify(cqe) != 0) {
4198 		printf("%s: create tir command corrupt\n", DEVNAME(sc));
4199 		goto free;
4200 	}
4201 
4202 	out = mcx_cmdq_out(cqe);
4203 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4204 		printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
4205 		    out->cmd_status, betoh32(out->cmd_syndrome));
4206 		error = -1;
4207 		goto free;
4208 	}
4209 
4210 	sc->sc_tirn = betoh32(out->cmd_tirn);
4211 free:
4212 	mcx_dmamem_free(sc, &mxm);
4213 	return (error);
4214 }
4215 
4216 static int
4217 mcx_destroy_tir(struct mcx_softc *sc)
4218 {
4219 	struct mcx_cmdq_entry *cqe;
4220 	struct mcx_cmd_destroy_tir_in *in;
4221 	struct mcx_cmd_destroy_tir_out *out;
4222 	int error;
4223 	int token;
4224 
4225 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4226 	token = mcx_cmdq_token(sc);
4227 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4228 
4229 	in = mcx_cmdq_in(cqe);
4230 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIR);
4231 	in->cmd_op_mod = htobe16(0);
4232 	in->cmd_tirn = htobe32(sc->sc_tirn);
4233 
4234 	mcx_cmdq_post(sc, cqe, 0);
4235 	error = mcx_cmdq_poll(sc, cqe, 1000);
4236 	if (error != 0) {
4237 		printf("%s: destroy tir timeout\n", DEVNAME(sc));
4238 		return error;
4239 	}
4240 	if (mcx_cmdq_verify(cqe) != 0) {
4241 		printf("%s: destroy tir command corrupt\n", DEVNAME(sc));
4242 		return error;
4243 	}
4244 
4245 	out = mcx_cmdq_out(cqe);
4246 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4247 		printf("%s: destroy tir failed (%x, %x)\n", DEVNAME(sc),
4248 		    out->cmd_status, betoh32(out->cmd_syndrome));
4249 		return -1;
4250 	}
4251 
4252 	sc->sc_tirn = 0;
4253 	return 0;
4254 }
4255 
4256 static int
4257 mcx_create_sq(struct mcx_softc *sc, int cqn)
4258 {
4259 	struct mcx_cmdq_entry *cqe;
4260 	struct mcx_dmamem mxm;
4261 	struct mcx_cmd_create_sq_in *in;
4262 	struct mcx_sq_ctx *mbin;
4263 	struct mcx_cmd_create_sq_out *out;
4264 	int error;
4265 	uint64_t *pas;
4266 	uint8_t *doorbell;
4267 	int insize, npages, paslen, i, token;
4268 
4269 	npages = howmany((1 << MCX_LOG_SQ_SIZE) * sizeof(struct mcx_sq_entry),
4270 	    MCX_PAGE_SIZE);
4271 	paslen = npages * sizeof(*pas);
4272 	insize = sizeof(struct mcx_sq_ctx) + paslen;
4273 
4274 	if (mcx_dmamem_alloc(sc, &sc->sc_sq_mem, npages * MCX_PAGE_SIZE,
4275 	    MCX_PAGE_SIZE) != 0) {
4276 		printf("%s: unable to allocate send queue memory\n", DEVNAME(sc));
4277 		return (-1);
4278 	}
4279 
4280 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4281 	token = mcx_cmdq_token(sc);
4282 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize + paslen, sizeof(*out),
4283 	    token);
4284 
4285 	in = mcx_cmdq_in(cqe);
4286 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_SQ);
4287 	in->cmd_op_mod = htobe16(0);
4288 
4289 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4290 	    &cqe->cq_input_ptr, token) != 0) {
4291 		printf("%s: unable to allocate create sq mailboxen\n", DEVNAME(sc));
4292 		error = -1;
4293 		goto free;
4294 	}
4295 	mbin = (struct mcx_sq_ctx *)(((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4296 	mbin->sq_flags = htobe32(MCX_SQ_CTX_RLKEY |
4297 	    (1 << MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT));
4298 	mbin->sq_cqn = htobe32(cqn);
4299 	mbin->sq_tis_lst_sz = htobe32(1 << MCX_SQ_CTX_TIS_LST_SZ_SHIFT);
4300 	mbin->sq_tis_num = htobe32(sc->sc_tisn);
4301 	mbin->sq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4302 	mbin->sq_wq.wq_pd = htobe32(sc->sc_pd);
4303 	mbin->sq_wq.wq_uar_page = htobe32(sc->sc_uar);
4304 	mbin->sq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4305 	    MCX_SQ_DOORBELL_OFFSET);
4306 	mbin->sq_wq.wq_log_stride = htobe16(MCX_LOG_SQ_ENTRY_SIZE);
4307 	mbin->sq_wq.wq_log_size = MCX_LOG_SQ_SIZE;
4308 
4309 	/* physical addresses follow the mailbox in data */
4310 	pas = (uint64_t *)(mbin + 1);
4311 	for (i = 0; i < npages; i++) {
4312 		pas[i] = htobe64(MCX_DMA_DVA(&sc->sc_sq_mem) +
4313 		    (i * MCX_PAGE_SIZE));
4314 	}
4315 	mcx_cmdq_post(sc, cqe, 0);
4316 
4317 	error = mcx_cmdq_poll(sc, cqe, 1000);
4318 	if (error != 0) {
4319 		printf("%s: create sq timeout\n", DEVNAME(sc));
4320 		goto free;
4321 	}
4322 	if (mcx_cmdq_verify(cqe) != 0) {
4323 		printf("%s: create sq command corrupt\n", DEVNAME(sc));
4324 		goto free;
4325 	}
4326 
4327 	out = mcx_cmdq_out(cqe);
4328 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4329 		printf("%s: create sq failed (%x, %x)\n", DEVNAME(sc),
4330 		    out->cmd_status, betoh32(out->cmd_syndrome));
4331 		error = -1;
4332 		goto free;
4333 	}
4334 
4335 	sc->sc_sqn = betoh32(out->cmd_sqn);
4336 
4337 	doorbell = MCX_DMA_KVA(&sc->sc_doorbell_mem);
4338 	sc->sc_tx_doorbell = (uint32_t *)(doorbell + MCX_SQ_DOORBELL_OFFSET + 4);
4339 free:
4340 	mcx_dmamem_free(sc, &mxm);
4341 	return (error);
4342 }
4343 
4344 static int
4345 mcx_destroy_sq(struct mcx_softc *sc)
4346 {
4347 	struct mcx_cmdq_entry *cqe;
4348 	struct mcx_cmd_destroy_sq_in *in;
4349 	struct mcx_cmd_destroy_sq_out *out;
4350 	int error;
4351 	int token;
4352 
4353 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4354 	token = mcx_cmdq_token(sc);
4355 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4356 
4357 	in = mcx_cmdq_in(cqe);
4358 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_SQ);
4359 	in->cmd_op_mod = htobe16(0);
4360 	in->cmd_sqn = htobe32(sc->sc_sqn);
4361 
4362 	mcx_cmdq_post(sc, cqe, 0);
4363 	error = mcx_cmdq_poll(sc, cqe, 1000);
4364 	if (error != 0) {
4365 		printf("%s: destroy sq timeout\n", DEVNAME(sc));
4366 		return error;
4367 	}
4368 	if (mcx_cmdq_verify(cqe) != 0) {
4369 		printf("%s: destroy sq command corrupt\n", DEVNAME(sc));
4370 		return error;
4371 	}
4372 
4373 	out = mcx_cmdq_out(cqe);
4374 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4375 		printf("%s: destroy sq failed (%x, %x)\n", DEVNAME(sc),
4376 		    out->cmd_status, betoh32(out->cmd_syndrome));
4377 		return -1;
4378 	}
4379 
4380 	sc->sc_sqn = 0;
4381 	return 0;
4382 }
4383 
4384 static int
4385 mcx_ready_sq(struct mcx_softc *sc)
4386 {
4387 	struct mcx_cmdq_entry *cqe;
4388 	struct mcx_dmamem mxm;
4389 	struct mcx_cmd_modify_sq_in *in;
4390 	struct mcx_cmd_modify_sq_mb_in *mbin;
4391 	struct mcx_cmd_modify_sq_out *out;
4392 	int error;
4393 	int token;
4394 
4395 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4396 	token = mcx_cmdq_token(sc);
4397 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4398 
4399 	in = mcx_cmdq_in(cqe);
4400 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_SQ);
4401 	in->cmd_op_mod = htobe16(0);
4402 	in->cmd_sq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | sc->sc_sqn);
4403 
4404 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4405 		printf("%s: unable to allocate modify sq mailbox\n",
4406 		    DEVNAME(sc));
4407 		return (-1);
4408 	}
4409 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4410 	mbin->cmd_sq_ctx.sq_flags = htobe32(
4411 	    MCX_QUEUE_STATE_RDY << MCX_SQ_CTX_STATE_SHIFT);
4412 
4413 	mcx_cmdq_mboxes_sign(&mxm, 1);
4414 	mcx_cmdq_post(sc, cqe, 0);
4415 	error = mcx_cmdq_poll(sc, cqe, 1000);
4416 	if (error != 0) {
4417 		printf("%s: modify sq timeout\n", DEVNAME(sc));
4418 		goto free;
4419 	}
4420 	if (mcx_cmdq_verify(cqe) != 0) {
4421 		printf("%s: modify sq command corrupt\n", DEVNAME(sc));
4422 		goto free;
4423 	}
4424 
4425 	out = mcx_cmdq_out(cqe);
4426 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4427 		printf("%s: modify sq failed (%x, %x)\n", DEVNAME(sc),
4428 		    out->cmd_status, betoh32(out->cmd_syndrome));
4429 		error = -1;
4430 		goto free;
4431 	}
4432 
4433 free:
4434 	mcx_dmamem_free(sc, &mxm);
4435 	return (error);
4436 }
4437 
4438 static int
4439 mcx_create_tis(struct mcx_softc *sc)
4440 {
4441 	struct mcx_cmdq_entry *cqe;
4442 	struct mcx_dmamem mxm;
4443 	struct mcx_cmd_create_tis_in *in;
4444 	struct mcx_cmd_create_tis_mb_in *mbin;
4445 	struct mcx_cmd_create_tis_out *out;
4446 	int error;
4447 	int token;
4448 
4449 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4450 	token = mcx_cmdq_token(sc);
4451 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4452 
4453 	in = mcx_cmdq_in(cqe);
4454 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIS);
4455 	in->cmd_op_mod = htobe16(0);
4456 
4457 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4458 		printf("%s: unable to allocate create tis mailbox\n", DEVNAME(sc));
4459 		return (-1);
4460 	}
4461 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4462 	mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
4463 
4464 	mcx_cmdq_mboxes_sign(&mxm, 1);
4465 	mcx_cmdq_post(sc, cqe, 0);
4466 	error = mcx_cmdq_poll(sc, cqe, 1000);
4467 	if (error != 0) {
4468 		printf("%s: create tis timeout\n", DEVNAME(sc));
4469 		goto free;
4470 	}
4471 	if (mcx_cmdq_verify(cqe) != 0) {
4472 		printf("%s: create tis command corrupt\n", DEVNAME(sc));
4473 		goto free;
4474 	}
4475 
4476 	out = mcx_cmdq_out(cqe);
4477 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4478 		printf("%s: create tis failed (%x, %x)\n", DEVNAME(sc),
4479 		    out->cmd_status, betoh32(out->cmd_syndrome));
4480 		error = -1;
4481 		goto free;
4482 	}
4483 
4484 	sc->sc_tisn = betoh32(out->cmd_tisn);
4485 free:
4486 	mcx_dmamem_free(sc, &mxm);
4487 	return (error);
4488 }
4489 
4490 static int
4491 mcx_destroy_tis(struct mcx_softc *sc)
4492 {
4493 	struct mcx_cmdq_entry *cqe;
4494 	struct mcx_cmd_destroy_tis_in *in;
4495 	struct mcx_cmd_destroy_tis_out *out;
4496 	int error;
4497 	int token;
4498 
4499 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4500 	token = mcx_cmdq_token(sc);
4501 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4502 
4503 	in = mcx_cmdq_in(cqe);
4504 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIS);
4505 	in->cmd_op_mod = htobe16(0);
4506 	in->cmd_tisn = htobe32(sc->sc_tisn);
4507 
4508 	mcx_cmdq_post(sc, cqe, 0);
4509 	error = mcx_cmdq_poll(sc, cqe, 1000);
4510 	if (error != 0) {
4511 		printf("%s: destroy tis timeout\n", DEVNAME(sc));
4512 		return error;
4513 	}
4514 	if (mcx_cmdq_verify(cqe) != 0) {
4515 		printf("%s: destroy tis command corrupt\n", DEVNAME(sc));
4516 		return error;
4517 	}
4518 
4519 	out = mcx_cmdq_out(cqe);
4520 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4521 		printf("%s: destroy tis failed (%x, %x)\n", DEVNAME(sc),
4522 		    out->cmd_status, betoh32(out->cmd_syndrome));
4523 		return -1;
4524 	}
4525 
4526 	sc->sc_tirn = 0;
4527 	return 0;
4528 }
4529 
4530 #if 0
/*
 * Allocate a hardware flow counter and record its id in
 * sc->sc_flow_counter_id[i].  Compiled out (#if 0); kept around as
 * a debugging aid for flow table work.
 * Returns 0 on success, -1 on failure.
 */
static int
mcx_alloc_flow_counter(struct mcx_softc *sc, int i)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_alloc_flow_counter_in *in;
	struct mcx_cmd_alloc_flow_counter_out *out;
	int error;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_FLOW_COUNTER);
	in->cmd_op_mod = htobe16(0);

	mcx_cmdq_post(sc, cqe, 0);

	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: alloc flow counter timeout\n", DEVNAME(sc));
		return (-1);
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: alloc flow counter command corrupt\n", DEVNAME(sc));
		return (-1);
	}

	/* output fits in the cqe itself, no output mailbox needed */
	out = (struct mcx_cmd_alloc_flow_counter_out *)cqe->cq_output_data;
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: alloc flow counter failed (%x)\n", DEVNAME(sc),
		    out->cmd_status);
		return (-1);
	}

	sc->sc_flow_counter_id[i]  = betoh16(out->cmd_flow_counter_id);
	printf("flow counter id %d = %d\n", i, sc->sc_flow_counter_id[i]);

	return (0);
}
4570 #endif
4571 
4572 static int
4573 mcx_create_flow_table(struct mcx_softc *sc, int log_size)
4574 {
4575 	struct mcx_cmdq_entry *cqe;
4576 	struct mcx_dmamem mxm;
4577 	struct mcx_cmd_create_flow_table_in *in;
4578 	struct mcx_cmd_create_flow_table_mb_in *mbin;
4579 	struct mcx_cmd_create_flow_table_out *out;
4580 	int error;
4581 	int token;
4582 
4583 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4584 	token = mcx_cmdq_token(sc);
4585 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4586 
4587 	in = mcx_cmdq_in(cqe);
4588 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_TABLE);
4589 	in->cmd_op_mod = htobe16(0);
4590 
4591 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4592 		printf("%s: unable to allocate create flow table mailbox\n",
4593 		    DEVNAME(sc));
4594 		return (-1);
4595 	}
4596 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4597 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4598 	mbin->cmd_ctx.ft_log_size = log_size;
4599 
4600 	mcx_cmdq_mboxes_sign(&mxm, 1);
4601 	mcx_cmdq_post(sc, cqe, 0);
4602 	error = mcx_cmdq_poll(sc, cqe, 1000);
4603 	if (error != 0) {
4604 		printf("%s: create flow table timeout\n", DEVNAME(sc));
4605 		goto free;
4606 	}
4607 	if (mcx_cmdq_verify(cqe) != 0) {
4608 		printf("%s: create flow table command corrupt\n", DEVNAME(sc));
4609 		goto free;
4610 	}
4611 
4612 	out = mcx_cmdq_out(cqe);
4613 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4614 		printf("%s: create flow table failed (%x, %x)\n", DEVNAME(sc),
4615 		    out->cmd_status, betoh32(out->cmd_syndrome));
4616 		error = -1;
4617 		goto free;
4618 	}
4619 
4620 	sc->sc_flow_table_id = betoh32(out->cmd_table_id);
4621 free:
4622 	mcx_dmamem_free(sc, &mxm);
4623 	return (error);
4624 }
4625 
4626 static int
4627 mcx_set_flow_table_root(struct mcx_softc *sc)
4628 {
4629 	struct mcx_cmdq_entry *cqe;
4630 	struct mcx_dmamem mxm;
4631 	struct mcx_cmd_set_flow_table_root_in *in;
4632 	struct mcx_cmd_set_flow_table_root_mb_in *mbin;
4633 	struct mcx_cmd_set_flow_table_root_out *out;
4634 	int error;
4635 	int token;
4636 
4637 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4638 	token = mcx_cmdq_token(sc);
4639 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4640 
4641 	in = mcx_cmdq_in(cqe);
4642 	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ROOT);
4643 	in->cmd_op_mod = htobe16(0);
4644 
4645 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4646 		printf("%s: unable to allocate set flow table root mailbox\n",
4647 		    DEVNAME(sc));
4648 		return (-1);
4649 	}
4650 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4651 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4652 	mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4653 
4654 	mcx_cmdq_mboxes_sign(&mxm, 1);
4655 	mcx_cmdq_post(sc, cqe, 0);
4656 	error = mcx_cmdq_poll(sc, cqe, 1000);
4657 	if (error != 0) {
4658 		printf("%s: set flow table root timeout\n", DEVNAME(sc));
4659 		goto free;
4660 	}
4661 	if (mcx_cmdq_verify(cqe) != 0) {
4662 		printf("%s: set flow table root command corrupt\n",
4663 		    DEVNAME(sc));
4664 		goto free;
4665 	}
4666 
4667 	out = mcx_cmdq_out(cqe);
4668 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4669 		printf("%s: set flow table root failed (%x, %x)\n",
4670 		    DEVNAME(sc), out->cmd_status, betoh32(out->cmd_syndrome));
4671 		error = -1;
4672 		goto free;
4673 	}
4674 
4675 free:
4676 	mcx_dmamem_free(sc, &mxm);
4677 	return (error);
4678 }
4679 
4680 static int
4681 mcx_destroy_flow_table(struct mcx_softc *sc)
4682 {
4683 	struct mcx_cmdq_entry *cqe;
4684 	struct mcx_dmamem mxm;
4685 	struct mcx_cmd_destroy_flow_table_in *in;
4686 	struct mcx_cmd_destroy_flow_table_mb_in *mb;
4687 	struct mcx_cmd_destroy_flow_table_out *out;
4688 	int error;
4689 	int token;
4690 
4691 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4692 	token = mcx_cmdq_token(sc);
4693 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
4694 
4695 	in = mcx_cmdq_in(cqe);
4696 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_TABLE);
4697 	in->cmd_op_mod = htobe16(0);
4698 
4699 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4700 		printf("%s: unable to allocate destroy flow table mailbox\n",
4701 		    DEVNAME(sc));
4702 		return (-1);
4703 	}
4704 	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4705 	mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4706 	mb->cmd_table_id = htobe32(sc->sc_flow_table_id);
4707 
4708 	mcx_cmdq_mboxes_sign(&mxm, 1);
4709 	mcx_cmdq_post(sc, cqe, 0);
4710 	error = mcx_cmdq_poll(sc, cqe, 1000);
4711 	if (error != 0) {
4712 		printf("%s: destroy flow table timeout\n", DEVNAME(sc));
4713 		goto free;
4714 	}
4715 	if (mcx_cmdq_verify(cqe) != 0) {
4716 		printf("%s: destroy flow table command corrupt\n", DEVNAME(sc));
4717 		goto free;
4718 	}
4719 
4720 	out = mcx_cmdq_out(cqe);
4721 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4722 		printf("%s: destroy flow table failed (%x, %x)\n", DEVNAME(sc),
4723 		    out->cmd_status, betoh32(out->cmd_syndrome));
4724 		error = -1;
4725 		goto free;
4726 	}
4727 
4728 	sc->sc_flow_table_id = -1;
4729 free:
4730 	mcx_dmamem_free(sc, &mxm);
4731 	return (error);
4732 }
4733 
4734 
4735 static int
4736 mcx_create_flow_group(struct mcx_softc *sc, int group, int start, int size,
4737     int match_enable, struct mcx_flow_match *match)
4738 {
4739 	struct mcx_cmdq_entry *cqe;
4740 	struct mcx_dmamem mxm;
4741 	struct mcx_cmd_create_flow_group_in *in;
4742 	struct mcx_cmd_create_flow_group_mb_in *mbin;
4743 	struct mcx_cmd_create_flow_group_out *out;
4744 	int error;
4745 	int token;
4746 
4747 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4748 	token = mcx_cmdq_token(sc);
4749 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
4750 	    token);
4751 
4752 	in = mcx_cmdq_in(cqe);
4753 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_GROUP);
4754 	in->cmd_op_mod = htobe16(0);
4755 
4756 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
4757 	    != 0) {
4758 		printf("%s: unable to allocate create flow group mailbox\n",
4759 		    DEVNAME(sc));
4760 		return (-1);
4761 	}
4762 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4763 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4764 	mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4765 	mbin->cmd_start_flow_index = htobe32(start);
4766 	mbin->cmd_end_flow_index = htobe32(start + (size - 1));
4767 
4768 	mbin->cmd_match_criteria_enable = match_enable;
4769 	memcpy(&mbin->cmd_match_criteria, match, sizeof(*match));
4770 
4771 	mcx_cmdq_mboxes_sign(&mxm, 2);
4772 	mcx_cmdq_post(sc, cqe, 0);
4773 	error = mcx_cmdq_poll(sc, cqe, 1000);
4774 	if (error != 0) {
4775 		printf("%s: create flow group timeout\n", DEVNAME(sc));
4776 		goto free;
4777 	}
4778 	if (mcx_cmdq_verify(cqe) != 0) {
4779 		printf("%s: create flow group command corrupt\n", DEVNAME(sc));
4780 		goto free;
4781 	}
4782 
4783 	out = mcx_cmdq_out(cqe);
4784 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4785 		printf("%s: create flow group failed (%x, %x)\n", DEVNAME(sc),
4786 		    out->cmd_status, betoh32(out->cmd_syndrome));
4787 		error = -1;
4788 		goto free;
4789 	}
4790 
4791 	sc->sc_flow_group_id[group] = betoh32(out->cmd_group_id);
4792 	sc->sc_flow_group_size[group] = size;
4793 	sc->sc_flow_group_start[group] = start;
4794 
4795 free:
4796 	mcx_dmamem_free(sc, &mxm);
4797 	return (error);
4798 }
4799 
4800 static int
4801 mcx_destroy_flow_group(struct mcx_softc *sc, int group)
4802 {
4803 	struct mcx_cmdq_entry *cqe;
4804 	struct mcx_dmamem mxm;
4805 	struct mcx_cmd_destroy_flow_group_in *in;
4806 	struct mcx_cmd_destroy_flow_group_mb_in *mb;
4807 	struct mcx_cmd_destroy_flow_group_out *out;
4808 	int error;
4809 	int token;
4810 
4811 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4812 	token = mcx_cmdq_token(sc);
4813 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
4814 
4815 	in = mcx_cmdq_in(cqe);
4816 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_GROUP);
4817 	in->cmd_op_mod = htobe16(0);
4818 
4819 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token) != 0) {
4820 		printf("%s: unable to allocate destroy flow group mailbox\n",
4821 		    DEVNAME(sc));
4822 		return (-1);
4823 	}
4824 	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4825 	mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4826 	mb->cmd_table_id = htobe32(sc->sc_flow_table_id);
4827 	mb->cmd_group_id = htobe32(sc->sc_flow_group_id[group]);
4828 
4829 	mcx_cmdq_mboxes_sign(&mxm, 2);
4830 	mcx_cmdq_post(sc, cqe, 0);
4831 	error = mcx_cmdq_poll(sc, cqe, 1000);
4832 	if (error != 0) {
4833 		printf("%s: destroy flow group timeout\n", DEVNAME(sc));
4834 		goto free;
4835 	}
4836 	if (mcx_cmdq_verify(cqe) != 0) {
4837 		printf("%s: destroy flow group command corrupt\n", DEVNAME(sc));
4838 		goto free;
4839 	}
4840 
4841 	out = mcx_cmdq_out(cqe);
4842 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4843 		printf("%s: destroy flow group failed (%x, %x)\n", DEVNAME(sc),
4844 		    out->cmd_status, betoh32(out->cmd_syndrome));
4845 		error = -1;
4846 		goto free;
4847 	}
4848 
4849 	sc->sc_flow_group_id[group] = -1;
4850 	sc->sc_flow_group_size[group] = 0;
4851 free:
4852 	mcx_dmamem_free(sc, &mxm);
4853 	return (error);
4854 }
4855 
4856 static int
4857 mcx_set_flow_table_entry(struct mcx_softc *sc, int group, int index,
4858     uint8_t *macaddr)
4859 {
4860 	struct mcx_cmdq_entry *cqe;
4861 	struct mcx_dmamem mxm;
4862 	struct mcx_cmd_set_flow_table_entry_in *in;
4863 	struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
4864 	struct mcx_cmd_set_flow_table_entry_out *out;
4865 	uint32_t *dest;
4866 	int error;
4867 	int token;
4868 
4869 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4870 	token = mcx_cmdq_token(sc);
4871 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*dest),
4872 	    sizeof(*out), token);
4873 
4874 	in = mcx_cmdq_in(cqe);
4875 	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
4876 	in->cmd_op_mod = htobe16(0);
4877 
4878 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
4879 	    != 0) {
4880 		printf("%s: unable to allocate set flow table entry mailbox\n",
4881 		    DEVNAME(sc));
4882 		return (-1);
4883 	}
4884 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4885 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4886 	mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4887 	mbin->cmd_flow_index = htobe32(sc->sc_flow_group_start[group] + index);
4888 	mbin->cmd_flow_ctx.fc_group_id = htobe32(sc->sc_flow_group_id[group]);
4889 
4890 	/* flow context ends at offset 0x330, 0x130 into the second mbox */
4891 	dest = (uint32_t *)
4892 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
4893 	mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
4894 	mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
4895 	*dest = htobe32(sc->sc_tirn | MCX_FLOW_CONTEXT_DEST_TYPE_TIR);
4896 
4897 	/* the only thing we match on at the moment is the dest mac address */
4898 	if (macaddr != NULL) {
4899 		memcpy(mbin->cmd_flow_ctx.fc_match_value.mc_dest_mac, macaddr,
4900 		    ETHER_ADDR_LEN);
4901 	}
4902 
4903 	mcx_cmdq_mboxes_sign(&mxm, 2);
4904 	mcx_cmdq_post(sc, cqe, 0);
4905 	error = mcx_cmdq_poll(sc, cqe, 1000);
4906 	if (error != 0) {
4907 		printf("%s: set flow table entry timeout\n", DEVNAME(sc));
4908 		goto free;
4909 	}
4910 	if (mcx_cmdq_verify(cqe) != 0) {
4911 		printf("%s: set flow table entry command corrupt\n",
4912 		    DEVNAME(sc));
4913 		goto free;
4914 	}
4915 
4916 	out = mcx_cmdq_out(cqe);
4917 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4918 		printf("%s: set flow table entry failed (%x, %x)\n",
4919 		    DEVNAME(sc), out->cmd_status, betoh32(out->cmd_syndrome));
4920 		error = -1;
4921 		goto free;
4922 	}
4923 
4924 free:
4925 	mcx_dmamem_free(sc, &mxm);
4926 	return (error);
4927 }
4928 
4929 static int
4930 mcx_delete_flow_table_entry(struct mcx_softc *sc, int group, int index)
4931 {
4932 	struct mcx_cmdq_entry *cqe;
4933 	struct mcx_dmamem mxm;
4934 	struct mcx_cmd_delete_flow_table_entry_in *in;
4935 	struct mcx_cmd_delete_flow_table_entry_mb_in *mbin;
4936 	struct mcx_cmd_delete_flow_table_entry_out *out;
4937 	int error;
4938 	int token;
4939 
4940 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4941 	token = mcx_cmdq_token(sc);
4942 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
4943 	    token);
4944 
4945 	in = mcx_cmdq_in(cqe);
4946 	in->cmd_opcode = htobe16(MCX_CMD_DELETE_FLOW_TABLE_ENTRY);
4947 	in->cmd_op_mod = htobe16(0);
4948 
4949 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token) != 0) {
4950 		printf("%s: unable to allocate delete flow table entry mailbox\n",
4951 		    DEVNAME(sc));
4952 		return (-1);
4953 	}
4954 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4955 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4956 	mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4957 	mbin->cmd_flow_index = htobe32(sc->sc_flow_group_start[group] + index);
4958 
4959 	mcx_cmdq_mboxes_sign(&mxm, 2);
4960 	mcx_cmdq_post(sc, cqe, 0);
4961 	error = mcx_cmdq_poll(sc, cqe, 1000);
4962 	if (error != 0) {
4963 		printf("%s: delete flow table entry timeout\n", DEVNAME(sc));
4964 		goto free;
4965 	}
4966 	if (mcx_cmdq_verify(cqe) != 0) {
4967 		printf("%s: delete flow table entry command corrupt\n",
4968 		    DEVNAME(sc));
4969 		goto free;
4970 	}
4971 
4972 	out = mcx_cmdq_out(cqe);
4973 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4974 		printf("%s: delete flow table entry %d:%d failed (%x, %x)\n",
4975 		    DEVNAME(sc), group, index, out->cmd_status,
4976 		    betoh32(out->cmd_syndrome));
4977 		error = -1;
4978 		goto free;
4979 	}
4980 
4981 free:
4982 	mcx_dmamem_free(sc, &mxm);
4983 	return (error);
4984 }
4985 
4986 #if 0
/*
 * Debug helper (compiled out): query the flow table and hexdump the
 * returned flow table context.  Returns 0 on success, non-zero on
 * failure.
 */
int
mcx_dump_flow_table(struct mcx_softc *sc)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_flow_table_in *in;
	struct mcx_cmd_query_flow_table_mb_in *mbin;
	struct mcx_cmd_query_flow_table_out *out;
	struct mcx_cmd_query_flow_table_mb_out *mbout;
	uint8_t token = mcx_cmdq_token(sc);
	int error;
	int i;
	uint8_t *dump;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
	    sizeof(*out) + sizeof(*mbout) + 16, token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE);
	in->cmd_op_mod = htobe16(0);

	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query flow table mailboxes\n");
		return (-1);
	}
	/* the same mailboxes carry both input and output */
	cqe->cq_input_ptr = cqe->cq_output_ptr;

	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_table_type = 0;
	mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);

	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query flow table timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query flow table reply corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query flow table failed (%x/%x)\n", DEVNAME(sc),
		    out->cmd_status, betoh32(out->cmd_syndrome));
		error = -1;
		goto free;
	}

        mbout = (struct mcx_cmd_query_flow_table_mb_out *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	/* hexdump the table context, 16 bytes per line */
	dump = (uint8_t *)mbout + 8;
	for (i = 0; i < sizeof(struct mcx_flow_table_ctx); i++) {
		printf("%.2x ", dump[i]);
		if (i % 16 == 15)
			printf("\n");
	}
free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}
/*
 * Debug helper (compiled out): query a single flow table entry at
 * 'index' and hexdump the first mailbox of the reply.  Returns 0 on
 * success, non-zero on failure.
 */
int
mcx_dump_flow_table_entry(struct mcx_softc *sc, int index)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_flow_table_entry_in *in;
	struct mcx_cmd_query_flow_table_entry_mb_in *mbin;
	struct mcx_cmd_query_flow_table_entry_out *out;
	struct mcx_cmd_query_flow_table_entry_mb_out *mbout;
	uint8_t token = mcx_cmdq_token(sc);
	int error;
	int i;
	uint8_t *dump;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
	    sizeof(*out) + sizeof(*mbout) + 16, token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE_ENTRY);
	in->cmd_op_mod = htobe16(0);

	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query flow table entry mailboxes\n");
		return (-1);
	}
	/* the same mailboxes carry both input and output */
	cqe->cq_input_ptr = cqe->cq_output_ptr;

	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_table_type = 0;
	mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
	mbin->cmd_flow_index = htobe32(index);

	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query flow table entry timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query flow table entry reply corrupt\n",
		    DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query flow table entry failed (%x/%x)\n",
		    DEVNAME(sc), out->cmd_status, betoh32(out->cmd_syndrome));
		error = -1;
		goto free;
	}

        mbout = (struct mcx_cmd_query_flow_table_entry_mb_out *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	/* hexdump the first mailbox of reply data, 16 bytes per line */
	dump = (uint8_t *)mbout;
	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
		printf("%.2x ", dump[i]);
		if (i % 16 == 15)
			printf("\n");
	}

free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}
5134 
/*
 * Debug helper (compiled out): query a flow group and hexdump both
 * reply mailboxes.  Returns 0 on success, non-zero on failure.
 */
int
mcx_dump_flow_group(struct mcx_softc *sc)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_flow_group_in *in;
	struct mcx_cmd_query_flow_group_mb_in *mbin;
	struct mcx_cmd_query_flow_group_out *out;
	struct mcx_cmd_query_flow_group_mb_out *mbout;
	uint8_t token = mcx_cmdq_token(sc);
	int error;
	int i;
	uint8_t *dump;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
	    sizeof(*out) + sizeof(*mbout) + 16, token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_GROUP);
	in->cmd_op_mod = htobe16(0);

	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query flow group mailboxes\n");
		return (-1);
	}
	/* the same mailboxes carry both input and output */
	cqe->cq_input_ptr = cqe->cq_output_ptr;

	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_table_type = 0;
	mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
	/*
	 * NOTE(review): sc_flow_group_id is indexed ([group]) everywhere
	 * else in this file; this use without an index would not compile
	 * if the surrounding #if 0 were enabled — needs a group argument.
	 */
	mbin->cmd_group_id = htobe32(sc->sc_flow_group_id);

	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query flow group timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query flow group reply corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query flow group failed (%x/%x)\n", DEVNAME(sc),
		    out->cmd_status, betoh32(out->cmd_syndrome));
		error = -1;
		goto free;
	}

        mbout = (struct mcx_cmd_query_flow_group_mb_out *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	/* hexdump both reply mailboxes, 16 bytes per line */
	dump = (uint8_t *)mbout;
	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
		printf("%.2x ", dump[i]);
		if (i % 16 == 15)
			printf("\n");
	}
	dump = (uint8_t *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1)));
	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
		printf("%.2x ", dump[i]);
		if (i % 16 == 15)
			printf("\n");
	}

free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}
5215 
5216 int
5217 mcx_dump_rq(struct mcx_softc *sc)
5218 {
5219 	struct mcx_dmamem mxm;
5220 	struct mcx_cmdq_entry *cqe;
5221 	struct mcx_cmd_query_rq_in *in;
5222 	struct mcx_cmd_query_rq_out *out;
5223 	struct mcx_cmd_query_rq_mb_out *mbout;
5224 	uint8_t token = mcx_cmdq_token(sc);
5225 	int error;
5226 
5227 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5228 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
5229 	    token);
5230 
5231 	in = mcx_cmdq_in(cqe);
5232 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_RQ);
5233 	in->cmd_op_mod = htobe16(0);
5234 	in->cmd_rqn = htobe32(sc->sc_rqn);
5235 
5236 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5237 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5238 	    &cqe->cq_output_ptr, token) != 0) {
5239 		printf(", unable to allocate query flow group mailboxes\n");
5240 		return (-1);
5241 	}
5242 
5243 	mcx_cmdq_mboxes_sign(&mxm, 1);
5244 
5245 	mcx_cmdq_post(sc, cqe, 0);
5246 	error = mcx_cmdq_poll(sc, cqe, 1000);
5247 	if (error != 0) {
5248 		printf("%s: query rq timeout\n", DEVNAME(sc));
5249 		goto free;
5250 	}
5251 	error = mcx_cmdq_verify(cqe);
5252 	if (error != 0) {
5253 		printf("%s: query rq reply corrupt\n", DEVNAME(sc));
5254 		goto free;
5255 	}
5256 
5257 	out = mcx_cmdq_out(cqe);
5258 	switch (out->cmd_status) {
5259 	case MCX_CQ_STATUS_OK:
5260 		break;
5261 	default:
5262 		printf("%s: query rq failed (%x/%x)\n", DEVNAME(sc),
5263 		    out->cmd_status, betoh32(out->cmd_syndrome));
5264 		error = -1;
5265 		goto free;
5266 	}
5267 
5268         mbout = (struct mcx_cmd_query_rq_mb_out *)
5269 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5270 	printf("%s: rq: state %d, ui %d, cqn %d, s/s %d/%d/%d, hw %d, sw %d\n",
5271 	    DEVNAME(sc),
5272 	    (betoh32(mbout->cmd_ctx.rq_flags) >> MCX_RQ_CTX_STATE_SHIFT) & 0x0f,
5273 	    betoh32(mbout->cmd_ctx.rq_user_index),
5274 	    betoh32(mbout->cmd_ctx.rq_cqn),
5275 	    betoh16(mbout->cmd_ctx.rq_wq.wq_log_stride),
5276 	    mbout->cmd_ctx.rq_wq.wq_log_page_sz,
5277 	    mbout->cmd_ctx.rq_wq.wq_log_size,
5278 	    betoh32(mbout->cmd_ctx.rq_wq.wq_hw_counter),
5279 	    betoh32(mbout->cmd_ctx.rq_wq.wq_sw_counter));
5280 
5281 free:
5282 	mcx_cq_mboxes_free(sc, &mxm);
5283 	return (error);
5284 }
5285 
/*
 * Debug helper: issue a QUERY_SQ command for the send queue and
 * hexdump the first output mailbox to the console.
 * Returns 0 on success, -1 or a command queue error on failure.
 */
int
mcx_dump_sq(struct mcx_softc *sc)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_sq_in *in;
	struct mcx_cmd_query_sq_out *out;
	struct mcx_cmd_query_sq_mb_out *mbout;
	uint8_t token = mcx_cmdq_token(sc);
	int error;
	int i;
	uint8_t *dump;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
	    token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_SQ);
	in->cmd_op_mod = htobe16(0);
	in->cmd_sqn = htobe32(sc->sc_sqn);

	/* the sq context may spill over into a second mailbox */
	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query sq mailboxes\n");
		return (-1);
	}

	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query sq timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query sq reply corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query sq failed (%x/%x)\n", DEVNAME(sc),
		    out->cmd_status, betoh32(out->cmd_syndrome));
		error = -1;
		goto free;
	}

        mbout = (struct mcx_cmd_query_sq_mb_out *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	/* leftover from mcx_dump_rq(); pretty-printing of the sq context
	 * hasn't been written yet, so just hexdump the mailbox below */
/*
	printf("%s: rq: state %d, ui %d, cqn %d, s/s %d/%d/%d, hw %d, sw %d\n",
	    DEVNAME(sc),
	    (betoh32(mbout->cmd_ctx.rq_flags) >> MCX_RQ_CTX_STATE_SHIFT) & 0x0f,
	    betoh32(mbout->cmd_ctx.rq_user_index),
	    betoh32(mbout->cmd_ctx.rq_cqn),
	    betoh16(mbout->cmd_ctx.rq_wq.wq_log_stride),
	    mbout->cmd_ctx.rq_wq.wq_log_page_sz,
	    mbout->cmd_ctx.rq_wq.wq_log_size,
	    betoh32(mbout->cmd_ctx.rq_wq.wq_hw_counter),
	    betoh32(mbout->cmd_ctx.rq_wq.wq_sw_counter));
*/
	dump = (uint8_t *)mbout;
	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
		printf("%.2x ", dump[i]);
		if (i % 16 == 15)
			printf("\n");
	}

free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}
5365 
5366 static int
5367 mcx_dump_counters(struct mcx_softc *sc)
5368 {
5369 	struct mcx_dmamem mxm;
5370 	struct mcx_cmdq_entry *cqe;
5371 	struct mcx_cmd_query_vport_counters_in *in;
5372 	struct mcx_cmd_query_vport_counters_mb_in *mbin;
5373 	struct mcx_cmd_query_vport_counters_out *out;
5374 	struct mcx_nic_vport_counters *counters;
5375 	int error, token;
5376 
5377 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5378 	token = mcx_cmdq_token(sc);
5379 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5380 	    sizeof(*out) + sizeof(*counters), token);
5381 
5382 	in = mcx_cmdq_in(cqe);
5383 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_VPORT_COUNTERS);
5384 	in->cmd_op_mod = htobe16(0);
5385 
5386 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
5387 		printf(", unable to allocate query nic vport counters mailboxen\n");
5388 		return (-1);
5389 	}
5390 	cqe->cq_input_ptr = cqe->cq_output_ptr;
5391 
5392 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5393 	mbin->cmd_clear = 0x80;
5394 
5395 	mcx_cmdq_mboxes_sign(&mxm, 1);
5396 	mcx_cmdq_post(sc, cqe, 0);
5397 
5398 	error = mcx_cmdq_poll(sc, cqe, 1000);
5399 	if (error != 0) {
5400 		printf("%s: query nic vport counters timeout\n", DEVNAME(sc));
5401 		goto free;
5402 	}
5403 	if (mcx_cmdq_verify(cqe) != 0) {
5404 		printf("%s: query nic vport counters command corrupt\n",
5405 		    DEVNAME(sc));
5406 		goto free;
5407 	}
5408 
5409 	out = mcx_cmdq_out(cqe);
5410 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5411 		printf("%s: query nic vport counters failed (%x, %x)\n",
5412 		    DEVNAME(sc), out->cmd_status, out->cmd_syndrome);
5413 		error = -1;
5414 		goto free;
5415 	}
5416 
5417 	counters = (struct mcx_nic_vport_counters *)
5418 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5419 	if (counters->rx_bcast.packets + counters->tx_bcast.packets +
5420 	    counters->rx_ucast.packets + counters->tx_ucast.packets +
5421 	    counters->rx_err.packets + counters->tx_err.packets)
5422 		printf("%s: err %llx/%llx uc %llx/%llx bc %llx/%llx\n",
5423 		    DEVNAME(sc),
5424 		    betoh64(counters->tx_err.packets),
5425 		    betoh64(counters->rx_err.packets),
5426 		    betoh64(counters->tx_ucast.packets),
5427 		    betoh64(counters->rx_ucast.packets),
5428 		    betoh64(counters->tx_bcast.packets),
5429 		    betoh64(counters->rx_bcast.packets));
5430 free:
5431 	mcx_dmamem_free(sc, &mxm);
5432 
5433 	return (error);
5434 }
5435 
5436 static int
5437 mcx_dump_flow_counter(struct mcx_softc *sc, int index, const char *what)
5438 {
5439 	struct mcx_dmamem mxm;
5440 	struct mcx_cmdq_entry *cqe;
5441 	struct mcx_cmd_query_flow_counter_in *in;
5442 	struct mcx_cmd_query_flow_counter_mb_in *mbin;
5443 	struct mcx_cmd_query_flow_counter_out *out;
5444 	struct mcx_counter *counters;
5445 	int error, token;
5446 
5447 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5448 	token = mcx_cmdq_token(sc);
5449 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out) +
5450 	    sizeof(*counters), token);
5451 
5452 	in = mcx_cmdq_in(cqe);
5453 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_COUNTER);
5454 	in->cmd_op_mod = htobe16(0);
5455 
5456 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
5457 		printf(", unable to allocate query flow counter mailboxen\n");
5458 		return (-1);
5459 	}
5460 	cqe->cq_input_ptr = cqe->cq_output_ptr;
5461 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5462 	mbin->cmd_flow_counter_id = htobe16(sc->sc_flow_counter_id[index]);
5463 	mbin->cmd_clear = 0x80;
5464 
5465 	mcx_cmdq_mboxes_sign(&mxm, 1);
5466 	mcx_cmdq_post(sc, cqe, 0);
5467 
5468 	error = mcx_cmdq_poll(sc, cqe, 1000);
5469 	if (error != 0) {
5470 		printf("%s: query flow counter timeout\n", DEVNAME(sc));
5471 		goto free;
5472 	}
5473 	if (mcx_cmdq_verify(cqe) != 0) {
5474 		printf("%s: query flow counter command corrupt\n", DEVNAME(sc));
5475 		goto free;
5476 	}
5477 
5478 	out = mcx_cmdq_out(cqe);
5479 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5480 		printf("%s: query flow counter failed (%x, %x)\n", DEVNAME(sc),
5481 		    out->cmd_status, out->cmd_syndrome);
5482 		error = -1;
5483 		goto free;
5484 	}
5485 
5486 	counters = (struct mcx_counter *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5487 	if (counters->packets)
5488 		printf("%s: %s inflow %llx\n", DEVNAME(sc), what,
5489 		    betoh64(counters->packets));
5490 free:
5491 	mcx_dmamem_free(sc, &mxm);
5492 
5493 	return (error);
5494 }
5495 
5496 #endif
5497 
/*
 * Refill up to nslots receive descriptors starting at *prod with newly
 * allocated mbuf clusters, ring the rx doorbell if anything was posted,
 * and advance *prod.  Returns the number of slots that could NOT be
 * filled (0 means everything requested was refilled).
 */
int
mcx_rx_fill_slots(struct mcx_softc *sc, void *ring, struct mcx_slot *slots,
    uint *prod, int bufsize, uint nslots)
{
	struct mcx_rq_entry *rqe;
	struct mcx_slot *ms;
	struct mbuf *m;
	uint slot, p, fills;

	p = *prod;
	/* the ring index wraps; the producer counter keeps running */
	slot = (p % (1 << MCX_LOG_RQ_SIZE));
	rqe = ring;
	for (fills = 0; fills < nslots; fills++) {
		ms = &slots[slot];
		/* extra ETHER_ALIGN so the payload can be realigned below */
		m = MCLGETI(NULL, M_DONTWAIT, NULL, bufsize + ETHER_ALIGN);
		if (m == NULL)
			break;

		m->m_data += ETHER_ALIGN;
		m->m_len = m->m_pkthdr.len = bufsize;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
		    BUS_DMA_NOWAIT) != 0) {
			m_freem(m);
			break;
		}
		ms->ms_m = m;

		/* fill in the receive descriptor for the nic */
		rqe[slot].rqe_byte_count = htobe32(bufsize);
		rqe[slot].rqe_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
		rqe[slot].rqe_lkey = htobe32(sc->sc_lkey);

		p++;
		slot++;
		if (slot == (1 << MCX_LOG_RQ_SIZE))
			slot = 0;
	}

	if (fills != 0) {
		/* tell the nic about the new descriptors */
		*sc->sc_rx_doorbell = htobe32(p);
		/* barrier? */
	}

	*prod = p;

	return (nslots - fills);
}
5544 
5545 int
5546 mcx_rx_fill(struct mcx_softc *sc)
5547 {
5548 	u_int slots;
5549 
5550 	slots = if_rxr_get(&sc->sc_rxr, (1 << MCX_LOG_RQ_SIZE));
5551 	if (slots == 0)
5552 		return (1);
5553 
5554 	slots = mcx_rx_fill_slots(sc, MCX_DMA_KVA(&sc->sc_rq_mem),
5555 	    sc->sc_rx_slots, &sc->sc_rx_prod, sc->sc_hardmtu, slots);
5556 	if_rxr_put(&sc->sc_rxr, slots);
5557 	return (0);
5558 }
5559 
5560 void
5561 mcx_refill(void *xsc)
5562 {
5563 	struct mcx_softc *sc = xsc;
5564 
5565 	mcx_rx_fill(sc);
5566 
5567 	if (sc->sc_rx_cons == sc->sc_rx_prod)
5568 		timeout_add(&sc->sc_rx_refill, 1);
5569 }
5570 
/*
 * Handle one tx completion: sync and unload the dma map of the
 * completed packet, free its mbuf, and credit the number of send queue
 * slots it occupied back to the caller via *txfree.
 */
void
mcx_process_txeof(struct mcx_softc *sc, struct mcx_cq_entry *cqe, int *txfree)
{
	struct mcx_slot *ms;
	bus_dmamap_t map;
	int slot, slots;

	slot = betoh16(cqe->cq_wqe_count) % (1 << MCX_LOG_SQ_SIZE);

	ms = &sc->sc_tx_slots[slot];
	map = ms->ms_map;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	/*
	 * one slot for the ctrl/eth/first segments, plus extra slots for
	 * the remaining segments.  NOTE(review): the (nsegs+2) rounding
	 * here must agree with the slot accounting in mcx_start() - the
	 * +2 looks like it can overcount for some segment counts; confirm.
	 */
	slots = 1;
	if (map->dm_nsegs > 1)
		slots += (map->dm_nsegs+2) / MCX_SQ_SEGS_PER_SLOT;

	(*txfree) += slots;
	bus_dmamap_unload(sc->sc_dmat, map);
	m_freem(ms->ms_m);
	ms->ms_m = NULL;
}
5594 
5595 static uint64_t
5596 mcx_uptime(void)
5597 {
5598 	struct timespec ts;
5599 
5600 	nanouptime(&ts);
5601 
5602 	return ((uint64_t)ts.tv_sec * 1000000000 + (uint64_t)ts.tv_nsec);
5603 }
5604 
5605 static void
5606 mcx_calibrate_first(struct mcx_softc *sc)
5607 {
5608 	struct mcx_calibration *c = &sc->sc_calibration[0];
5609 
5610 	sc->sc_calibration_gen = 0;
5611 
5612 	c->c_ubase = mcx_uptime();
5613 	c->c_tbase = mcx_timer(sc);
5614 	c->c_tdiff = 0;
5615 
5616 	timeout_add_sec(&sc->sc_calibrate, MCX_CALIBRATE_FIRST);
5617 }
5618 
5619 #define MCX_TIMESTAMP_SHIFT 10
5620 
/*
 * Periodic timestamp calibration: take a new (uptime, nic timer) sample
 * and compute deltas against the previous sample so mcx_process_rx()
 * can convert nic timestamps to nanoseconds.  The two calibration slots
 * alternate; readers select a slot by generation number, so the
 * generation update must be ordered after the data (membar_producer).
 */
static void
mcx_calibrate(void *arg)
{
	struct mcx_softc *sc = arg;
	struct mcx_calibration *nc, *pc;
	unsigned int gen;

	if (!ISSET(sc->sc_ac.ac_if.if_flags, IFF_RUNNING))
		return;

	timeout_add_sec(&sc->sc_calibrate, MCX_CALIBRATE_NORMAL);

	gen = sc->sc_calibration_gen;
	pc = &sc->sc_calibration[gen % nitems(sc->sc_calibration)];
	gen++;
	nc = &sc->sc_calibration[gen % nitems(sc->sc_calibration)];

	/* the previous sample's base becomes the new reference point */
	nc->c_uptime = pc->c_ubase;
	nc->c_timestamp = pc->c_tbase;

	nc->c_ubase = mcx_uptime();
	nc->c_tbase = mcx_timer(sc);

	/* shifted down, presumably to keep the 64bit multiply in
	 * mcx_process_rx() from overflowing - confirm */
	nc->c_udiff = (nc->c_ubase - nc->c_uptime) >> MCX_TIMESTAMP_SHIFT;
	nc->c_tdiff = (nc->c_tbase - nc->c_timestamp) >> MCX_TIMESTAMP_SHIFT;

	/* publish the new sample before bumping the generation */
	membar_producer();
	sc->sc_calibration_gen = gen;
}
5650 
/*
 * Pass one received packet up the stack: take the mbuf back from the
 * completed rq slot, set its length from the completion entry, attach
 * the rx hash as a flow id when the nic provides one, and convert the
 * nic timestamp to nanoseconds using the current calibration data.
 * Returns 1, the number of rq slots consumed.
 */
static int
mcx_process_rx(struct mcx_softc *sc, struct mcx_cq_entry *cqe,
    struct mbuf_list *ml, const struct mcx_calibration *c)
{
	struct mcx_slot *ms;
	struct mbuf *m;
	int slot;

	slot = betoh16(cqe->cq_wqe_count) % (1 << MCX_LOG_RQ_SIZE);

	ms = &sc->sc_rx_slots[slot];
	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, ms->ms_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, ms->ms_map);

	/* take the mbuf back from the slot */
	m = ms->ms_m;
	ms->ms_m = NULL;

	m->m_pkthdr.len = m->m_len = bemtoh32(&cqe->cq_byte_cnt);

	if (cqe->cq_rx_hash_type) {
		m->m_pkthdr.ph_flowid = M_FLOWID_VALID |
		    betoh32(cqe->cq_rx_hash);
	}

	/* c_tdiff == 0 means calibration hasn't completed a full pass */
	if (c->c_tdiff) {
		/* scale the nic timer delta into a nanosecond delta */
		uint64_t t = bemtoh64(&cqe->cq_timestamp) - c->c_timestamp;
		t *= c->c_udiff;
		t /= c->c_tdiff;

		m->m_pkthdr.ph_timestamp = c->c_uptime + t;
		SET(m->m_pkthdr.csum_flags, M_TIMESTAMP);
	}

	ml_enqueue(ml, m);

	return (1);
}
5689 
/*
 * Return the next completion queue entry owned by software, or NULL
 * when the queue is empty.  An entry is software-owned when its owner
 * bit matches the current ring phase (the bit of cq_cons just above
 * the ring index, which flips each time the ring wraps).  The consumer
 * index is not advanced here; the caller does that.
 */
static struct mcx_cq_entry *
mcx_next_cq_entry(struct mcx_softc *sc, struct mcx_cq *cq)
{
	struct mcx_cq_entry *cqe;
	int next;

	cqe = (struct mcx_cq_entry *)MCX_DMA_KVA(&cq->cq_mem);
	next = cq->cq_cons % (1 << MCX_LOG_CQ_SIZE);

	if ((cqe[next].cq_opcode_owner & MCX_CQ_ENTRY_FLAG_OWNER) ==
	    ((cq->cq_cons >> MCX_LOG_CQ_SIZE) & 1)) {
		return (&cqe[next]);
	}

	return (NULL);
}
5706 
/*
 * Re-arm a completion queue so the next completion raises an event.
 * The consumer index and arm command are written to the cq's doorbell
 * record in memory first, then the combined (command << 32 | cqn)
 * value is written to the UAR doorbell register, followed by a write
 * barrier.
 */
static void
mcx_arm_cq(struct mcx_softc *sc, struct mcx_cq *cq)
{
	bus_size_t offset;
	uint32_t val;
	uint64_t uval;

	/* different uar per cq? */
	offset = (MCX_PAGE_SIZE * sc->sc_uar);
	/* arm command: 2-bit sequence number plus masked consumer index */
	val = ((cq->cq_count) & 3) << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT;
	val |= (cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);

	/* update the doorbell record before ringing the doorbell */
	cq->cq_doorbell[0] = htobe32(cq->cq_cons);
	cq->cq_doorbell[1] = htobe32(val);

	uval = val;
	uval <<= 32;
	uval |= cq->cq_n;
	bus_space_write_raw_8(sc->sc_memt, sc->sc_memh,
	    offset + MCX_UAR_CQ_DOORBELL, htobe64(uval));
	mcx_bar(sc, offset + MCX_UAR_CQ_DOORBELL, sizeof(uint64_t),
	    BUS_SPACE_BARRIER_WRITE);
}
5730 
/*
 * Drain a completion queue: dispatch each entry to the rx or tx
 * completion handler, re-arm the queue, then return the freed rx slots
 * to the ring (refilling it) and restart the tx queue if it was
 * stalled waiting for send queue space.
 */
void
mcx_process_cq(struct mcx_softc *sc, struct mcx_cq *cq)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	const struct mcx_calibration *c;
	unsigned int gen;
	struct mcx_cq_entry *cqe;
	uint8_t *cqp;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	int rxfree, txfree;

	/* read the generation before the calibration data it guards */
	gen = sc->sc_calibration_gen;
	membar_consumer();
	c = &sc->sc_calibration[gen % nitems(sc->sc_calibration)];

	rxfree = 0;
	txfree = 0;
	while ((cqe = mcx_next_cq_entry(sc, cq))) {
		uint8_t opcode;
		opcode = (cqe->cq_opcode_owner >> MCX_CQ_ENTRY_OPCODE_SHIFT);
		switch (opcode) {
		case MCX_CQ_ENTRY_OPCODE_REQ:
			/* tx completion */
			mcx_process_txeof(sc, cqe, &txfree);
			break;
		case MCX_CQ_ENTRY_OPCODE_SEND:
			/* rx completion */
			rxfree += mcx_process_rx(sc, cqe, &ml, c);
			break;
		case MCX_CQ_ENTRY_OPCODE_REQ_ERR:
		case MCX_CQ_ENTRY_OPCODE_SEND_ERR:
			cqp = (uint8_t *)cqe;
			/* printf("%s: cq completion error: %x\n", DEVNAME(sc), cqp[0x37]); */
			break;

		default:
			/* printf("%s: cq completion opcode %x??\n", DEVNAME(sc), opcode); */
			break;
		}

		cq->cq_cons++;
	}

	cq->cq_count++;
	mcx_arm_cq(sc, cq);

	if (rxfree > 0) {
		if_rxr_put(&sc->sc_rxr, rxfree);
		if (ifiq_input(&sc->sc_ac.ac_if.if_rcv, &ml))
			if_rxr_livelocked(&sc->sc_rxr);

		/* refill the slots we just emptied */
		mcx_rx_fill(sc);
		/* timeout if full somehow */
	}
	if (txfree > 0) {
		sc->sc_tx_cons += txfree;
		if (ifq_is_oactive(&ifp->if_snd))
			ifq_restart(&ifp->if_snd);
	}
}
5789 
5790 static void
5791 mcx_arm_eq(struct mcx_softc *sc)
5792 {
5793 	bus_size_t offset;
5794 	uint32_t val;
5795 
5796 	offset = (MCX_PAGE_SIZE * sc->sc_uar) + MCX_UAR_EQ_DOORBELL_ARM;
5797 	val = (sc->sc_eqn << 24) | (sc->sc_eq_cons & 0xffffff);
5798 
5799 	mcx_wr(sc, offset, val);
5800 	/* barrier? */
5801 }
5802 
/*
 * Return the next event queue entry owned by software and advance the
 * consumer index, or return NULL when the queue is empty.  As with the
 * completion queue, ownership is determined by comparing the entry's
 * owner bit against the ring phase derived from sc_eq_cons.
 */
static struct mcx_eq_entry *
mcx_next_eq_entry(struct mcx_softc *sc)
{
	struct mcx_eq_entry *eqe;
	int next;

	eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&sc->sc_eq_mem);
	next = sc->sc_eq_cons % (1 << MCX_LOG_EQ_SIZE);
	if ((eqe[next].eq_owner & 1) == ((sc->sc_eq_cons >> MCX_LOG_EQ_SIZE) & 1)) {
		sc->sc_eq_cons++;
		return (&eqe[next]);
	}
	return (NULL);
}
5817 
5818 int
5819 mcx_intr(void *xsc)
5820 {
5821 	struct mcx_softc *sc = (struct mcx_softc *)xsc;
5822 	struct mcx_eq_entry *eqe;
5823 	int i, cq;
5824 
5825 	while ((eqe = mcx_next_eq_entry(sc))) {
5826 		switch (eqe->eq_event_type) {
5827 		case MCX_EVENT_TYPE_COMPLETION:
5828 			cq = betoh32(eqe->eq_event_data[6]);
5829 			for (i = 0; i < sc->sc_num_cq; i++) {
5830 				if (sc->sc_cq[i].cq_n == cq) {
5831 					mcx_process_cq(sc, &sc->sc_cq[i]);
5832 					break;
5833 				}
5834 			}
5835 			break;
5836 
5837 		case MCX_EVENT_TYPE_LAST_WQE:
5838 			/* printf("%s: last wqe reached?\n", DEVNAME(sc)); */
5839 			break;
5840 
5841 		case MCX_EVENT_TYPE_CQ_ERROR:
5842 			/* printf("%s: cq error\n", DEVNAME(sc)); */
5843 			break;
5844 
5845 		case MCX_EVENT_TYPE_CMD_COMPLETION:
5846 			/* wakeup probably */
5847 			break;
5848 
5849 		case MCX_EVENT_TYPE_PORT_CHANGE:
5850 			task_add(systq, &sc->sc_port_change);
5851 			break;
5852 
5853 		default:
5854 			/* printf("%s: something happened\n", DEVNAME(sc)); */
5855 			break;
5856 		}
5857 	}
5858 	mcx_arm_eq(sc);
5859 	return (1);
5860 }
5861 
5862 static void
5863 mcx_free_slots(struct mcx_softc *sc, struct mcx_slot *slots, int allocated,
5864     int total)
5865 {
5866 	struct mcx_slot *ms;
5867 
5868 	int i = allocated;
5869 	while (i-- > 0) {
5870 		ms = &slots[i];
5871 		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
5872 		if (ms->ms_m != NULL)
5873 			m_freem(ms->ms_m);
5874 	}
5875 	free(slots, M_DEVBUF, total * sizeof(*ms));
5876 }
5877 
5878 static void
5879 mcx_up(struct mcx_softc *sc)
5880 {
5881 	struct ifnet *ifp = &sc->sc_ac.ac_if;
5882 	struct mcx_slot *ms;
5883 	int i, start;
5884 	struct mcx_flow_match match_crit;
5885 
5886 	sc->sc_rx_slots = mallocarray(sizeof(*ms), (1 << MCX_LOG_RQ_SIZE),
5887 	    M_DEVBUF, M_WAITOK | M_ZERO);
5888 	if (sc->sc_rx_slots == NULL) {
5889 		printf("%s: failed to allocate rx slots\n", DEVNAME(sc));
5890 		return;
5891 	}
5892 
5893 	for (i = 0; i < (1 << MCX_LOG_RQ_SIZE); i++) {
5894 		ms = &sc->sc_rx_slots[i];
5895 		if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu, 1,
5896 		    sc->sc_hardmtu, 0,
5897 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
5898 		    &ms->ms_map) != 0) {
5899 			printf("%s: failed to allocate rx dma maps\n",
5900 			    DEVNAME(sc));
5901 			goto destroy_rx_slots;
5902 		}
5903 	}
5904 
5905 	sc->sc_tx_slots = mallocarray(sizeof(*ms), (1 << MCX_LOG_SQ_SIZE),
5906 	    M_DEVBUF, M_WAITOK | M_ZERO);
5907 	if (sc->sc_tx_slots == NULL) {
5908 		printf("%s: failed to allocate tx slots\n", DEVNAME(sc));
5909 		goto destroy_rx_slots;
5910 	}
5911 
5912 	for (i = 0; i < (1 << MCX_LOG_SQ_SIZE); i++) {
5913 		ms = &sc->sc_tx_slots[i];
5914 		if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu,
5915 		    MCX_SQ_MAX_SEGMENTS, sc->sc_hardmtu, 0,
5916 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
5917 		    &ms->ms_map) != 0) {
5918 			printf("%s: failed to allocate tx dma maps\n",
5919 			    DEVNAME(sc));
5920 			goto destroy_tx_slots;
5921 		}
5922 	}
5923 
5924 	if (mcx_create_cq(sc, sc->sc_eqn) != 0)
5925 		goto down;
5926 
5927 	/* send queue */
5928 	if (mcx_create_tis(sc) != 0)
5929 		goto down;
5930 
5931 	if (mcx_create_sq(sc, sc->sc_cq[0].cq_n) != 0)
5932 		goto down;
5933 
5934 	/* receive queue */
5935 	if (mcx_create_rq(sc, sc->sc_cq[0].cq_n) != 0)
5936 		goto down;
5937 
5938 	if (mcx_create_tir(sc) != 0)
5939 		goto down;
5940 
5941 	if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE) != 0)
5942 		goto down;
5943 
5944 	/* promisc flow group */
5945 	start = 0;
5946 	memset(&match_crit, 0, sizeof(match_crit));
5947 	if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_PROMISC, start, 1,
5948 	    0, &match_crit) != 0)
5949 		goto down;
5950 	sc->sc_promisc_flow_enabled = 0;
5951 	start++;
5952 
5953 	/* all multicast flow group */
5954 	match_crit.mc_dest_mac[0] = 0x01;
5955 	if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_ALLMULTI, start, 1,
5956 	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
5957 		goto down;
5958 	sc->sc_allmulti_flow_enabled = 0;
5959 	start++;
5960 
5961 	/* mac address matching flow group */
5962 	memset(&match_crit.mc_dest_mac, 0xff, sizeof(match_crit.mc_dest_mac));
5963 	if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_MAC, start,
5964 	    (1 << MCX_LOG_FLOW_TABLE_SIZE) - start,
5965 	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
5966 		goto down;
5967 
5968 	/* flow table entries for unicast and broadcast */
5969 	start = 0;
5970 	if (mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, start,
5971 	    sc->sc_ac.ac_enaddr) != 0)
5972 		goto down;
5973 	start++;
5974 
5975 	if (mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, start,
5976 	    etherbroadcastaddr) != 0)
5977 		goto down;
5978 	start++;
5979 
5980 	/* multicast entries go after that */
5981 	sc->sc_mcast_flow_base = start;
5982 
5983 	/* re-add any existing multicast flows */
5984 	for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
5985 		if (sc->sc_mcast_flows[i][0] != 0) {
5986 			mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
5987 			    sc->sc_mcast_flow_base + i,
5988 			    sc->sc_mcast_flows[i]);
5989 		}
5990 	}
5991 
5992 	if (mcx_set_flow_table_root(sc) != 0)
5993 		goto down;
5994 
5995 	/* start the queues */
5996 	if (mcx_ready_sq(sc) != 0)
5997 		goto down;
5998 
5999 	if (mcx_ready_rq(sc) != 0)
6000 		goto down;
6001 
6002 	if_rxr_init(&sc->sc_rxr, 1, (1 << MCX_LOG_RQ_SIZE));
6003 	sc->sc_rx_cons = 0;
6004 	sc->sc_rx_prod = 0;
6005 	mcx_rx_fill(sc);
6006 
6007 	mcx_calibrate_first(sc);
6008 
6009 	SET(ifp->if_flags, IFF_RUNNING);
6010 
6011 	sc->sc_tx_cons = 0;
6012 	sc->sc_tx_prod = 0;
6013 	ifq_clr_oactive(&ifp->if_snd);
6014 	ifq_restart(&ifp->if_snd);
6015 
6016 	return;
6017 destroy_tx_slots:
6018 	mcx_free_slots(sc, sc->sc_tx_slots, i, (1 << MCX_LOG_SQ_SIZE));
6019 	sc->sc_rx_slots = NULL;
6020 
6021 	i = (1 << MCX_LOG_RQ_SIZE);
6022 destroy_rx_slots:
6023 	mcx_free_slots(sc, sc->sc_rx_slots, i, (1 << MCX_LOG_RQ_SIZE));
6024 	sc->sc_rx_slots = NULL;
6025 down:
6026 	mcx_down(sc);
6027 }
6028 
/*
 * Tear down the running state set up by mcx_up(): clear IFF_RUNNING,
 * remove all flow table entries so no new packets can arrive, wait for
 * in-flight interrupt/ifq work and the calibration timeout to drain,
 * then destroy the flow groups and table, the tir/rq and sq/tis pairs,
 * and the completion queues, and finally free the rx/tx slot arrays.
 */
static void
mcx_down(struct mcx_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int group, i;

	CLR(ifp->if_flags, IFF_RUNNING);

	/*
	 * delete flow table entries first, so no packets can arrive
	 * after the barriers
	 */
	if (sc->sc_promisc_flow_enabled)
		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
	if (sc->sc_allmulti_flow_enabled)
		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
	/* entries 0 and 1 are the unicast and broadcast addresses */
	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 0);
	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 1);
	for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
		if (sc->sc_mcast_flows[i][0] != 0) {
			mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
			    sc->sc_mcast_flow_base + i);
		}
	}

	/* wait for anything still in flight to finish */
	intr_barrier(sc->sc_ihc);
	ifq_barrier(&ifp->if_snd);

	timeout_del_barrier(&sc->sc_calibrate);

	for (group = 0; group < MCX_NUM_FLOW_GROUPS; group++) {
		if (sc->sc_flow_group_id[group] != -1)
			mcx_destroy_flow_group(sc,
			    sc->sc_flow_group_id[group]);
	}

	if (sc->sc_flow_table_id != -1)
		mcx_destroy_flow_table(sc);

	/* receive side */
	if (sc->sc_tirn != 0)
		mcx_destroy_tir(sc);
	if (sc->sc_rqn != 0)
		mcx_destroy_rq(sc);

	/* send side */
	if (sc->sc_sqn != 0)
		mcx_destroy_sq(sc);
	if (sc->sc_tisn != 0)
		mcx_destroy_tis(sc);

	for (i = 0; i < sc->sc_num_cq; i++)
		mcx_destroy_cq(sc, i);
	sc->sc_num_cq = 0;

	if (sc->sc_tx_slots != NULL) {
		mcx_free_slots(sc, sc->sc_tx_slots, (1 << MCX_LOG_SQ_SIZE),
		    (1 << MCX_LOG_SQ_SIZE));
		sc->sc_tx_slots = NULL;
	}
	if (sc->sc_rx_slots != NULL) {
		mcx_free_slots(sc, sc->sc_rx_slots, (1 << MCX_LOG_RQ_SIZE),
		    (1 << MCX_LOG_RQ_SIZE));
		sc->sc_rx_slots = NULL;
	}
}
6093 
6094 static int
6095 mcx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
6096 {
6097 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6098 	struct ifreq *ifr = (struct ifreq *)data;
6099 	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
6100 	int s, i, error = 0;
6101 
6102 	s = splnet();
6103 	switch (cmd) {
6104 	case SIOCSIFADDR:
6105 		ifp->if_flags |= IFF_UP;
6106 		/* FALLTHROUGH */
6107 
6108 	case SIOCSIFFLAGS:
6109 		if (ISSET(ifp->if_flags, IFF_UP)) {
6110 			if (ISSET(ifp->if_flags, IFF_RUNNING))
6111 				error = ENETRESET;
6112 			else
6113 				mcx_up(sc);
6114 		} else {
6115 			if (ISSET(ifp->if_flags, IFF_RUNNING))
6116 				mcx_down(sc);
6117 		}
6118 		break;
6119 
6120 	case SIOCGIFMEDIA:
6121 	case SIOCSIFMEDIA:
6122 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
6123 		break;
6124 
6125 	case SIOCGIFSFFPAGE:
6126 		error = mcx_get_sffpage(ifp, (struct if_sffpage *)data);
6127 		break;
6128 
6129 	case SIOCGIFRXR:
6130 		error = mcx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
6131 		break;
6132 
6133 	case SIOCADDMULTI:
6134 		if (ether_addmulti(ifr, &sc->sc_ac) == ENETRESET) {
6135 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
6136 			if (error != 0)
6137 				return (error);
6138 
6139 			for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6140 				if (sc->sc_mcast_flows[i][0] == 0) {
6141 					memcpy(sc->sc_mcast_flows[i], addrlo,
6142 					    ETHER_ADDR_LEN);
6143 					if (ISSET(ifp->if_flags, IFF_RUNNING)) {
6144 						mcx_set_flow_table_entry(sc,
6145 						    MCX_FLOW_GROUP_MAC,
6146 						    sc->sc_mcast_flow_base + i,
6147 						    sc->sc_mcast_flows[i]);
6148 					}
6149 					break;
6150 				}
6151 			}
6152 
6153 			if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
6154 				if (i == MCX_NUM_MCAST_FLOWS) {
6155 					SET(ifp->if_flags, IFF_ALLMULTI);
6156 					sc->sc_extra_mcast++;
6157 					error = ENETRESET;
6158 				}
6159 
6160 				if (sc->sc_ac.ac_multirangecnt > 0) {
6161 					SET(ifp->if_flags, IFF_ALLMULTI);
6162 					error = ENETRESET;
6163 				}
6164 			}
6165 		}
6166 		break;
6167 
6168 	case SIOCDELMULTI:
6169 		if (ether_delmulti(ifr, &sc->sc_ac) == ENETRESET) {
6170 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
6171 			if (error != 0)
6172 				return (error);
6173 
6174 			for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6175 				if (memcmp(sc->sc_mcast_flows[i], addrlo,
6176 				    ETHER_ADDR_LEN) == 0) {
6177 					if (ISSET(ifp->if_flags, IFF_RUNNING)) {
6178 						mcx_delete_flow_table_entry(sc,
6179 						    MCX_FLOW_GROUP_MAC,
6180 						    sc->sc_mcast_flow_base + i);
6181 					}
6182 					sc->sc_mcast_flows[i][0] = 0;
6183 					break;
6184 				}
6185 			}
6186 
6187 			if (i == MCX_NUM_MCAST_FLOWS)
6188 				sc->sc_extra_mcast--;
6189 
6190 			if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
6191 			    (sc->sc_extra_mcast == 0) &&
6192 			    (sc->sc_ac.ac_multirangecnt == 0)) {
6193 				CLR(ifp->if_flags, IFF_ALLMULTI);
6194 				error = ENETRESET;
6195 			}
6196 		}
6197 		break;
6198 
6199 	default:
6200 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
6201 	}
6202 
6203 	if (error == ENETRESET) {
6204 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6205 		    (IFF_UP | IFF_RUNNING))
6206 			mcx_iff(sc);
6207 		error = 0;
6208 	}
6209 	splx(s);
6210 
6211 	return (error);
6212 }
6213 
/*
 * SIOCGIFSFFPAGE handler: read 256 bytes of SFP/QSFP module eeprom via
 * the MCIA register, MCX_MCIA_EEPROM_BYTES at a time.  The module
 * number is looked up first through the PMLP port/lane mapping.
 * Returns 0 on success or the register access error.
 */
static int
mcx_get_sffpage(struct ifnet *ifp, struct if_sffpage *sff)
{
	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
	struct mcx_reg_mcia mcia;
	struct mcx_reg_pmlp pmlp;
	int offset, error;

	/* get module number */
	memset(&pmlp, 0, sizeof(pmlp));
	pmlp.rp_local_port = 1;
	error = mcx_access_hca_reg(sc, MCX_REG_PMLP, MCX_REG_OP_READ, &pmlp,
	    sizeof(pmlp));
	if (error != 0) {
		printf("%s: unable to get eeprom module number\n",
		    DEVNAME(sc));
		return error;
	}

	for (offset = 0; offset < 256; offset += MCX_MCIA_EEPROM_BYTES) {
		memset(&mcia, 0, sizeof(mcia));
		mcia.rm_l = 0;
		mcia.rm_module = betoh32(pmlp.rp_lane0_mapping) &
		    MCX_PMLP_MODULE_NUM_MASK;
		/* sff_addr is an i2c address (0xa0/0xa2) */
		mcia.rm_i2c_addr = sff->sff_addr / 2;	/* apparently */
		mcia.rm_page_num = sff->sff_page;
		mcia.rm_dev_addr = htobe16(offset);
		mcia.rm_size = htobe16(MCX_MCIA_EEPROM_BYTES);

		error = mcx_access_hca_reg(sc, MCX_REG_MCIA, MCX_REG_OP_READ,
		    &mcia, sizeof(mcia));
		if (error != 0) {
			printf("%s: unable to read eeprom at %x\n",
			    DEVNAME(sc), offset);
			return error;
		}

		memcpy(sff->sff_data + offset, mcia.rm_data,
		    MCX_MCIA_EEPROM_BYTES);
	}

	return 0;
}
6257 
6258 static int
6259 mcx_rxrinfo(struct mcx_softc *sc, struct if_rxrinfo *ifri)
6260 {
6261 	struct if_rxring_info ifr;
6262 
6263 	memset(&ifr, 0, sizeof(ifr));
6264 	ifr.ifr_size = sc->sc_hardmtu;
6265 	ifr.ifr_info = sc->sc_rxr;
6266 
6267 	return (if_rxr_info_ioctl(ifri, 1, &ifr));
6268 }
6269 
/*
 * Load a tx mbuf into a slot's dma map, defragmenting the chain once
 * and retrying if it has too many segments.  Returns 0 on success
 * (the slot takes ownership of the mbuf via ms->ms_m), 1 on failure
 * (the caller still owns and must free the mbuf).
 */
int
mcx_load_mbuf(struct mcx_softc *sc, struct mcx_slot *ms, struct mbuf *m)
{
	switch (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;

	case EFBIG:
		/* too many segments: compact the chain and retry once */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (1);
	}

	ms->ms_m = m;
	return (0);
}
6291 
/*
 * Transmit start routine: drain packets from the interface queue into
 * send queue entries, then ring the doorbell and write the start of the
 * last entry to the blue flame buffer to kick the hardware.
 */
static void
mcx_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct mcx_softc *sc = ifp->if_softc;
	struct mcx_sq_entry *sq, *sqe;
	struct mcx_sq_entry_seg *sqs;
	struct mcx_slot *ms;
	bus_dmamap_t map;
	struct mbuf *m;
	u_int idx, free, used;
	uint64_t *bf;
	size_t bf_base;
	int i, seg, nseg;

	/* offset of the blue flame region within our UAR */
	bf_base = (sc->sc_uar * MCX_PAGE_SIZE) + MCX_UAR_BF;

	/* producer slot index and number of free slots in the sq ring */
	idx = sc->sc_tx_prod % (1 << MCX_LOG_SQ_SIZE);
	free = (sc->sc_tx_cons + (1 << MCX_LOG_SQ_SIZE)) - sc->sc_tx_prod;

	used = 0;
	bf = NULL;
	sq = (struct mcx_sq_entry *)MCX_DMA_KVA(&sc->sc_sq_mem);

	for (;;) {
		/*
		 * A packet can occupy up to MCX_SQ_ENTRY_MAX_SLOTS ring
		 * slots; stop before the next one could overrun the ring.
		 */
		if (used + MCX_SQ_ENTRY_MAX_SLOTS >= free) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL) {
			break;
		}

		sqe = sq + idx;
		ms = &sc->sc_tx_slots[idx];
		memset(sqe, 0, sizeof(*sqe));

		/* ctrl segment */
		sqe->sqe_opcode_index = htobe32(MCX_SQE_WQE_OPCODE_SEND |
		    ((sc->sc_tx_prod & 0xffff) << MCX_SQE_WQE_INDEX_SHIFT));
		/* always generate a completion event */
		sqe->sqe_signature = htobe32(MCX_SQE_CE_CQE_ALWAYS);

		/* eth segment: the first bytes of the packet go inline */
		sqe->sqe_inline_header_size = htobe16(MCX_SQ_INLINE_SIZE);
		m_copydata(m, 0, MCX_SQ_INLINE_SIZE,
		    (caddr_t)sqe->sqe_inline_headers);
		m_adj(m, MCX_SQ_INLINE_SIZE);

		/* drop the packet if it can't be mapped; slot is reused */
		if (mcx_load_mbuf(sc, ms, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}
		/* remember the last produced sqe for the blue flame write */
		bf = (uint64_t *)sqe;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_hdr(ifp->if_bpf,
			    (caddr_t)sqe->sqe_inline_headers,
			    MCX_SQ_INLINE_SIZE, m, BPF_DIRECTION_OUT, NULL);
#endif
		map = ms->ms_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * descriptor count for the wqe; presumably + 3 covers
		 * the ctrl and eth segments - TODO confirm against the
		 * hardware programming reference
		 */
		sqe->sqe_ds_sq_num =
		    htobe32((sc->sc_sqn << MCX_SQE_SQ_NUM_SHIFT) |
		    (map->dm_nsegs + 3));

		/* data segment - first wqe has one segment */
		sqs = sqe->sqe_segs;
		seg = 0;
		nseg = 1;
		for (i = 0; i < map->dm_nsegs; i++) {
			if (seg == nseg) {
				/* next slot */
				idx++;
				if (idx == (1 << MCX_LOG_SQ_SIZE))
					idx = 0;
				sc->sc_tx_prod++;
				used++;

				/* continuation slots hold only data segs */
				sqs = (struct mcx_sq_entry_seg *)(sq + idx);
				seg = 0;
				nseg = MCX_SQ_SEGS_PER_SLOT;
			}
			sqs[seg].sqs_byte_count =
			    htobe32(map->dm_segs[i].ds_len);
			sqs[seg].sqs_lkey = htobe32(sc->sc_lkey);
			sqs[seg].sqs_addr = htobe64(map->dm_segs[i].ds_addr);
			seg++;
		}

		/* advance past the last slot this packet consumed */
		idx++;
		if (idx == (1 << MCX_LOG_SQ_SIZE))
			idx = 0;
		sc->sc_tx_prod++;
		used++;
	}

	if (used) {
		/* publish the new producer count to the hardware */
		*sc->sc_tx_doorbell = htobe32(sc->sc_tx_prod);

		/* doorbell store must be visible before the bf write */
		membar_sync();

		/*
		 * write the first 64 bits of the last sqe we produced
		 * to the blue flame buffer
		 */
		bus_space_write_raw_8(sc->sc_memt, sc->sc_memh,
		    bf_base + sc->sc_bf_offset, *bf);
		/* next write goes to the other buffer */
		sc->sc_bf_offset ^= sc->sc_bf_size;

		membar_sync();
	}
}
6412 
/* Interface watchdog: no transmit timeout handling is implemented. */
static void
mcx_watchdog(struct ifnet *ifp)
{
}
6417 
6418 static void
6419 mcx_media_add_types(struct mcx_softc *sc)
6420 {
6421 	struct mcx_reg_ptys ptys;
6422 	int i;
6423 	uint32_t proto_cap;
6424 
6425 	memset(&ptys, 0, sizeof(ptys));
6426 	ptys.rp_local_port = 1;
6427 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6428 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
6429 	    sizeof(ptys)) != 0) {
6430 		printf("%s: unable to read port type/speed\n", DEVNAME(sc));
6431 		return;
6432 	}
6433 
6434 	proto_cap = betoh32(ptys.rp_eth_proto_cap);
6435 	for (i = 0; i < nitems(mcx_eth_cap_map); i++) {
6436 		if ((proto_cap & (1 << i)) && (mcx_eth_cap_map[i] != 0))
6437 			ifmedia_add(&sc->sc_media, IFM_ETHER |
6438 			    mcx_eth_cap_map[i], 0, NULL);
6439 	}
6440 }
6441 
6442 static void
6443 mcx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
6444 {
6445 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6446 	struct mcx_reg_ptys ptys;
6447 	int i;
6448 	uint32_t proto_cap, proto_oper;
6449 	uint64_t media_oper;
6450 
6451 	memset(&ptys, 0, sizeof(ptys));
6452 	ptys.rp_local_port = 1;
6453 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6454 
6455 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
6456 	    sizeof(ptys)) != 0) {
6457 		printf("%s: unable to read port type/speed\n", DEVNAME(sc));
6458 		return;
6459 	}
6460 
6461 	proto_cap = betoh32(ptys.rp_eth_proto_cap);
6462 	proto_oper = betoh32(ptys.rp_eth_proto_oper);
6463 
6464 	media_oper = 0;
6465 	for (i = 0; i < nitems(mcx_eth_cap_map); i++) {
6466 		if (proto_oper & (1 << i)) {
6467 			media_oper = mcx_eth_cap_map[i];
6468 		}
6469 	}
6470 
6471 	ifmr->ifm_status = IFM_AVALID;
6472 	/* not sure if this is the right thing to check, maybe paos? */
6473 	if (proto_oper != 0) {
6474 		ifmr->ifm_status |= IFM_ACTIVE;
6475 		ifmr->ifm_active = IFM_ETHER | IFM_AUTO | media_oper;
6476 		/* txpause, rxpause, duplex? */
6477 	}
6478 }
6479 
6480 static int
6481 mcx_media_change(struct ifnet *ifp)
6482 {
6483 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6484 	struct mcx_reg_ptys ptys;
6485 	struct mcx_reg_paos paos;
6486 	uint32_t media;
6487 	int i, error;
6488 
6489 	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
6490 		return EINVAL;
6491 
6492 	error = 0;
6493 
6494 	if (IFM_SUBTYPE(sc->sc_media.ifm_media) == IFM_AUTO) {
6495 		/* read ptys to get supported media */
6496 		memset(&ptys, 0, sizeof(ptys));
6497 		ptys.rp_local_port = 1;
6498 		ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6499 		if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ,
6500 		    &ptys, sizeof(ptys)) != 0) {
6501 			printf("%s: unable to read port type/speed\n",
6502 			    DEVNAME(sc));
6503 			return EIO;
6504 		}
6505 
6506 		media = betoh32(ptys.rp_eth_proto_cap);
6507 	} else {
6508 		/* map media type */
6509 		media = 0;
6510 		for (i = 0; i < nitems(mcx_eth_cap_map); i++) {
6511 			if (mcx_eth_cap_map[i] ==
6512 			    IFM_SUBTYPE(sc->sc_media.ifm_media)) {
6513 				media = (1 << i);
6514 				break;
6515 			}
6516 		}
6517 	}
6518 
6519 	/* disable the port */
6520 	memset(&paos, 0, sizeof(paos));
6521 	paos.rp_local_port = 1;
6522 	paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_DOWN;
6523 	paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
6524 	if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
6525 	    sizeof(paos)) != 0) {
6526 		printf("%s: unable to set port state to down\n", DEVNAME(sc));
6527 		return EIO;
6528 	}
6529 
6530 	memset(&ptys, 0, sizeof(ptys));
6531 	ptys.rp_local_port = 1;
6532 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6533 	ptys.rp_eth_proto_admin = htobe32(media);
6534 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_WRITE, &ptys,
6535 	    sizeof(ptys)) != 0) {
6536 		printf("%s: unable to set port media type/speed\n",
6537 		    DEVNAME(sc));
6538 		error = EIO;
6539 	}
6540 
6541 	/* re-enable the port to start negotiation */
6542 	memset(&paos, 0, sizeof(paos));
6543 	paos.rp_local_port = 1;
6544 	paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_UP;
6545 	paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
6546 	if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
6547 	    sizeof(paos)) != 0) {
6548 		printf("%s: unable to set port state to up\n", DEVNAME(sc));
6549 		error = EIO;
6550 	}
6551 
6552 	return error;
6553 }
6554 
6555 static void
6556 mcx_port_change(void *xsc)
6557 {
6558 	struct mcx_softc *sc = xsc;
6559 	struct ifnet *ifp = &sc->sc_ac.ac_if;
6560 	struct mcx_reg_paos paos;
6561 	int link_state = LINK_STATE_DOWN;
6562 
6563 	memset(&paos, 0, sizeof(paos));
6564 	paos.rp_local_port = 1;
6565 	if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_READ, &paos,
6566 	    sizeof(paos)) == 0) {
6567 		if (paos.rp_oper_status == MCX_REG_PAOS_OPER_STATUS_UP)
6568 			link_state = LINK_STATE_FULL_DUPLEX;
6569 	}
6570 
6571 	if (link_state != ifp->if_link_state) {
6572 		ifp->if_link_state = link_state;
6573 		if_link_state_change(ifp);
6574 	}
6575 }
6576 
6577 
6578 static inline uint32_t
6579 mcx_rd(struct mcx_softc *sc, bus_size_t r)
6580 {
6581 	uint32_t word;
6582 
6583 	word = bus_space_read_raw_4(sc->sc_memt, sc->sc_memh, r);
6584 
6585 	return (betoh32(word));
6586 }
6587 
6588 static inline void
6589 mcx_wr(struct mcx_softc *sc, bus_size_t r, uint32_t v)
6590 {
6591 	bus_space_write_raw_4(sc->sc_memt, sc->sc_memh, r, htobe32(v));
6592 }
6593 
/*
 * Issue a bus space barrier over l bytes of register space starting at
 * r; f is a BUS_SPACE_BARRIER_* flag.
 */
static inline void
mcx_bar(struct mcx_softc *sc, bus_size_t r, bus_size_t l, int f)
{
	bus_space_barrier(sc->sc_memt, sc->sc_memh, r, l, f);
}
6599 
6600 static uint64_t
6601 mcx_timer(struct mcx_softc *sc)
6602 {
6603 	uint32_t hi, lo, ni;
6604 
6605 	hi = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
6606 	for (;;) {
6607 		lo = mcx_rd(sc, MCX_INTERNAL_TIMER_L);
6608 		mcx_bar(sc, MCX_INTERNAL_TIMER_L, 8, BUS_SPACE_BARRIER_READ);
6609 		ni = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
6610 
6611 		if (ni == hi)
6612 			break;
6613 
6614 		hi = ni;
6615 	}
6616 
6617 	return (((uint64_t)hi << 32) | (uint64_t)lo);
6618 }
6619 
6620 static int
6621 mcx_dmamem_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
6622     bus_size_t size, u_int align)
6623 {
6624 	mxm->mxm_size = size;
6625 
6626 	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
6627 	    mxm->mxm_size, 0,
6628 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
6629 	    &mxm->mxm_map) != 0)
6630 		return (1);
6631 	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
6632 	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
6633 	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
6634 		goto destroy;
6635 	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
6636 	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
6637 		goto free;
6638 	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
6639 	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
6640 		goto unmap;
6641 
6642 	return (0);
6643 unmap:
6644 	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
6645 free:
6646 	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
6647 destroy:
6648 	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
6649 	return (1);
6650 }
6651 
/* Zero the entire kernel-mapped area of a DMA memory allocation. */
static void
mcx_dmamem_zero(struct mcx_dmamem *mxm)
{
	memset(MCX_DMA_KVA(mxm), 0, MCX_DMA_LEN(mxm));
}
6657 
/*
 * Tear down a DMA allocation created by mcx_dmamem_alloc(): unload,
 * unmap, free and destroy, in the reverse order of setup.
 */
static void
mcx_dmamem_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
{
	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
}
6666 
6667 static int
6668 mcx_hwmem_alloc(struct mcx_softc *sc, struct mcx_hwmem *mhm, unsigned int pages)
6669 {
6670 	bus_dma_segment_t *segs;
6671 	bus_size_t len = pages * MCX_PAGE_SIZE;
6672 	size_t seglen;
6673 
6674 	segs = mallocarray(sizeof(*segs), pages, M_DEVBUF, M_WAITOK|M_CANFAIL);
6675 	if (segs == NULL)
6676 		return (-1);
6677 
6678 	seglen = sizeof(*segs) * pages;
6679 
6680 	if (bus_dmamem_alloc(sc->sc_dmat, len, MCX_PAGE_SIZE, 0,
6681 	    segs, pages, &mhm->mhm_seg_count, BUS_DMA_NOWAIT) != 0)
6682 		goto free_segs;
6683 
6684 	if (mhm->mhm_seg_count < pages) {
6685 		size_t nseglen;
6686 
6687 		mhm->mhm_segs = mallocarray(sizeof(*mhm->mhm_segs),
6688 		    mhm->mhm_seg_count, M_DEVBUF, M_WAITOK|M_CANFAIL);
6689 		if (mhm->mhm_segs == NULL)
6690 			goto free_dmamem;
6691 
6692 		nseglen = sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count;
6693 
6694 		memcpy(mhm->mhm_segs, segs, nseglen);
6695 
6696 		free(segs, M_DEVBUF, seglen);
6697 
6698 		segs = mhm->mhm_segs;
6699 		seglen = nseglen;
6700 	} else
6701 		mhm->mhm_segs = segs;
6702 
6703 	if (bus_dmamap_create(sc->sc_dmat, len, pages, MCX_PAGE_SIZE,
6704 	    MCX_PAGE_SIZE, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW /*|BUS_DMA_64BIT*/,
6705 	    &mhm->mhm_map) != 0)
6706 		goto free_dmamem;
6707 
6708 	if (bus_dmamap_load_raw(sc->sc_dmat, mhm->mhm_map,
6709 	    mhm->mhm_segs, mhm->mhm_seg_count, len, BUS_DMA_NOWAIT) != 0)
6710 		goto destroy;
6711 
6712 	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
6713 	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_PRERW);
6714 
6715 	mhm->mhm_npages = pages;
6716 
6717 	return (0);
6718 
6719 destroy:
6720 	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
6721 free_dmamem:
6722 	bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
6723 free_segs:
6724 	free(segs, M_DEVBUF, seglen);
6725 	mhm->mhm_segs = NULL;
6726 
6727 	return (-1);
6728 }
6729 
/*
 * Release firmware memory pages allocated by mcx_hwmem_alloc().
 * Safe to call on an mhm that was never (or already) allocated.
 */
static void
mcx_hwmem_free(struct mcx_softc *sc, struct mcx_hwmem *mhm)
{
	/* nothing allocated, nothing to release */
	if (mhm->mhm_npages == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_POSTRW);

	/* tear down in the reverse order of setup */
	bus_dmamap_unload(sc->sc_dmat, mhm->mhm_map);
	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
	bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
	free(mhm->mhm_segs, M_DEVBUF,
	    sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count);

	/* mark as freed so a repeated call is a no-op */
	mhm->mhm_npages = 0;
}
6747