xref: /netbsd-src/sys/dev/pci/if_mcx.c (revision 5375e12722f63638a2b7fb86c3617ae050ecff7e)
1 /*	$NetBSD: if_mcx.c,v 1.29 2024/07/07 23:29:04 msaitoh Exp $ */
2 /*	$OpenBSD: if_mcx.c,v 1.101 2021/06/02 19:16:11 patrick Exp $ */
3 
4 /*
5  * Copyright (c) 2017 David Gwynne <dlg@openbsd.org>
6  * Copyright (c) 2019 Jonathan Matthew <jmatthew@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #ifdef _KERNEL_OPT
22 #include "opt_net_mpsafe.h"
23 #endif
24 
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: if_mcx.c,v 1.29 2024/07/07 23:29:04 msaitoh Exp $");
27 
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/sockio.h>
31 #include <sys/mbuf.h>
32 #include <sys/kernel.h>
33 #include <sys/socket.h>
34 #include <sys/device.h>
35 #include <sys/pool.h>
36 #include <sys/queue.h>
37 #include <sys/callout.h>
38 #include <sys/workqueue.h>
39 #include <sys/atomic.h>
40 #include <sys/timetc.h>
41 #include <sys/kmem.h>
42 #include <sys/bus.h>
43 #include <sys/interrupt.h>
44 #include <sys/pcq.h>
45 #include <sys/cpu.h>
46 #include <sys/bitops.h>
47 
48 #include <machine/intr.h>
49 
50 #include <net/if.h>
51 #include <net/if_dl.h>
52 #include <net/if_ether.h>
53 #include <net/if_media.h>
54 #include <net/if_vlanvar.h>
55 #include <net/toeplitz.h>
56 
57 #include <net/bpf.h>
58 
59 #include <netinet/in.h>
60 
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 #include <dev/pci/pcidevs.h>
64 
65 /* TODO: Port kstat key/value stuff to evcnt/sysmon */
66 #define	NKSTAT		0
67 
68 /* XXX This driver is not yet MP-safe; don't claim to be! */
69 /* #ifdef NET_MPSAFE */
70 /* #define	MCX_MPSAFE	1 */
71 /* #define	CALLOUT_FLAGS	CALLOUT_MPSAFE */
72 /* #else */
73 #define	CALLOUT_FLAGS	0
74 /* #endif */
75 
76 #define	MCX_TXQ_NUM		2048
77 
78 #define BUS_DMASYNC_PRERW	(BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
79 #define BUS_DMASYNC_POSTRW	(BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)
80 
81 #define MCX_HCA_BAR	PCI_MAPREG_START /* BAR 0 */
82 
83 #define MCX_FW_VER			0x0000
84 #define  MCX_FW_VER_MAJOR(_v)			((_v) & 0xffff)
85 #define  MCX_FW_VER_MINOR(_v)			((_v) >> 16)
86 #define MCX_CMDIF_FW_SUBVER		0x0004
87 #define  MCX_FW_VER_SUBMINOR(_v)		((_v) & 0xffff)
88 #define  MCX_CMDIF(_v)				((_v) >> 16)
89 
90 #define MCX_ISSI			1 /* as per the PRM */
91 #define MCX_CMD_IF_SUPPORTED		5
92 
93 #define MCX_HARDMTU			9500
94 
95 #define MCX_PAGE_SHIFT			12
96 #define MCX_PAGE_SIZE			(1 << MCX_PAGE_SHIFT)
97 
98 /* queue sizes */
99 #define MCX_LOG_EQ_SIZE			7
100 #define MCX_LOG_CQ_SIZE			12
101 #define MCX_LOG_RQ_SIZE			10
102 #define MCX_LOG_SQ_SIZE			11
103 
104 #define MCX_MAX_QUEUES			16
105 
106 /* completion event moderation - about 10kHz, or 90% of half the cq */
107 #define MCX_CQ_MOD_PERIOD		50
108 #define MCX_CQ_MOD_COUNTER		\
109 	(((1 << (MCX_LOG_CQ_SIZE - 1)) * 9) / 10)
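
/*
 * Worked example: with MCX_LOG_CQ_SIZE = 12 the CQ holds 4096 entries,
 * so the counter above evaluates to ((1 << 11) * 9) / 10 = 1843
 * completions between moderation events.
 */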
110 
111 #define MCX_LOG_SQ_ENTRY_SIZE		6
112 #define MCX_SQ_ENTRY_MAX_SLOTS		4
113 #define MCX_SQ_SEGS_PER_SLOT		\
114 	(sizeof(struct mcx_sq_entry) / sizeof(struct mcx_sq_entry_seg))
115 #define MCX_SQ_MAX_SEGMENTS		\
116 	(1 + ((MCX_SQ_ENTRY_MAX_SLOTS-1) * MCX_SQ_SEGS_PER_SLOT))
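
/*
 * Worked example: a 64-byte SQ entry spans 64/16 = 4 segment-sized
 * units, so MCX_SQ_SEGS_PER_SLOT is 4 and an entry stretched over the
 * maximum of 4 slots carries 1 + (4 - 1) * 4 = 13 DMA segments.
 */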
117 
118 #define MCX_LOG_FLOW_TABLE_SIZE		5
119 #define MCX_NUM_STATIC_FLOWS		4 /* promisc, allmulti, ucast, bcast */
120 #define MCX_NUM_MCAST_FLOWS 		\
121 	((1 << MCX_LOG_FLOW_TABLE_SIZE) - MCX_NUM_STATIC_FLOWS)
122 
123 #define MCX_SQ_INLINE_SIZE		18
124 CTASSERT(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN == MCX_SQ_INLINE_SIZE);
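
/*
 * The CTASSERT above pins the inline area to exactly one Ethernet
 * header (14 bytes) plus one VLAN encapsulation (4 bytes); these 18
 * bytes are copied into the inline-header field of every send WQE.
 */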
125 
126 /* doorbell offsets */
127 #define MCX_DOORBELL_AREA_SIZE		MCX_PAGE_SIZE
128 
129 #define MCX_CQ_DOORBELL_BASE		0
130 #define MCX_CQ_DOORBELL_STRIDE		64
131 
132 #define MCX_WQ_DOORBELL_BASE		(MCX_PAGE_SIZE/2)
133 #define MCX_WQ_DOORBELL_STRIDE		64
134 /* make sure the doorbells fit */
135 CTASSERT(MCX_MAX_QUEUES * MCX_CQ_DOORBELL_STRIDE < MCX_WQ_DOORBELL_BASE);
136 CTASSERT(MCX_MAX_QUEUES * MCX_WQ_DOORBELL_STRIDE <
137     MCX_DOORBELL_AREA_SIZE - MCX_WQ_DOORBELL_BASE);
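
/*
 * Illustrative only (not used by the driver): byte offsets of the
 * per-queue doorbells within the doorbell page, derived from the
 * bases and strides above.
 */
#define MCX_EXAMPLE_CQ_DOORBELL_OFFSET(_q) \
	(MCX_CQ_DOORBELL_BASE + (_q) * MCX_CQ_DOORBELL_STRIDE)
#define MCX_EXAMPLE_WQ_DOORBELL_OFFSET(_q) \
	(MCX_WQ_DOORBELL_BASE + (_q) * MCX_WQ_DOORBELL_STRIDE)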
138 
139 #define MCX_WQ_DOORBELL_MASK		0xffff
140 
141 /* uar registers */
142 #define MCX_UAR_CQ_DOORBELL		0x20
143 #define MCX_UAR_EQ_DOORBELL_ARM		0x40
144 #define MCX_UAR_EQ_DOORBELL		0x48
145 #define MCX_UAR_BF			0x800
146 
147 #define MCX_CMDQ_ADDR_HI		0x0010
148 #define MCX_CMDQ_ADDR_LO		0x0014
149 #define MCX_CMDQ_ADDR_NMASK		0xfff
150 #define MCX_CMDQ_LOG_SIZE(_v)		((_v) >> 4 & 0xf)
151 #define MCX_CMDQ_LOG_STRIDE(_v)		((_v) >> 0 & 0xf)
152 #define MCX_CMDQ_INTERFACE_MASK		(0x3 << 8)
153 #define MCX_CMDQ_INTERFACE_FULL_DRIVER	(0x0 << 8)
154 #define MCX_CMDQ_INTERFACE_DISABLED	(0x1 << 8)
155 
156 #define MCX_CMDQ_DOORBELL		0x0018
157 
158 #define MCX_STATE			0x01fc
159 #define MCX_STATE_MASK				(1U << 31)
160 #define MCX_STATE_INITIALIZING			(1U << 31)
161 #define MCX_STATE_READY				(0 << 31)
162 #define MCX_STATE_INTERFACE_MASK		(0x3 << 24)
163 #define MCX_STATE_INTERFACE_FULL_DRIVER		(0x0 << 24)
164 #define MCX_STATE_INTERFACE_DISABLED		(0x1 << 24)
165 
166 #define MCX_INTERNAL_TIMER		0x1000
167 #define MCX_INTERNAL_TIMER_H		0x1000
168 #define MCX_INTERNAL_TIMER_L		0x1004
169 
170 #define MCX_CLEAR_INT			0x100c
171 
172 #define MCX_REG_OP_WRITE		0
173 #define MCX_REG_OP_READ			1
174 
175 #define MCX_REG_PMLP			0x5002
176 #define MCX_REG_PMTU			0x5003
177 #define MCX_REG_PTYS			0x5004
178 #define MCX_REG_PAOS			0x5006
179 #define MCX_REG_PFCC			0x5007
180 #define MCX_REG_PPCNT			0x5008
181 #define MCX_REG_MTCAP			0x9009 /* mgmt temp capabilities */
182 #define MCX_REG_MTMP			0x900a /* mgmt temp */
183 #define MCX_REG_MCIA			0x9014
184 #define MCX_REG_MCAM			0x907f
185 
186 #define MCX_ETHER_CAP_SGMII		0
187 #define MCX_ETHER_CAP_1000_KX		1
188 #define MCX_ETHER_CAP_10G_CX4		2
189 #define MCX_ETHER_CAP_10G_KX4		3
190 #define MCX_ETHER_CAP_10G_KR		4
191 #define MCX_ETHER_CAP_20G_KR2		5
192 #define MCX_ETHER_CAP_40G_CR4		6
193 #define MCX_ETHER_CAP_40G_KR4		7
194 #define MCX_ETHER_CAP_56G_R4		8
195 #define MCX_ETHER_CAP_10G_CR		12
196 #define MCX_ETHER_CAP_10G_SR		13
197 #define MCX_ETHER_CAP_10G_LR		14
198 #define MCX_ETHER_CAP_40G_SR4		15
199 #define MCX_ETHER_CAP_40G_LR4		16
200 #define MCX_ETHER_CAP_50G_SR2		18
201 #define MCX_ETHER_CAP_100G_CR4		20
202 #define MCX_ETHER_CAP_100G_SR4		21
203 #define MCX_ETHER_CAP_100G_KR4		22
204 #define MCX_ETHER_CAP_100G_LR4		23
205 #define MCX_ETHER_CAP_100_TX		24
206 #define MCX_ETHER_CAP_1000_T		25
207 #define MCX_ETHER_CAP_10G_T		26
208 #define MCX_ETHER_CAP_25G_CR		27
209 #define MCX_ETHER_CAP_25G_KR		28
210 #define MCX_ETHER_CAP_25G_SR		29
211 #define MCX_ETHER_CAP_50G_CR2		30
212 #define MCX_ETHER_CAP_50G_KR2		31
213 
214 #define MCX_MAX_CQE			32
215 
216 #define MCX_CMD_QUERY_HCA_CAP		0x100
217 #define MCX_CMD_QUERY_ADAPTER		0x101
218 #define MCX_CMD_INIT_HCA		0x102
219 #define MCX_CMD_TEARDOWN_HCA		0x103
220 #define MCX_CMD_ENABLE_HCA		0x104
221 #define MCX_CMD_DISABLE_HCA		0x105
222 #define MCX_CMD_QUERY_PAGES		0x107
223 #define MCX_CMD_MANAGE_PAGES		0x108
224 #define MCX_CMD_SET_HCA_CAP		0x109
225 #define MCX_CMD_QUERY_ISSI		0x10a
226 #define MCX_CMD_SET_ISSI		0x10b
227 #define MCX_CMD_SET_DRIVER_VERSION	0x10d
228 #define MCX_CMD_QUERY_SPECIAL_CONTEXTS	0x203
229 #define MCX_CMD_CREATE_EQ		0x301
230 #define MCX_CMD_DESTROY_EQ		0x302
231 #define MCX_CMD_QUERY_EQ		0x303
232 #define MCX_CMD_CREATE_CQ		0x400
233 #define MCX_CMD_DESTROY_CQ		0x401
234 #define MCX_CMD_QUERY_CQ		0x402
235 #define MCX_CMD_QUERY_NIC_VPORT_CONTEXT 0x754
236 #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT \
237 					0x755
238 #define MCX_CMD_QUERY_VPORT_COUNTERS 	0x770
239 #define MCX_CMD_ALLOC_PD		0x800
240 #define MCX_CMD_ALLOC_UAR		0x802
241 #define MCX_CMD_ACCESS_REG		0x805
242 #define MCX_CMD_ALLOC_TRANSPORT_DOMAIN	0x816
243 #define MCX_CMD_CREATE_TIR		0x900
244 #define MCX_CMD_DESTROY_TIR		0x902
245 #define MCX_CMD_CREATE_SQ		0x904
246 #define MCX_CMD_MODIFY_SQ		0x905
247 #define MCX_CMD_DESTROY_SQ		0x906
248 #define MCX_CMD_QUERY_SQ		0x907
249 #define MCX_CMD_CREATE_RQ		0x908
250 #define MCX_CMD_MODIFY_RQ		0x909
251 #define MCX_CMD_DESTROY_RQ		0x90a
252 #define MCX_CMD_QUERY_RQ		0x90b
253 #define MCX_CMD_CREATE_TIS		0x912
254 #define MCX_CMD_DESTROY_TIS		0x914
255 #define MCX_CMD_CREATE_RQT		0x916
256 #define MCX_CMD_DESTROY_RQT		0x918
257 #define MCX_CMD_SET_FLOW_TABLE_ROOT	0x92f
258 #define MCX_CMD_CREATE_FLOW_TABLE	0x930
259 #define MCX_CMD_DESTROY_FLOW_TABLE	0x931
260 #define MCX_CMD_QUERY_FLOW_TABLE	0x932
261 #define MCX_CMD_CREATE_FLOW_GROUP	0x933
262 #define MCX_CMD_DESTROY_FLOW_GROUP	0x934
263 #define MCX_CMD_QUERY_FLOW_GROUP	0x935
264 #define MCX_CMD_SET_FLOW_TABLE_ENTRY	0x936
265 #define MCX_CMD_QUERY_FLOW_TABLE_ENTRY	0x937
266 #define MCX_CMD_DELETE_FLOW_TABLE_ENTRY	0x938
267 #define MCX_CMD_ALLOC_FLOW_COUNTER	0x939
268 #define MCX_CMD_QUERY_FLOW_COUNTER	0x93b
269 
270 #define MCX_QUEUE_STATE_RST		0
271 #define MCX_QUEUE_STATE_RDY		1
272 #define MCX_QUEUE_STATE_ERR		3
273 
274 #define MCX_FLOW_TABLE_TYPE_RX		0
275 #define MCX_FLOW_TABLE_TYPE_TX		1
276 
277 #define MCX_CMDQ_INLINE_DATASIZE	16
278 
279 struct mcx_cmdq_entry {
280 	uint8_t			cq_type;
281 #define MCX_CMDQ_TYPE_PCIE		0x7
282 	uint8_t			cq_reserved0[3];
283 
284 	uint32_t		cq_input_length;
285 	uint64_t		cq_input_ptr;
286 	uint8_t			cq_input_data[MCX_CMDQ_INLINE_DATASIZE];
287 
288 	uint8_t			cq_output_data[MCX_CMDQ_INLINE_DATASIZE];
289 	uint64_t		cq_output_ptr;
290 	uint32_t		cq_output_length;
291 
292 	uint8_t			cq_token;
293 	uint8_t			cq_signature;
294 	uint8_t			cq_reserved1[1];
295 	uint8_t			cq_status;
296 #define MCX_CQ_STATUS_SHIFT		1
297 #define MCX_CQ_STATUS_MASK		(0x7f << MCX_CQ_STATUS_SHIFT)
298 #define MCX_CQ_STATUS_OK		(0x00 << MCX_CQ_STATUS_SHIFT)
299 #define MCX_CQ_STATUS_INT_ERR		(0x01 << MCX_CQ_STATUS_SHIFT)
300 #define MCX_CQ_STATUS_BAD_OPCODE	(0x02 << MCX_CQ_STATUS_SHIFT)
301 #define MCX_CQ_STATUS_BAD_PARAM		(0x03 << MCX_CQ_STATUS_SHIFT)
302 #define MCX_CQ_STATUS_BAD_SYS_STATE	(0x04 << MCX_CQ_STATUS_SHIFT)
303 #define MCX_CQ_STATUS_BAD_RESOURCE	(0x05 << MCX_CQ_STATUS_SHIFT)
304 #define MCX_CQ_STATUS_RESOURCE_BUSY	(0x06 << MCX_CQ_STATUS_SHIFT)
305 #define MCX_CQ_STATUS_EXCEED_LIM	(0x08 << MCX_CQ_STATUS_SHIFT)
306 #define MCX_CQ_STATUS_BAD_RES_STATE	(0x09 << MCX_CQ_STATUS_SHIFT)
307 #define MCX_CQ_STATUS_BAD_INDEX		(0x0a << MCX_CQ_STATUS_SHIFT)
308 #define MCX_CQ_STATUS_NO_RESOURCES	(0x0f << MCX_CQ_STATUS_SHIFT)
309 #define MCX_CQ_STATUS_BAD_INPUT_LEN	(0x50 << MCX_CQ_STATUS_SHIFT)
310 #define MCX_CQ_STATUS_BAD_OUTPUT_LEN	(0x51 << MCX_CQ_STATUS_SHIFT)
311 #define MCX_CQ_STATUS_BAD_RESOURCE_STATE \
312 					(0x10 << MCX_CQ_STATUS_SHIFT)
313 #define MCX_CQ_STATUS_BAD_SIZE		(0x40 << MCX_CQ_STATUS_SHIFT)
314 #define MCX_CQ_STATUS_OWN_MASK		0x1
315 #define MCX_CQ_STATUS_OWN_SW		0x0
316 #define MCX_CQ_STATUS_OWN_HW		0x1
317 } __packed __aligned(8);
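
/*
 * Ownership handshake: the low bit of cq_status doubles as the owner
 * flag.  Software fills in the entry, sets MCX_CQ_STATUS_OWN_HW and
 * rings the command doorbell; the entry is done once the device flips
 * the bit back to MCX_CQ_STATUS_OWN_SW, e.g. (illustrative, not the
 * driver's actual loop):
 *
 *	while ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) !=
 *	    MCX_CQ_STATUS_OWN_SW)
 *		delay(1000);
 */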
318 
319 #define MCX_CMDQ_MAILBOX_DATASIZE	512
320 
321 struct mcx_cmdq_mailbox {
322 	uint8_t			mb_data[MCX_CMDQ_MAILBOX_DATASIZE];
323 	uint8_t			mb_reserved0[48];
324 	uint64_t		mb_next_ptr;
325 	uint32_t		mb_block_number;
326 	uint8_t			mb_reserved1[1];
327 	uint8_t			mb_token;
328 	uint8_t			mb_ctrl_signature;
329 	uint8_t			mb_signature;
330 } __packed __aligned(8);
331 
332 #define MCX_CMDQ_MAILBOX_ALIGN	(1 << 10)
333 #define MCX_CMDQ_MAILBOX_SIZE	roundup(sizeof(struct mcx_cmdq_mailbox), \
334 				    MCX_CMDQ_MAILBOX_ALIGN)
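
/*
 * sizeof(struct mcx_cmdq_mailbox) works out to 576 bytes (512 bytes of
 * data plus the 64-byte trailer), so MCX_CMDQ_MAILBOX_SIZE rounds up
 * to 1024 and mailboxes never straddle their 1 KB alignment.
 */
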
335 /*
336  * command mailbox structures
337  */
338 
339 struct mcx_cmd_enable_hca_in {
340 	uint16_t		cmd_opcode;
341 	uint8_t			cmd_reserved0[4];
342 	uint16_t		cmd_op_mod;
343 	uint8_t			cmd_reserved1[2];
344 	uint16_t		cmd_function_id;
345 	uint8_t			cmd_reserved2[4];
346 } __packed __aligned(4);
347 
348 struct mcx_cmd_enable_hca_out {
349 	uint8_t			cmd_status;
350 	uint8_t			cmd_reserved0[3];
351 	uint32_t		cmd_syndrome;
352 	uint8_t			cmd_reserved1[4];
353 } __packed __aligned(4);
354 
355 struct mcx_cmd_init_hca_in {
356 	uint16_t		cmd_opcode;
357 	uint8_t			cmd_reserved0[4];
358 	uint16_t		cmd_op_mod;
359 	uint8_t			cmd_reserved1[8];
360 } __packed __aligned(4);
361 
362 struct mcx_cmd_init_hca_out {
363 	uint8_t			cmd_status;
364 	uint8_t			cmd_reserved0[3];
365 	uint32_t		cmd_syndrome;
366 	uint8_t			cmd_reserved1[8];
367 } __packed __aligned(4);
368 
369 struct mcx_cmd_teardown_hca_in {
370 	uint16_t		cmd_opcode;
371 	uint8_t			cmd_reserved0[4];
372 	uint16_t		cmd_op_mod;
373 	uint8_t			cmd_reserved1[2];
374 #define MCX_CMD_TEARDOWN_HCA_GRACEFUL	0x0
375 #define MCX_CMD_TEARDOWN_HCA_PANIC	0x1
376 	uint16_t		cmd_profile;
377 	uint8_t			cmd_reserved2[4];
378 } __packed __aligned(4);
379 
380 struct mcx_cmd_teardown_hca_out {
381 	uint8_t			cmd_status;
382 	uint8_t			cmd_reserved0[3];
383 	uint32_t		cmd_syndrome;
384 	uint8_t			cmd_reserved1[8];
385 } __packed __aligned(4);
386 
387 struct mcx_cmd_access_reg_in {
388 	uint16_t		cmd_opcode;
389 	uint8_t			cmd_reserved0[4];
390 	uint16_t		cmd_op_mod;
391 	uint8_t			cmd_reserved1[2];
392 	uint16_t		cmd_register_id;
393 	uint32_t		cmd_argument;
394 } __packed __aligned(4);
395 
396 struct mcx_cmd_access_reg_out {
397 	uint8_t			cmd_status;
398 	uint8_t			cmd_reserved0[3];
399 	uint32_t		cmd_syndrome;
400 	uint8_t			cmd_reserved1[8];
401 } __packed __aligned(4);
402 
403 struct mcx_reg_pmtu {
404 	uint8_t			rp_reserved1;
405 	uint8_t			rp_local_port;
406 	uint8_t			rp_reserved2[2];
407 	uint16_t		rp_max_mtu;
408 	uint8_t			rp_reserved3[2];
409 	uint16_t		rp_admin_mtu;
410 	uint8_t			rp_reserved4[2];
411 	uint16_t		rp_oper_mtu;
412 	uint8_t			rp_reserved5[2];
413 } __packed __aligned(4);
414 
415 struct mcx_reg_ptys {
416 	uint8_t			rp_reserved1;
417 	uint8_t			rp_local_port;
418 	uint8_t			rp_reserved2;
419 	uint8_t			rp_proto_mask;
420 #define MCX_REG_PTYS_PROTO_MASK_ETH		(1 << 2)
421 	uint8_t			rp_reserved3[8];
422 	uint32_t		rp_eth_proto_cap;
423 	uint8_t			rp_reserved4[8];
424 	uint32_t		rp_eth_proto_admin;
425 	uint8_t			rp_reserved5[8];
426 	uint32_t		rp_eth_proto_oper;
427 	uint8_t			rp_reserved6[24];
428 } __packed __aligned(4);
429 
430 struct mcx_reg_paos {
431 	uint8_t			rp_reserved1;
432 	uint8_t			rp_local_port;
433 	uint8_t			rp_admin_status;
434 #define MCX_REG_PAOS_ADMIN_STATUS_UP		1
435 #define MCX_REG_PAOS_ADMIN_STATUS_DOWN		2
436 #define MCX_REG_PAOS_ADMIN_STATUS_UP_ONCE	3
437 #define MCX_REG_PAOS_ADMIN_STATUS_DISABLED	4
438 	uint8_t			rp_oper_status;
439 #define MCX_REG_PAOS_OPER_STATUS_UP		1
440 #define MCX_REG_PAOS_OPER_STATUS_DOWN		2
441 #define MCX_REG_PAOS_OPER_STATUS_FAILED		4
442 	uint8_t			rp_admin_state_update;
443 #define MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN	(1 << 7)
444 	uint8_t			rp_reserved2[11];
445 } __packed __aligned(4);
446 
447 struct mcx_reg_pfcc {
448 	uint8_t			rp_reserved1;
449 	uint8_t			rp_local_port;
450 	uint8_t			rp_reserved2[3];
451 	uint8_t			rp_prio_mask_tx;
452 	uint8_t			rp_reserved3;
453 	uint8_t			rp_prio_mask_rx;
454 	uint8_t			rp_pptx_aptx;
455 	uint8_t			rp_pfctx;
456 	uint8_t			rp_fctx_dis;
457 	uint8_t			rp_reserved4;
458 	uint8_t			rp_pprx_aprx;
459 	uint8_t			rp_pfcrx;
460 	uint8_t			rp_reserved5[2];
461 	uint16_t		rp_dev_stall_min;
462 	uint16_t		rp_dev_stall_crit;
463 	uint8_t			rp_reserved6[12];
464 } __packed __aligned(4);
465 
466 #define MCX_PMLP_MODULE_NUM_MASK	0xff
467 struct mcx_reg_pmlp {
468 	uint8_t			rp_rxtx;
469 	uint8_t			rp_local_port;
470 	uint8_t			rp_reserved0;
471 	uint8_t			rp_width;
472 	uint32_t		rp_lane0_mapping;
473 	uint32_t		rp_lane1_mapping;
474 	uint32_t		rp_lane2_mapping;
475 	uint32_t		rp_lane3_mapping;
476 	uint8_t			rp_reserved1[44];
477 } __packed __aligned(4);
478 
479 struct mcx_reg_ppcnt {
480 	uint8_t			ppcnt_swid;
481 	uint8_t			ppcnt_local_port;
482 	uint8_t			ppcnt_pnat;
483 	uint8_t			ppcnt_grp;
484 #define MCX_REG_PPCNT_GRP_IEEE8023		0x00
485 #define MCX_REG_PPCNT_GRP_RFC2863		0x01
486 #define MCX_REG_PPCNT_GRP_RFC2819		0x02
487 #define MCX_REG_PPCNT_GRP_RFC3635		0x03
488 #define MCX_REG_PPCNT_GRP_PER_PRIO		0x10
489 #define MCX_REG_PPCNT_GRP_PER_TC		0x11
490 #define MCX_REG_PPCNT_GRP_PER_RX_BUFFER		0x11
491 
492 	uint8_t			ppcnt_clr;
493 	uint8_t			ppcnt_reserved1[2];
494 	uint8_t			ppcnt_prio_tc;
495 #define MCX_REG_PPCNT_CLR			(1 << 7)
496 
497 	uint8_t			ppcnt_counter_set[248];
498 } __packed __aligned(8);
499 CTASSERT(sizeof(struct mcx_reg_ppcnt) == 256);
500 CTASSERT((offsetof(struct mcx_reg_ppcnt, ppcnt_counter_set) %
501     sizeof(uint64_t)) == 0);
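
/*
 * The counter set is an array of 64-bit big-endian counters indexed by
 * the per-group enums below; e.g. the IEEE 802.3 frames_transmitted_ok
 * counter sits at offset frames_transmitted_ok * sizeof(uint64_t) into
 * ppcnt_counter_set, hence the alignment CTASSERT above.
 */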
502 
503 enum mcx_ppcnt_ieee8023 {
504 	frames_transmitted_ok,
505 	frames_received_ok,
506 	frame_check_sequence_errors,
507 	alignment_errors,
508 	octets_transmitted_ok,
509 	octets_received_ok,
510 	multicast_frames_xmitted_ok,
511 	broadcast_frames_xmitted_ok,
512 	multicast_frames_received_ok,
513 	broadcast_frames_received_ok,
514 	in_range_length_errors,
515 	out_of_range_length_field,
516 	frame_too_long_errors,
517 	symbol_error_during_carrier,
518 	mac_control_frames_transmitted,
519 	mac_control_frames_received,
520 	unsupported_opcodes_received,
521 	pause_mac_ctrl_frames_received,
522 	pause_mac_ctrl_frames_transmitted,
523 
524 	mcx_ppcnt_ieee8023_count
525 };
526 CTASSERT(mcx_ppcnt_ieee8023_count * sizeof(uint64_t) == 0x98);
527 
528 enum mcx_ppcnt_rfc2863 {
529 	in_octets,
530 	in_ucast_pkts,
531 	in_discards,
532 	in_errors,
533 	in_unknown_protos,
534 	out_octets,
535 	out_ucast_pkts,
536 	out_discards,
537 	out_errors,
538 	in_multicast_pkts,
539 	in_broadcast_pkts,
540 	out_multicast_pkts,
541 	out_broadcast_pkts,
542 
543 	mcx_ppcnt_rfc2863_count
544 };
545 CTASSERT(mcx_ppcnt_rfc2863_count * sizeof(uint64_t) == 0x68);
546 
547 enum mcx_ppcnt_rfc2819 {
548 	drop_events,
549 	octets,
550 	pkts,
551 	broadcast_pkts,
552 	multicast_pkts,
553 	crc_align_errors,
554 	undersize_pkts,
555 	oversize_pkts,
556 	fragments,
557 	jabbers,
558 	collisions,
559 	pkts64octets,
560 	pkts65to127octets,
561 	pkts128to255octets,
562 	pkts256to511octets,
563 	pkts512to1023octets,
564 	pkts1024to1518octets,
565 	pkts1519to2047octets,
566 	pkts2048to4095octets,
567 	pkts4096to8191octets,
568 	pkts8192to10239octets,
569 
570 	mcx_ppcnt_rfc2819_count
571 };
572 CTASSERT((mcx_ppcnt_rfc2819_count * sizeof(uint64_t)) == 0xa8);
573 
574 enum mcx_ppcnt_rfc3635 {
575 	dot3stats_alignment_errors,
576 	dot3stats_fcs_errors,
577 	dot3stats_single_collision_frames,
578 	dot3stats_multiple_collision_frames,
579 	dot3stats_sqe_test_errors,
580 	dot3stats_deferred_transmissions,
581 	dot3stats_late_collisions,
582 	dot3stats_excessive_collisions,
583 	dot3stats_internal_mac_transmit_errors,
584 	dot3stats_carrier_sense_errors,
585 	dot3stats_frame_too_longs,
586 	dot3stats_internal_mac_receive_errors,
587 	dot3stats_symbol_errors,
588 	dot3control_in_unknown_opcodes,
589 	dot3in_pause_frames,
590 	dot3out_pause_frames,
591 
592 	mcx_ppcnt_rfc3635_count
593 };
594 CTASSERT((mcx_ppcnt_rfc3635_count * sizeof(uint64_t)) == 0x80);
595 
596 struct mcx_reg_mcam {
597 	uint8_t			_reserved1[1];
598 	uint8_t			mcam_feature_group;
599 	uint8_t			_reserved2[1];
600 	uint8_t			mcam_access_reg_group;
601 	uint8_t			_reserved3[4];
602 	uint8_t			mcam_access_reg_cap_mask[16];
603 	uint8_t			_reserved4[16];
604 	uint8_t			mcam_feature_cap_mask[16];
605 	uint8_t			_reserved5[16];
606 } __packed __aligned(4);
607 
608 #define MCX_BITFIELD_BIT(bf, b)	(bf[(sizeof bf - 1) - (b / 8)] & (1 << (b % 8)))
609 
610 #define MCX_MCAM_FEATURE_CAP_SENSOR_MAP	6
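
/*
 * Usage sketch (illustrative): after reading the MCAM register back,
 * the sensor-map feature bit can be tested with
 *
 *	MCX_BITFIELD_BIT(mcam.mcam_feature_cap_mask,
 *	    MCX_MCAM_FEATURE_CAP_SENSOR_MAP)
 *
 * where "mcam" is a struct mcx_reg_mcam filled in by an access-reg
 * command.
 */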
611 
612 struct mcx_reg_mtcap {
613 	uint8_t			_reserved1[3];
614 	uint8_t			mtcap_sensor_count;
615 	uint8_t			_reserved2[4];
616 
617 	uint64_t		mtcap_sensor_map;
618 };
619 
620 struct mcx_reg_mtmp {
621 	uint8_t			_reserved1[2];
622 	uint16_t		mtmp_sensor_index;
623 
624 	uint8_t			_reserved2[2];
625 	uint16_t		mtmp_temperature;
626 
627 	uint16_t		mtmp_mte_mtr;
628 #define MCX_REG_MTMP_MTE		(1 << 15)
629 #define MCX_REG_MTMP_MTR		(1 << 14)
630 	uint16_t		mtmp_max_temperature;
631 
632 	uint16_t		mtmp_tee;
633 #define MCX_REG_MTMP_TEE_NOPE		(0 << 14)
634 #define MCX_REG_MTMP_TEE_GENERATE	(1 << 14)
635 #define MCX_REG_MTMP_TEE_GENERATE_ONE	(2 << 14)
636 	uint16_t		mtmp_temperature_threshold_hi;
637 
638 	uint8_t			_reserved3[2];
639 	uint16_t		mtmp_temperature_threshold_lo;
640 
641 	uint8_t			_reserved4[4];
642 
643 	uint8_t			mtmp_sensor_name[8];
644 };
645 CTASSERT(sizeof(struct mcx_reg_mtmp) == 0x20);
646 CTASSERT(offsetof(struct mcx_reg_mtmp, mtmp_sensor_name) == 0x18);
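
/*
 * MTMP temperatures are signed values in units of 0.125 degC per the
 * PRM, so a raw mtmp_temperature of 352 (0x160) corresponds to 44 degC.
 */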
647 
648 #define MCX_MCIA_EEPROM_BYTES	32
649 struct mcx_reg_mcia {
650 	uint8_t			rm_l;
651 	uint8_t			rm_module;
652 	uint8_t			rm_reserved0;
653 	uint8_t			rm_status;
654 	uint8_t			rm_i2c_addr;
655 	uint8_t			rm_page_num;
656 	uint16_t		rm_dev_addr;
657 	uint16_t		rm_reserved1;
658 	uint16_t		rm_size;
659 	uint32_t		rm_reserved2;
660 	uint8_t			rm_data[48];
661 } __packed __aligned(4);
662 
663 struct mcx_cmd_query_issi_in {
664 	uint16_t		cmd_opcode;
665 	uint8_t			cmd_reserved0[4];
666 	uint16_t		cmd_op_mod;
667 	uint8_t			cmd_reserved1[8];
668 } __packed __aligned(4);
669 
670 struct mcx_cmd_query_issi_il_out {
671 	uint8_t			cmd_status;
672 	uint8_t			cmd_reserved0[3];
673 	uint32_t		cmd_syndrome;
674 	uint8_t			cmd_reserved1[2];
675 	uint16_t		cmd_current_issi;
676 	uint8_t			cmd_reserved2[4];
677 } __packed __aligned(4);
678 
679 CTASSERT(sizeof(struct mcx_cmd_query_issi_il_out) == MCX_CMDQ_INLINE_DATASIZE);
680 
681 struct mcx_cmd_query_issi_mb_out {
682 	uint8_t			cmd_reserved2[16];
683 	uint8_t			cmd_supported_issi[80]; /* very big endian */
684 } __packed __aligned(4);
685 
686 CTASSERT(sizeof(struct mcx_cmd_query_issi_mb_out) <= MCX_CMDQ_MAILBOX_DATASIZE);
687 
688 struct mcx_cmd_set_issi_in {
689 	uint16_t		cmd_opcode;
690 	uint8_t			cmd_reserved0[4];
691 	uint16_t		cmd_op_mod;
692 	uint8_t			cmd_reserved1[2];
693 	uint16_t		cmd_current_issi;
694 	uint8_t			cmd_reserved2[4];
695 } __packed __aligned(4);
696 
697 CTASSERT(sizeof(struct mcx_cmd_set_issi_in) <= MCX_CMDQ_INLINE_DATASIZE);
698 
699 struct mcx_cmd_set_issi_out {
700 	uint8_t			cmd_status;
701 	uint8_t			cmd_reserved0[3];
702 	uint32_t		cmd_syndrome;
703 	uint8_t			cmd_reserved1[8];
704 } __packed __aligned(4);
705 
706 CTASSERT(sizeof(struct mcx_cmd_set_issi_out) <= MCX_CMDQ_INLINE_DATASIZE);
707 
708 struct mcx_cmd_query_pages_in {
709 	uint16_t		cmd_opcode;
710 	uint8_t			cmd_reserved0[4];
711 	uint16_t		cmd_op_mod;
712 #define MCX_CMD_QUERY_PAGES_BOOT	0x01
713 #define MCX_CMD_QUERY_PAGES_INIT	0x02
714 #define MCX_CMD_QUERY_PAGES_REGULAR	0x03
715 	uint8_t			cmd_reserved1[8];
716 } __packed __aligned(4);
717 
718 struct mcx_cmd_query_pages_out {
719 	uint8_t			cmd_status;
720 	uint8_t			cmd_reserved0[3];
721 	uint32_t		cmd_syndrome;
722 	uint8_t			cmd_reserved1[2];
723 	uint16_t		cmd_func_id;
724 	int32_t			cmd_num_pages;
725 } __packed __aligned(4);
726 
727 struct mcx_cmd_manage_pages_in {
728 	uint16_t		cmd_opcode;
729 	uint8_t			cmd_reserved0[4];
730 	uint16_t		cmd_op_mod;
731 #define MCX_CMD_MANAGE_PAGES_ALLOC_FAIL \
732 					0x00
733 #define MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS \
734 					0x01
735 #define MCX_CMD_MANAGE_PAGES_HCA_RETURN_PAGES \
736 					0x02
737 	uint8_t			cmd_reserved1[2];
738 	uint16_t		cmd_func_id;
739 	uint32_t		cmd_input_num_entries;
740 } __packed __aligned(4);
741 
742 CTASSERT(sizeof(struct mcx_cmd_manage_pages_in) == MCX_CMDQ_INLINE_DATASIZE);
743 
744 struct mcx_cmd_manage_pages_out {
745 	uint8_t			cmd_status;
746 	uint8_t			cmd_reserved0[3];
747 	uint32_t		cmd_syndrome;
748 	uint32_t		cmd_output_num_entries;
749 	uint8_t			cmd_reserved1[4];
750 } __packed __aligned(4);
751 
752 CTASSERT(sizeof(struct mcx_cmd_manage_pages_out) == MCX_CMDQ_INLINE_DATASIZE);
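
/*
 * Page-handling flow: QUERY_PAGES reports how many driver pages the
 * firmware wants (cmd_num_pages is signed; per the PRM a negative
 * count means the device has pages to hand back), and MANAGE_PAGES
 * then moves cmd_input_num_entries page addresses in the attached
 * mailboxes.
 */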
753 
754 struct mcx_cmd_query_hca_cap_in {
755 	uint16_t		cmd_opcode;
756 	uint8_t			cmd_reserved0[4];
757 	uint16_t		cmd_op_mod;
758 #define MCX_CMD_QUERY_HCA_CAP_MAX	(0x0 << 0)
759 #define MCX_CMD_QUERY_HCA_CAP_CURRENT	(0x1 << 0)
760 #define MCX_CMD_QUERY_HCA_CAP_DEVICE	(0x0 << 1)
761 #define MCX_CMD_QUERY_HCA_CAP_OFFLOAD	(0x1 << 1)
762 #define MCX_CMD_QUERY_HCA_CAP_FLOW	(0x7 << 1)
763 	uint8_t			cmd_reserved1[8];
764 } __packed __aligned(4);
765 
766 struct mcx_cmd_query_hca_cap_out {
767 	uint8_t			cmd_status;
768 	uint8_t			cmd_reserved0[3];
769 	uint32_t		cmd_syndrome;
770 	uint8_t			cmd_reserved1[8];
771 } __packed __aligned(4);
772 
773 #define MCX_HCA_CAP_LEN			0x1000
774 #define MCX_HCA_CAP_NMAILBOXES		\
775 	(MCX_HCA_CAP_LEN / MCX_CMDQ_MAILBOX_DATASIZE)
776 
777 #if __GNUC_PREREQ__(4, 3)
778 #define __counter__		__COUNTER__
779 #else
780 #define __counter__		__LINE__
781 #endif
782 
783 #define __token(_tok, _num)	_tok##_num
784 #define _token(_tok, _num)	__token(_tok, _num)
785 #define __reserved__		_token(__reserved, __counter__)
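
/*
 * Example expansion: each use of __reserved__ becomes a uniquely named
 * member, e.g. the first "uint8_t __reserved__[1];" in a struct pastes
 * to "uint8_t __reserved0[1];" when __COUNTER__ is available, falling
 * back to the line number on older compilers.
 */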
786 
787 struct mcx_cap_device {
788 	uint8_t			reserved0[16];
789 
790 	uint8_t			log_max_srq_sz;
791 	uint8_t			log_max_qp_sz;
792 	uint8_t			__reserved__[1];
793 	uint8_t			log_max_qp; /* 5 bits */
794 #define MCX_CAP_DEVICE_LOG_MAX_QP	0x1f
795 
796 	uint8_t			__reserved__[1];
797 	uint8_t			log_max_srq; /* 5 bits */
798 #define MCX_CAP_DEVICE_LOG_MAX_SRQ	0x1f
799 	uint8_t			__reserved__[2];
800 
801 	uint8_t			__reserved__[1];
802 	uint8_t			log_max_cq_sz;
803 	uint8_t			__reserved__[1];
804 	uint8_t			log_max_cq; /* 5 bits */
805 #define MCX_CAP_DEVICE_LOG_MAX_CQ	0x1f
806 
807 	uint8_t			log_max_eq_sz;
808 	uint8_t			log_max_mkey; /* 6 bits */
809 #define MCX_CAP_DEVICE_LOG_MAX_MKEY	0x3f
810 	uint8_t			__reserved__[1];
811 	uint8_t			log_max_eq; /* 4 bits */
812 #define MCX_CAP_DEVICE_LOG_MAX_EQ	0x0f
813 
814 	uint8_t			max_indirection;
815 	uint8_t			log_max_mrw_sz; /* 7 bits */
816 #define MCX_CAP_DEVICE_LOG_MAX_MRW_SZ	0x7f
817 	uint8_t			teardown_log_max_msf_list_size;
818 #define MCX_CAP_DEVICE_FORCE_TEARDOWN	0x80
819 #define MCX_CAP_DEVICE_LOG_MAX_MSF_LIST_SIZE \
820 					0x3f
821 	uint8_t			log_max_klm_list_size; /* 6 bits */
822 #define MCX_CAP_DEVICE_LOG_MAX_KLM_LIST_SIZE \
823 					0x3f
824 
825 	uint8_t			__reserved__[1];
826 	uint8_t			log_max_ra_req_dc; /* 6 bits */
827 #define MCX_CAP_DEVICE_LOG_MAX_REQ_DC	0x3f
828 	uint8_t			__reserved__[1];
829 	uint8_t			log_max_ra_res_dc; /* 6 bits */
830 #define MCX_CAP_DEVICE_LOG_MAX_RA_RES_DC \
831 					0x3f
832 
833 	uint8_t			__reserved__[1];
834 	uint8_t			log_max_ra_req_qp; /* 6 bits */
835 #define MCX_CAP_DEVICE_LOG_MAX_RA_REQ_QP \
836 					0x3f
837 	uint8_t			__reserved__[1];
838 	uint8_t			log_max_ra_res_qp; /* 6 bits */
839 #define MCX_CAP_DEVICE_LOG_MAX_RA_RES_QP \
840 					0x3f
841 
842 	uint8_t			flags1;
843 #define MCX_CAP_DEVICE_END_PAD		0x80
844 #define MCX_CAP_DEVICE_CC_QUERY_ALLOWED	0x40
845 #define MCX_CAP_DEVICE_CC_MODIFY_ALLOWED \
846 					0x20
847 #define MCX_CAP_DEVICE_START_PAD	0x10
848 #define MCX_CAP_DEVICE_128BYTE_CACHELINE \
849 					0x08
850 	uint8_t			__reserved__[1];
851 	uint16_t		gid_table_size;
852 
853 	uint16_t		flags2;
854 #define MCX_CAP_DEVICE_OUT_OF_SEQ_CNT	0x8000
855 #define MCX_CAP_DEVICE_VPORT_COUNTERS	0x4000
856 #define MCX_CAP_DEVICE_RETRANSMISSION_Q_COUNTERS \
857 					0x2000
858 #define MCX_CAP_DEVICE_DEBUG		0x1000
859 #define MCX_CAP_DEVICE_MODIFY_RQ_COUNTERS_SET_ID \
860 					0x8000
861 #define MCX_CAP_DEVICE_RQ_DELAY_DROP	0x4000
862 #define MCX_CAP_DEVICE_MAX_QP_CNT_MASK	0x03ff
863 	uint16_t		pkey_table_size;
864 
865 	uint8_t			flags3;
866 #define MCX_CAP_DEVICE_VPORT_GROUP_MANAGER \
867 					0x80
868 #define MCX_CAP_DEVICE_VHCA_GROUP_MANAGER \
869 					0x40
870 #define MCX_CAP_DEVICE_IB_VIRTUAL	0x20
871 #define MCX_CAP_DEVICE_ETH_VIRTUAL	0x10
872 #define MCX_CAP_DEVICE_ETS		0x04
873 #define MCX_CAP_DEVICE_NIC_FLOW_TABLE	0x02
874 #define MCX_CAP_DEVICE_ESWITCH_FLOW_TABLE \
875 					0x01
876 	uint8_t			local_ca_ack_delay; /* 5 bits */
877 #define MCX_CAP_DEVICE_LOCAL_CA_ACK_DELAY \
878 					0x1f
879 #define MCX_CAP_DEVICE_MCAM_REG		0x40
880 	uint8_t			port_type;
881 #define MCX_CAP_DEVICE_PORT_MODULE_EVENT \
882 					0x80
883 #define MCX_CAP_DEVICE_PORT_TYPE	0x03
884 #define MCX_CAP_DEVICE_PORT_TYPE_ETH	0x01
885 	uint8_t			num_ports;
886 
887 	uint8_t			snapshot_log_max_msg;
888 #define MCX_CAP_DEVICE_SNAPSHOT		0x80
889 #define MCX_CAP_DEVICE_LOG_MAX_MSG	0x1f
890 	uint8_t			max_tc; /* 4 bits */
891 #define MCX_CAP_DEVICE_MAX_TC		0x0f
892 	uint8_t			flags4;
893 #define MCX_CAP_DEVICE_TEMP_WARN_EVENT	0x80
894 #define MCX_CAP_DEVICE_DCBX		0x40
895 #define MCX_CAP_DEVICE_ROL_S		0x02
896 #define MCX_CAP_DEVICE_ROL_G		0x01
897 	uint8_t			wol;
898 #define MCX_CAP_DEVICE_WOL_S		0x40
899 #define MCX_CAP_DEVICE_WOL_G		0x20
900 #define MCX_CAP_DEVICE_WOL_A		0x10
901 #define MCX_CAP_DEVICE_WOL_B		0x08
902 #define MCX_CAP_DEVICE_WOL_M		0x04
903 #define MCX_CAP_DEVICE_WOL_U		0x02
904 #define MCX_CAP_DEVICE_WOL_P		0x01
905 
906 	uint16_t		stat_rate_support;
907 	uint8_t			__reserved__[1];
908 	uint8_t			cqe_version; /* 4 bits */
909 #define MCX_CAP_DEVICE_CQE_VERSION	0x0f
910 
911 	uint32_t		flags5;
912 #define MCX_CAP_DEVICE_COMPACT_ADDRESS_VECTOR \
913 					0x80000000
914 #define MCX_CAP_DEVICE_STRIDING_RQ	0x40000000
915 #define MCX_CAP_DEVICE_IPOIP_ENHANCED_OFFLOADS \
916 					0x10000000
917 #define MCX_CAP_DEVICE_IPOIP_IPOIP_OFFLOADS \
918 					0x08000000
919 #define MCX_CAP_DEVICE_DC_CONNECT_CP	0x00040000
920 #define MCX_CAP_DEVICE_DC_CNAK_DRACE	0x00020000
921 #define MCX_CAP_DEVICE_DRAIN_SIGERR	0x00010000
923 #define MCX_CAP_DEVICE_CMDIF_CHECKSUM	0x0000c000
924 #define MCX_CAP_DEVICE_SIGERR_QCE	0x00002000
925 #define MCX_CAP_DEVICE_WQ_SIGNATURE	0x00000800
926 #define MCX_CAP_DEVICE_SCTR_DATA_CQE	0x00000400
927 #define MCX_CAP_DEVICE_SHO		0x00000100
928 #define MCX_CAP_DEVICE_TPH		0x00000080
929 #define MCX_CAP_DEVICE_RF		0x00000040
930 #define MCX_CAP_DEVICE_DCT		0x00000020
931 #define MCX_CAP_DEVICE_QOS		0x00000010
932 #define MCX_CAP_DEVICE_ETH_NET_OFFLOADS	0x00000008
933 #define MCX_CAP_DEVICE_ROCE		0x00000004
934 #define MCX_CAP_DEVICE_ATOMIC		0x00000002
935 
936 	uint32_t		flags6;
937 #define MCX_CAP_DEVICE_CQ_OI		0x80000000
938 #define MCX_CAP_DEVICE_CQ_RESIZE	0x40000000
939 #define MCX_CAP_DEVICE_CQ_MODERATION	0x20000000
940 #define MCX_CAP_DEVICE_CQ_PERIOD_MODE_MODIFY \
941 					0x10000000
942 #define MCX_CAP_DEVICE_CQ_INVALIDATE	0x08000000
943 #define MCX_CAP_DEVICE_RESERVED_AT_255	0x04000000
944 #define MCX_CAP_DEVICE_CQ_EQ_REMAP	0x02000000
945 #define MCX_CAP_DEVICE_PG		0x01000000
946 #define MCX_CAP_DEVICE_BLOCK_LB_MC	0x00800000
947 #define MCX_CAP_DEVICE_EXPONENTIAL_BACKOFF \
948 					0x00400000
949 #define MCX_CAP_DEVICE_SCQE_BREAK_MODERATION \
950 					0x00200000
951 #define MCX_CAP_DEVICE_CQ_PERIOD_START_FROM_CQE \
952 					0x00100000
953 #define MCX_CAP_DEVICE_CD		0x00080000
954 #define MCX_CAP_DEVICE_ATM		0x00040000
955 #define MCX_CAP_DEVICE_APM		0x00020000
956 #define MCX_CAP_DEVICE_IMAICL		0x00010000
957 #define MCX_CAP_DEVICE_QKV		0x00000200
958 #define MCX_CAP_DEVICE_PKV		0x00000100
959 #define MCX_CAP_DEVICE_SET_DETH_SQPN	0x00000080
960 #define MCX_CAP_DEVICE_XRC		0x00000008
961 #define MCX_CAP_DEVICE_UD		0x00000004
962 #define MCX_CAP_DEVICE_UC		0x00000002
963 #define MCX_CAP_DEVICE_RC		0x00000001
964 
965 	uint8_t			uar_flags;
966 #define MCX_CAP_DEVICE_UAR_4K		0x80
967 	uint8_t			uar_sz;	/* 6 bits */
968 #define MCX_CAP_DEVICE_UAR_SZ		0x3f
969 	uint8_t			__reserved__[1];
970 	uint8_t			log_pg_sz;
971 
972 	uint8_t			flags7;
973 #define MCX_CAP_DEVICE_BF		0x80
974 #define MCX_CAP_DEVICE_DRIVER_VERSION	0x40
975 #define MCX_CAP_DEVICE_PAD_TX_ETH_PACKET \
976 					0x20
977 	uint8_t			log_bf_reg_size; /* 5 bits */
978 #define MCX_CAP_DEVICE_LOG_BF_REG_SIZE	0x1f
979 	uint8_t			__reserved__[2];
980 
981 	uint16_t		num_of_diagnostic_counters;
982 	uint16_t		max_wqe_sz_sq;
983 
984 	uint8_t			__reserved__[2];
985 	uint16_t		max_wqe_sz_rq;
986 
987 	uint8_t			__reserved__[2];
988 	uint16_t		max_wqe_sz_sq_dc;
989 
990 	uint32_t		max_qp_mcg; /* 25 bits */
991 #define MCX_CAP_DEVICE_MAX_QP_MCG	0x1ffffff
992 
993 	uint8_t			__reserved__[3];
994 	uint8_t			log_max_mcq;
995 
996 	uint8_t			log_max_transport_domain; /* 5 bits */
997 #define MCX_CAP_DEVICE_LOG_MAX_TRANSPORT_DOMAIN \
998 					0x1f
999 	uint8_t			log_max_pd; /* 5 bits */
1000 #define MCX_CAP_DEVICE_LOG_MAX_PD	0x1f
1001 	uint8_t			__reserved__[1];
1002 	uint8_t			log_max_xrcd; /* 5 bits */
1003 #define MCX_CAP_DEVICE_LOG_MAX_XRCD	0x1f
1004 
1005 	uint8_t			__reserved__[2];
1006 	uint16_t		max_flow_counter;
1007 
1008 	uint8_t			log_max_rq; /* 5 bits */
1009 #define MCX_CAP_DEVICE_LOG_MAX_RQ	0x1f
1010 	uint8_t			log_max_sq; /* 5 bits */
1011 #define MCX_CAP_DEVICE_LOG_MAX_SQ	0x1f
1012 	uint8_t			log_max_tir; /* 5 bits */
1013 #define MCX_CAP_DEVICE_LOG_MAX_TIR	0x1f
1014 	uint8_t			log_max_tis; /* 5 bits */
1015 #define MCX_CAP_DEVICE_LOG_MAX_TIS	0x1f
1016 
1017 	uint8_t 		flags8;
1018 #define MCX_CAP_DEVICE_BASIC_CYCLIC_RCV_WQE \
1019 					0x80
1020 #define MCX_CAP_DEVICE_LOG_MAX_RMP	0x1f
1021 	uint8_t			log_max_rqt; /* 5 bits */
1022 #define MCX_CAP_DEVICE_LOG_MAX_RQT	0x1f
1023 	uint8_t			log_max_rqt_size; /* 5 bits */
1024 #define MCX_CAP_DEVICE_LOG_MAX_RQT_SIZE	0x1f
1025 	uint8_t			log_max_tis_per_sq; /* 5 bits */
1026 #define MCX_CAP_DEVICE_LOG_MAX_TIS_PER_SQ \
1027 					0x1f
1028 
1029 	uint8_t			flags9;
1030 #define MCX_CAP_DEVICE_EXT_STRIDE_NUM_RANGES \
1031 					0x80
1032 #define MCX_CAP_DEVICE_LOG_MAX_STRIDE_SZ_RQ \
1033 					0x1f
1034 	uint8_t			log_min_stride_sz_rq; /* 5 bits */
1035 #define MCX_CAP_DEVICE_LOG_MIN_STRIDE_SZ_RQ \
1036 					0x1f
1037 	uint8_t			log_max_stride_sz_sq; /* 5 bits */
1038 #define MCX_CAP_DEVICE_LOG_MAX_STRIDE_SZ_SQ \
1039 					0x1f
1040 	uint8_t			log_min_stride_sz_sq; /* 5 bits */
1041 #define MCX_CAP_DEVICE_LOG_MIN_STRIDE_SZ_SQ \
1042 					0x1f
1043 
1044 	uint8_t			log_max_hairpin_queues;
1045 #define MCX_CAP_DEVICE_HAIRPIN		0x80
1046 #define MCX_CAP_DEVICE_LOG_MAX_HAIRPIN_QUEUES \
1047 					0x1f
1048 	uint8_t			log_min_hairpin_queues;
1049 #define MCX_CAP_DEVICE_LOG_MIN_HAIRPIN_QUEUES \
1050 					0x1f
1051 	uint8_t			log_max_hairpin_num_packets;
1052 #define MCX_CAP_DEVICE_LOG_MAX_HAIRPIN_NUM_PACKETS \
1053 					0x1f
1054 	uint8_t			log_max_wq_sz;
1055 #define MCX_CAP_DEVICE_LOG_MAX_WQ_SZ \
1056 					0x1f
1057 
1058 	uint8_t			log_min_hairpin_wq_data_sz;
1059 #define MCX_CAP_DEVICE_NIC_VPORT_CHANGE_EVENT \
1060 					0x80
1061 #define MCX_CAP_DEVICE_DISABLE_LOCAL_LB_UC \
1062 					0x40
1063 #define MCX_CAP_DEVICE_DISABLE_LOCAL_LB_MC \
1064 					0x20
1065 #define MCX_CAP_DEVICE_LOG_MIN_HAIRPIN_WQ_DATA_SZ \
1066 					0x1f
1067 	uint8_t			log_max_vlan_list;
1068 #define MCX_CAP_DEVICE_SYSTEM_IMAGE_GUID_MODIFIABLE \
1069 					0x80
1070 #define MCX_CAP_DEVICE_LOG_MAX_VLAN_LIST \
1071 					0x1f
1072 	uint8_t			log_max_current_mc_list;
1073 #define MCX_CAP_DEVICE_LOG_MAX_CURRENT_MC_LIST \
1074 					0x1f
1075 	uint8_t			log_max_current_uc_list;
1076 #define MCX_CAP_DEVICE_LOG_MAX_CURRENT_UC_LIST \
1077 					0x1f
1078 
1079 	uint8_t			__reserved__[4];
1080 
1081 	uint32_t		create_qp_start_hint; /* 24 bits */
1082 
1083 	uint8_t			log_max_uctx; /* 5 bits */
1084 #define MCX_CAP_DEVICE_LOG_MAX_UCTX	0x1f
1085 	uint8_t			log_max_umem; /* 5 bits */
1086 #define MCX_CAP_DEVICE_LOG_MAX_UMEM	0x1f
1087 	uint16_t		max_num_eqs;
1088 
1089 	uint8_t			log_max_l2_table; /* 5 bits */
1090 #define MCX_CAP_DEVICE_LOG_MAX_L2_TABLE	0x1f
1091 	uint8_t			__reserved__[1];
1092 	uint16_t		log_uar_page_sz;
1093 
1094 	uint8_t			__reserved__[8];
1095 
1096 	uint32_t		device_frequency_mhz;
1097 	uint32_t		device_frequency_khz;
1098 } __packed __aligned(8);
1099 
1100 CTASSERT(offsetof(struct mcx_cap_device, max_indirection) == 0x20);
1101 CTASSERT(offsetof(struct mcx_cap_device, flags1) == 0x2c);
1102 CTASSERT(offsetof(struct mcx_cap_device, flags2) == 0x30);
1103 CTASSERT(offsetof(struct mcx_cap_device, snapshot_log_max_msg) == 0x38);
1104 CTASSERT(offsetof(struct mcx_cap_device, flags5) == 0x40);
1105 CTASSERT(offsetof(struct mcx_cap_device, flags7) == 0x4c);
1106 CTASSERT(offsetof(struct mcx_cap_device, device_frequency_mhz) == 0x98);
1107 CTASSERT(offsetof(struct mcx_cap_device, device_frequency_khz) == 0x9c);
1108 CTASSERT(sizeof(struct mcx_cap_device) <= MCX_CMDQ_MAILBOX_DATASIZE);
1109 
1110 struct mcx_cmd_set_driver_version_in {
1111 	uint16_t		cmd_opcode;
1112 	uint8_t			cmd_reserved0[4];
1113 	uint16_t		cmd_op_mod;
1114 	uint8_t			cmd_reserved1[8];
1115 } __packed __aligned(4);
1116 
1117 struct mcx_cmd_set_driver_version_out {
1118 	uint8_t			cmd_status;
1119 	uint8_t			cmd_reserved0[3];
1120 	uint32_t		cmd_syndrome;
1121 	uint8_t			cmd_reserved1[8];
1122 } __packed __aligned(4);
1123 
1124 struct mcx_cmd_set_driver_version {
1125 	uint8_t			cmd_driver_version[64];
1126 } __packed __aligned(8);
1127 
1128 struct mcx_cmd_modify_nic_vport_context_in {
1129 	uint16_t		cmd_opcode;
1130 	uint8_t			cmd_reserved0[4];
1131 	uint16_t		cmd_op_mod;
1132 	uint8_t			cmd_reserved1[4];
1133 	uint32_t		cmd_field_select;
1134 #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_ADDR	0x04
1135 #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC	0x10
1136 #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU	0x40
1137 } __packed __aligned(4);
1138 
1139 struct mcx_cmd_modify_nic_vport_context_out {
1140 	uint8_t			cmd_status;
1141 	uint8_t			cmd_reserved0[3];
1142 	uint32_t		cmd_syndrome;
1143 	uint8_t			cmd_reserved1[8];
1144 } __packed __aligned(4);
1145 
1146 struct mcx_cmd_query_nic_vport_context_in {
1147 	uint16_t		cmd_opcode;
1148 	uint8_t			cmd_reserved0[4];
1149 	uint16_t		cmd_op_mod;
1150 	uint8_t			cmd_reserved1[4];
1151 	uint8_t			cmd_allowed_list_type;
1152 	uint8_t			cmd_reserved2[3];
1153 } __packed __aligned(4);
1154 
1155 struct mcx_cmd_query_nic_vport_context_out {
1156 	uint8_t			cmd_status;
1157 	uint8_t			cmd_reserved0[3];
1158 	uint32_t		cmd_syndrome;
1159 	uint8_t			cmd_reserved1[8];
1160 } __packed __aligned(4);
1161 
1162 struct mcx_nic_vport_ctx {
1163 	uint32_t		vp_min_wqe_inline_mode;
1164 	uint8_t			vp_reserved0[32];
1165 	uint32_t		vp_mtu;
1166 	uint8_t			vp_reserved1[200];
1167 	uint16_t		vp_flags;
1168 #define MCX_NIC_VPORT_CTX_LIST_UC_MAC			(0)
1169 #define MCX_NIC_VPORT_CTX_LIST_MC_MAC			(1 << 24)
1170 #define MCX_NIC_VPORT_CTX_LIST_VLAN			(2 << 24)
1171 #define MCX_NIC_VPORT_CTX_PROMISC_ALL			(1 << 13)
1172 #define MCX_NIC_VPORT_CTX_PROMISC_MCAST			(1 << 14)
1173 #define MCX_NIC_VPORT_CTX_PROMISC_UCAST			(1 << 15)
1174 	uint16_t		vp_allowed_list_size;
1175 	uint64_t		vp_perm_addr;
1176 	uint8_t			vp_reserved2[4];
1177 	/* allowed list follows */
1178 } __packed __aligned(4);
1179 
1180 struct mcx_counter {
1181 	uint64_t		packets;
1182 	uint64_t		octets;
1183 } __packed __aligned(4);
1184 
1185 struct mcx_nic_vport_counters {
1186 	struct mcx_counter	rx_err;
1187 	struct mcx_counter	tx_err;
1188 	uint8_t			reserved0[64]; /* 0x30 */
1189 	struct mcx_counter	rx_bcast;
1190 	struct mcx_counter	tx_bcast;
1191 	struct mcx_counter	rx_ucast;
1192 	struct mcx_counter	tx_ucast;
1193 	struct mcx_counter	rx_mcast;
1194 	struct mcx_counter	tx_mcast;
1195 	uint8_t			reserved1[0x210 - 0xd0];
1196 } __packed __aligned(4);
1197 
1198 struct mcx_cmd_query_vport_counters_in {
1199 	uint16_t		cmd_opcode;
1200 	uint8_t			cmd_reserved0[4];
1201 	uint16_t		cmd_op_mod;
1202 	uint8_t			cmd_reserved1[8];
1203 } __packed __aligned(4);
1204 
1205 struct mcx_cmd_query_vport_counters_mb_in {
1206 	uint8_t			cmd_reserved0[8];
1207 	uint8_t			cmd_clear;
1208 	uint8_t			cmd_reserved1[7];
1209 } __packed __aligned(4);
1210 
1211 struct mcx_cmd_query_vport_counters_out {
1212 	uint8_t			cmd_status;
1213 	uint8_t			cmd_reserved0[3];
1214 	uint32_t		cmd_syndrome;
1215 	uint8_t			cmd_reserved1[8];
1216 } __packed __aligned(4);
1217 
1218 struct mcx_cmd_query_flow_counter_in {
1219 	uint16_t		cmd_opcode;
1220 	uint8_t			cmd_reserved0[4];
1221 	uint16_t		cmd_op_mod;
1222 	uint8_t			cmd_reserved1[8];
1223 } __packed __aligned(4);
1224 
1225 struct mcx_cmd_query_flow_counter_mb_in {
1226 	uint8_t			cmd_reserved0[8];
1227 	uint8_t			cmd_clear;
1228 	uint8_t			cmd_reserved1[5];
1229 	uint16_t		cmd_flow_counter_id;
1230 } __packed __aligned(4);
1231 
1232 struct mcx_cmd_query_flow_counter_out {
1233 	uint8_t			cmd_status;
1234 	uint8_t			cmd_reserved0[3];
1235 	uint32_t		cmd_syndrome;
1236 	uint8_t			cmd_reserved1[8];
1237 } __packed __aligned(4);
1238 
1239 struct mcx_cmd_alloc_uar_in {
1240 	uint16_t		cmd_opcode;
1241 	uint8_t			cmd_reserved0[4];
1242 	uint16_t		cmd_op_mod;
1243 	uint8_t			cmd_reserved1[8];
1244 } __packed __aligned(4);
1245 
1246 struct mcx_cmd_alloc_uar_out {
1247 	uint8_t			cmd_status;
1248 	uint8_t			cmd_reserved0[3];
1249 	uint32_t		cmd_syndrome;
1250 	uint32_t		cmd_uar;
1251 	uint8_t			cmd_reserved1[4];
1252 } __packed __aligned(4);
1253 
1254 struct mcx_cmd_query_special_ctx_in {
1255 	uint16_t		cmd_opcode;
1256 	uint8_t			cmd_reserved0[4];
1257 	uint16_t		cmd_op_mod;
1258 	uint8_t			cmd_reserved1[8];
1259 } __packed __aligned(4);
1260 
1261 struct mcx_cmd_query_special_ctx_out {
1262 	uint8_t			cmd_status;
1263 	uint8_t			cmd_reserved0[3];
1264 	uint32_t		cmd_syndrome;
1265 	uint8_t			cmd_reserved1[4];
1266 	uint32_t		cmd_resd_lkey;
1267 } __packed __aligned(4);
1268 
1269 struct mcx_eq_ctx {
1270 	uint32_t		eq_status;
1271 #define MCX_EQ_CTX_STATE_SHIFT		8
1272 #define MCX_EQ_CTX_STATE_MASK		(0xf << MCX_EQ_CTX_STATE_SHIFT)
1273 #define MCX_EQ_CTX_STATE_ARMED		0x9
1274 #define MCX_EQ_CTX_STATE_FIRED		0xa
1275 #define MCX_EQ_CTX_OI_SHIFT		17
1276 #define MCX_EQ_CTX_OI			(1 << MCX_EQ_CTX_OI_SHIFT)
1277 #define MCX_EQ_CTX_EC_SHIFT		18
1278 #define MCX_EQ_CTX_EC			(1 << MCX_EQ_CTX_EC_SHIFT)
1279 #define MCX_EQ_CTX_STATUS_SHIFT		28
1280 #define MCX_EQ_CTX_STATUS_MASK		(0xf << MCX_EQ_CTX_STATUS_SHIFT)
1281 #define MCX_EQ_CTX_STATUS_OK		0x0
1282 #define MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE 0xa
1283 	uint32_t		eq_reserved1;
1284 	uint32_t		eq_page_offset;
1285 #define MCX_EQ_CTX_PAGE_OFFSET_SHIFT	5
1286 	uint32_t		eq_uar_size;
1287 #define MCX_EQ_CTX_UAR_PAGE_MASK	0xffffff
1288 #define MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT	24
1289 	uint32_t		eq_reserved2;
1290 	uint8_t			eq_reserved3[3];
1291 	uint8_t			eq_intr;
1292 	uint32_t		eq_log_page_size;
1293 #define MCX_EQ_CTX_LOG_PAGE_SIZE_SHIFT	24
1294 	uint32_t		eq_reserved4[3];
1295 	uint32_t		eq_consumer_counter;
1296 	uint32_t		eq_producer_counter;
1297 #define MCX_EQ_CTX_COUNTER_MASK		0xffffff
1298 	uint32_t		eq_reserved5[4];
1299 } __packed __aligned(4);
1300 
1301 CTASSERT(sizeof(struct mcx_eq_ctx) == 64);
1302 
1303 struct mcx_cmd_create_eq_in {
1304 	uint16_t		cmd_opcode;
1305 	uint8_t			cmd_reserved0[4];
1306 	uint16_t		cmd_op_mod;
1307 	uint8_t			cmd_reserved1[8];
1308 } __packed __aligned(4);
1309 
1310 struct mcx_cmd_create_eq_mb_in {
1311 	struct mcx_eq_ctx	cmd_eq_ctx;
1312 	uint8_t			cmd_reserved0[8];
1313 	uint64_t		cmd_event_bitmask;
1314 #define MCX_EVENT_TYPE_COMPLETION	0x00
1315 #define MCX_EVENT_TYPE_CQ_ERROR		0x04
1316 #define MCX_EVENT_TYPE_INTERNAL_ERROR	0x08
1317 #define MCX_EVENT_TYPE_PORT_CHANGE	0x09
1318 #define MCX_EVENT_TYPE_CMD_COMPLETION	0x0a
1319 #define MCX_EVENT_TYPE_PAGE_REQUEST	0x0b
1320 #define MCX_EVENT_TYPE_LAST_WQE		0x13
1321 	uint8_t			cmd_reserved1[176];
1322 } __packed __aligned(4);
1323 
1324 struct mcx_cmd_create_eq_out {
1325 	uint8_t			cmd_status;
1326 	uint8_t			cmd_reserved0[3];
1327 	uint32_t		cmd_syndrome;
1328 	uint32_t		cmd_eqn;
1329 	uint8_t			cmd_reserved1[4];
1330 } __packed __aligned(4);
1331 
1332 struct mcx_cmd_query_eq_in {
1333 	uint16_t		cmd_opcode;
1334 	uint8_t			cmd_reserved0[4];
1335 	uint16_t		cmd_op_mod;
1336 	uint32_t		cmd_eqn;
1337 	uint8_t			cmd_reserved1[4];
1338 } __packed __aligned(4);
1339 
1340 struct mcx_cmd_query_eq_out {
1341 	uint8_t			cmd_status;
1342 	uint8_t			cmd_reserved0[3];
1343 	uint32_t		cmd_syndrome;
1344 	uint8_t			cmd_reserved1[8];
1345 } __packed __aligned(4);
1346 
1347 struct mcx_eq_entry {
1348 	uint8_t			eq_reserved1;
1349 	uint8_t			eq_event_type;
1350 	uint8_t			eq_reserved2;
1351 	uint8_t			eq_event_sub_type;
1352 
1353 	uint8_t			eq_reserved3[28];
1354 	uint32_t		eq_event_data[7];
1355 	uint8_t			eq_reserved4[2];
1356 	uint8_t			eq_signature;
1357 	uint8_t			eq_owner;
1358 #define MCX_EQ_ENTRY_OWNER_INIT			1
1359 } __packed __aligned(4);
1360 
1361 CTASSERT(sizeof(struct mcx_eq_entry) == 64);
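
/*
 * Every eq_owner byte starts out as MCX_EQ_ENTRY_OWNER_INIT.  In the
 * usual ConnectX scheme the expected owner value then alternates on
 * each pass around the ring, letting the consumer spot fresh entries
 * without reading a producer register.
 */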
1362 
1363 struct mcx_cmd_alloc_pd_in {
1364 	uint16_t		cmd_opcode;
1365 	uint8_t			cmd_reserved0[4];
1366 	uint16_t		cmd_op_mod;
1367 	uint8_t			cmd_reserved1[8];
1368 } __packed __aligned(4);
1369 
1370 struct mcx_cmd_alloc_pd_out {
1371 	uint8_t			cmd_status;
1372 	uint8_t			cmd_reserved0[3];
1373 	uint32_t		cmd_syndrome;
1374 	uint32_t		cmd_pd;
1375 	uint8_t			cmd_reserved1[4];
1376 } __packed __aligned(4);
1377 
1378 struct mcx_cmd_alloc_td_in {
1379 	uint16_t		cmd_opcode;
1380 	uint8_t			cmd_reserved0[4];
1381 	uint16_t		cmd_op_mod;
1382 	uint8_t			cmd_reserved1[8];
1383 } __packed __aligned(4);
1384 
1385 struct mcx_cmd_alloc_td_out {
1386 	uint8_t			cmd_status;
1387 	uint8_t			cmd_reserved0[3];
1388 	uint32_t		cmd_syndrome;
1389 	uint32_t		cmd_tdomain;
1390 	uint8_t			cmd_reserved1[4];
1391 } __packed __aligned(4);
1392 
1393 struct mcx_cmd_create_tir_in {
1394 	uint16_t		cmd_opcode;
1395 	uint8_t			cmd_reserved0[4];
1396 	uint16_t		cmd_op_mod;
1397 	uint8_t			cmd_reserved1[8];
1398 } __packed __aligned(4);
1399 
1400 struct mcx_cmd_create_tir_mb_in {
1401 	uint8_t			cmd_reserved0[20];
1402 	uint32_t		cmd_disp_type;
1403 #define MCX_TIR_CTX_DISP_TYPE_DIRECT	0
1404 #define MCX_TIR_CTX_DISP_TYPE_INDIRECT	1
1405 #define MCX_TIR_CTX_DISP_TYPE_SHIFT	28
1406 	uint8_t			cmd_reserved1[8];
1407 	uint32_t		cmd_lro;
1408 	uint8_t			cmd_reserved2[8];
1409 	uint32_t		cmd_inline_rqn;
1410 	uint32_t		cmd_indir_table;
1411 	uint32_t		cmd_tdomain;
1412 #define MCX_TIR_CTX_HASH_TOEPLITZ	2
1413 #define MCX_TIR_CTX_HASH_SHIFT		28
1414 	uint8_t			cmd_rx_hash_key[40];
1415 	uint32_t		cmd_rx_hash_sel_outer;
1416 #define MCX_TIR_CTX_HASH_SEL_SRC_IP	(1 << 0)
1417 #define MCX_TIR_CTX_HASH_SEL_DST_IP	(1 << 1)
1418 #define MCX_TIR_CTX_HASH_SEL_SPORT	(1 << 2)
1419 #define MCX_TIR_CTX_HASH_SEL_DPORT	(1 << 3)
1420 #define MCX_TIR_CTX_HASH_SEL_IPV4	(0 << 31)
1421 #define MCX_TIR_CTX_HASH_SEL_IPV6	(1 << 31)
1422 #define MCX_TIR_CTX_HASH_SEL_TCP	(0 << 30)
1423 #define MCX_TIR_CTX_HASH_SEL_UDP	(1 << 30)
1424 	uint32_t		cmd_rx_hash_sel_inner;
1425 	uint8_t			cmd_reserved3[152];
1426 } __packed __aligned(4);
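
/*
 * Example selection (illustrative): hashing TCP/IPv4 flows over the
 * full 4-tuple sets cmd_rx_hash_sel_outer to
 * MCX_TIR_CTX_HASH_SEL_SRC_IP | MCX_TIR_CTX_HASH_SEL_DST_IP |
 * MCX_TIR_CTX_HASH_SEL_SPORT | MCX_TIR_CTX_HASH_SEL_DPORT |
 * MCX_TIR_CTX_HASH_SEL_IPV4 | MCX_TIR_CTX_HASH_SEL_TCP.
 */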
1427 
1428 struct mcx_cmd_create_tir_out {
1429 	uint8_t			cmd_status;
1430 	uint8_t			cmd_reserved0[3];
1431 	uint32_t		cmd_syndrome;
1432 	uint32_t		cmd_tirn;
1433 	uint8_t			cmd_reserved1[4];
1434 } __packed __aligned(4);
1435 
1436 struct mcx_cmd_destroy_tir_in {
1437 	uint16_t		cmd_opcode;
1438 	uint8_t			cmd_reserved0[4];
1439 	uint16_t		cmd_op_mod;
1440 	uint32_t		cmd_tirn;
1441 	uint8_t			cmd_reserved1[4];
1442 } __packed __aligned(4);
1443 
1444 struct mcx_cmd_destroy_tir_out {
1445 	uint8_t			cmd_status;
1446 	uint8_t			cmd_reserved0[3];
1447 	uint32_t		cmd_syndrome;
1448 	uint8_t			cmd_reserved1[8];
1449 } __packed __aligned(4);
1450 
1451 struct mcx_cmd_create_tis_in {
1452 	uint16_t		cmd_opcode;
1453 	uint8_t			cmd_reserved0[4];
1454 	uint16_t		cmd_op_mod;
1455 	uint8_t			cmd_reserved1[8];
1456 } __packed __aligned(4);
1457 
1458 struct mcx_cmd_create_tis_mb_in {
1459 	uint8_t			cmd_reserved[16];
1460 	uint32_t		cmd_prio;
1461 	uint8_t			cmd_reserved1[32];
1462 	uint32_t		cmd_tdomain;
1463 	uint8_t			cmd_reserved2[120];
1464 } __packed __aligned(4);
1465 
1466 struct mcx_cmd_create_tis_out {
1467 	uint8_t			cmd_status;
1468 	uint8_t			cmd_reserved0[3];
1469 	uint32_t		cmd_syndrome;
1470 	uint32_t		cmd_tisn;
1471 	uint8_t			cmd_reserved1[4];
1472 } __packed __aligned(4);
1473 
1474 struct mcx_cmd_destroy_tis_in {
1475 	uint16_t		cmd_opcode;
1476 	uint8_t			cmd_reserved0[4];
1477 	uint16_t		cmd_op_mod;
1478 	uint32_t		cmd_tisn;
1479 	uint8_t			cmd_reserved1[4];
1480 } __packed __aligned(4);
1481 
1482 struct mcx_cmd_destroy_tis_out {
1483 	uint8_t			cmd_status;
1484 	uint8_t			cmd_reserved0[3];
1485 	uint32_t		cmd_syndrome;
1486 	uint8_t			cmd_reserved1[8];
1487 } __packed __aligned(4);
1488 
1489 struct mcx_cmd_create_rqt_in {
1490 	uint16_t		cmd_opcode;
1491 	uint8_t			cmd_reserved0[4];
1492 	uint16_t		cmd_op_mod;
1493 	uint8_t			cmd_reserved1[8];
1494 } __packed __aligned(4);
1495 
1496 struct mcx_rqt_ctx {
1497 	uint8_t			cmd_reserved0[20];
1498 	uint16_t		cmd_reserved1;
1499 	uint16_t		cmd_rqt_max_size;
1500 	uint16_t		cmd_reserved2;
1501 	uint16_t		cmd_rqt_actual_size;
1502 	uint8_t			cmd_reserved3[212];
1503 } __packed __aligned(4);
1504 
1505 struct mcx_cmd_create_rqt_mb_in {
1506 	uint8_t			cmd_reserved0[16];
1507 	struct mcx_rqt_ctx	cmd_rqt;
1508 } __packed __aligned(4);
1509 
1510 struct mcx_cmd_create_rqt_out {
1511 	uint8_t			cmd_status;
1512 	uint8_t			cmd_reserved0[3];
1513 	uint32_t		cmd_syndrome;
1514 	uint32_t		cmd_rqtn;
1515 	uint8_t			cmd_reserved1[4];
1516 } __packed __aligned(4);
1517 
1518 struct mcx_cmd_destroy_rqt_in {
1519 	uint16_t		cmd_opcode;
1520 	uint8_t			cmd_reserved0[4];
1521 	uint16_t		cmd_op_mod;
1522 	uint32_t		cmd_rqtn;
1523 	uint8_t			cmd_reserved1[4];
1524 } __packed __aligned(4);
1525 
1526 struct mcx_cmd_destroy_rqt_out {
1527 	uint8_t			cmd_status;
1528 	uint8_t			cmd_reserved0[3];
1529 	uint32_t		cmd_syndrome;
1530 	uint8_t			cmd_reserved1[8];
1531 } __packed __aligned(4);
1532 
1533 struct mcx_cq_ctx {
1534 	uint32_t		cq_status;
1535 #define MCX_CQ_CTX_STATUS_SHIFT		28
1536 #define MCX_CQ_CTX_STATUS_MASK		(0xf << MCX_CQ_CTX_STATUS_SHIFT)
1537 #define MCX_CQ_CTX_STATUS_OK		0x0
1538 #define MCX_CQ_CTX_STATUS_OVERFLOW	0x9
1539 #define MCX_CQ_CTX_STATUS_WRITE_FAIL	0xa
1540 #define MCX_CQ_CTX_STATE_SHIFT		8
1541 #define MCX_CQ_CTX_STATE_MASK		(0xf << MCX_CQ_CTX_STATE_SHIFT)
1542 #define MCX_CQ_CTX_STATE_SOLICITED	0x6
1543 #define MCX_CQ_CTX_STATE_ARMED		0x9
1544 #define MCX_CQ_CTX_STATE_FIRED		0xa
1545 	uint32_t		cq_reserved1;
1546 	uint32_t		cq_page_offset;
1547 	uint32_t		cq_uar_size;
1548 #define MCX_CQ_CTX_UAR_PAGE_MASK	0xffffff
1549 #define MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT	24
1550 	uint32_t		cq_period_max_count;
1551 #define MCX_CQ_CTX_PERIOD_SHIFT		16
1552 	uint32_t		cq_eqn;
1553 	uint32_t		cq_log_page_size;
1554 #define MCX_CQ_CTX_LOG_PAGE_SIZE_SHIFT	24
1555 	uint32_t		cq_reserved2;
1556 	uint32_t		cq_last_notified;
1557 	uint32_t		cq_last_solicit;
1558 	uint32_t		cq_consumer_counter;
1559 	uint32_t		cq_producer_counter;
1560 	uint8_t			cq_reserved3[8];
1561 	uint64_t		cq_doorbell;
1562 } __packed __aligned(4);
1563 
1564 CTASSERT(sizeof(struct mcx_cq_ctx) == 64);
1565 
1566 struct mcx_cmd_create_cq_in {
1567 	uint16_t		cmd_opcode;
1568 	uint8_t			cmd_reserved0[4];
1569 	uint16_t		cmd_op_mod;
1570 	uint8_t			cmd_reserved1[8];
1571 } __packed __aligned(4);
1572 
1573 struct mcx_cmd_create_cq_mb_in {
1574 	struct mcx_cq_ctx	cmd_cq_ctx;
1575 	uint8_t			cmd_reserved1[192];
1576 } __packed __aligned(4);
1577 
1578 struct mcx_cmd_create_cq_out {
1579 	uint8_t			cmd_status;
1580 	uint8_t			cmd_reserved0[3];
1581 	uint32_t		cmd_syndrome;
1582 	uint32_t		cmd_cqn;
1583 	uint8_t			cmd_reserved1[4];
1584 } __packed __aligned(4);
1585 
1586 struct mcx_cmd_destroy_cq_in {
1587 	uint16_t		cmd_opcode;
1588 	uint8_t			cmd_reserved0[4];
1589 	uint16_t		cmd_op_mod;
1590 	uint32_t		cmd_cqn;
1591 	uint8_t			cmd_reserved1[4];
1592 } __packed __aligned(4);
1593 
1594 struct mcx_cmd_destroy_cq_out {
1595 	uint8_t			cmd_status;
1596 	uint8_t			cmd_reserved0[3];
1597 	uint32_t		cmd_syndrome;
1598 	uint8_t			cmd_reserved1[8];
1599 } __packed __aligned(4);
1600 
1601 struct mcx_cmd_query_cq_in {
1602 	uint16_t		cmd_opcode;
1603 	uint8_t			cmd_reserved0[4];
1604 	uint16_t		cmd_op_mod;
1605 	uint32_t		cmd_cqn;
1606 	uint8_t			cmd_reserved1[4];
1607 } __packed __aligned(4);
1608 
1609 struct mcx_cmd_query_cq_out {
1610 	uint8_t			cmd_status;
1611 	uint8_t			cmd_reserved0[3];
1612 	uint32_t		cmd_syndrome;
1613 	uint8_t			cmd_reserved1[8];
1614 } __packed __aligned(4);
1615 
1616 struct mcx_cq_entry {
1617 	uint32_t		__reserved__;
1618 	uint32_t		cq_lro;
1619 	uint32_t		cq_lro_ack_seq_num;
1620 	uint32_t		cq_rx_hash;
1621 	uint8_t			cq_rx_hash_type;
1622 	uint8_t			cq_ml_path;
1623 	uint16_t		__reserved__;
1624 	uint32_t		cq_checksum;
1625 	uint32_t		__reserved__;
1626 	uint32_t		cq_flags;
1627 #define MCX_CQ_ENTRY_FLAGS_L4_OK		(1 << 26)
1628 #define MCX_CQ_ENTRY_FLAGS_L3_OK		(1 << 25)
1629 #define MCX_CQ_ENTRY_FLAGS_L2_OK		(1 << 24)
1630 #define MCX_CQ_ENTRY_FLAGS_CV			(1 << 16)
1631 #define MCX_CQ_ENTRY_FLAGS_VLAN_MASK		(0xffff)
1632 
1633 	uint32_t		cq_lro_srqn;
1634 	uint32_t		__reserved__[2];
1635 	uint32_t		cq_byte_cnt;
1636 	uint64_t		cq_timestamp;
1637 	uint8_t			cq_rx_drops;
1638 	uint8_t			cq_flow_tag[3];
1639 	uint16_t		cq_wqe_count;
1640 	uint8_t			cq_signature;
1641 	uint8_t			cq_opcode_owner;
1642 #define MCX_CQ_ENTRY_FLAG_OWNER			(1 << 0)
1643 #define MCX_CQ_ENTRY_FLAG_SE			(1 << 1)
1644 #define MCX_CQ_ENTRY_FORMAT_SHIFT		2
1645 #define MCX_CQ_ENTRY_OPCODE_SHIFT		4
1646 
1647 #define MCX_CQ_ENTRY_FORMAT_NO_INLINE		0
1648 #define MCX_CQ_ENTRY_FORMAT_INLINE_32		1
1649 #define MCX_CQ_ENTRY_FORMAT_INLINE_64		2
1650 #define MCX_CQ_ENTRY_FORMAT_COMPRESSED		3
1651 
1652 #define MCX_CQ_ENTRY_OPCODE_REQ			0
1653 #define MCX_CQ_ENTRY_OPCODE_SEND		2
1654 #define MCX_CQ_ENTRY_OPCODE_REQ_ERR		13
1655 #define MCX_CQ_ENTRY_OPCODE_SEND_ERR		14
1656 #define MCX_CQ_ENTRY_OPCODE_INVALID		15
1657 
1658 } __packed __aligned(4);
1659 
1660 CTASSERT(sizeof(struct mcx_cq_entry) == 64);
1661 
1662 struct mcx_cq_doorbell {
1663 	uint32_t		 db_update_ci;
1664 	uint32_t		 db_arm_ci;
1665 #define MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT	28
1666 #define MCX_CQ_DOORBELL_ARM_CMD			(1 << 24)
1667 #define MCX_CQ_DOORBELL_ARM_CI_MASK		(0xffffff)
1668 } __packed __aligned(8);
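
/*
 * Arm sketch, assuming the usual ConnectX encoding: db_arm_ci is
 * written as htobe32((sn << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT) |
 * MCX_CQ_DOORBELL_ARM_CMD | (ci & MCX_CQ_DOORBELL_ARM_CI_MASK)),
 * where "sn" is the arm sequence number and "ci" the consumer index.
 */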
1669 
1670 struct mcx_wq_ctx {
1671 	uint8_t			 wq_type;
1672 #define MCX_WQ_CTX_TYPE_CYCLIC			(1 << 4)
1673 #define MCX_WQ_CTX_TYPE_SIGNATURE		(1 << 3)
1674 	uint8_t			 wq_reserved0[5];
1675 	uint16_t		 wq_lwm;
1676 	uint32_t		 wq_pd;
1677 	uint32_t		 wq_uar_page;
1678 	uint64_t		 wq_doorbell;
1679 	uint32_t		 wq_hw_counter;
1680 	uint32_t		 wq_sw_counter;
1681 	uint16_t		 wq_log_stride;
1682 	uint8_t			 wq_log_page_sz;
1683 	uint8_t			 wq_log_size;
1684 	uint8_t			 wq_reserved1[156];
1685 } __packed __aligned(4);
1686 
1687 CTASSERT(sizeof(struct mcx_wq_ctx) == 0xC0);
1688 
1689 struct mcx_sq_ctx {
1690 	uint32_t		sq_flags;
1691 #define MCX_SQ_CTX_RLKEY			(1U << 31)
1692 #define MCX_SQ_CTX_FRE_SHIFT			(1 << 29)
1693 #define MCX_SQ_CTX_FLUSH_IN_ERROR		(1 << 28)
1694 #define MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT		24
1695 #define MCX_SQ_CTX_STATE_SHIFT			20
1696 #define MCX_SQ_CTX_STATE_MASK			(0xf << 20)
1697 #define MCX_SQ_CTX_STATE_RST			0
1698 #define MCX_SQ_CTX_STATE_RDY			1
1699 #define MCX_SQ_CTX_STATE_ERR			3
1700 	uint32_t		sq_user_index;
1701 	uint32_t		sq_cqn;
1702 	uint32_t		sq_reserved1[5];
1703 	uint32_t		sq_tis_lst_sz;
1704 #define MCX_SQ_CTX_TIS_LST_SZ_SHIFT		16
1705 	uint32_t		sq_reserved2[2];
1706 	uint32_t		sq_tis_num;
1707 	struct mcx_wq_ctx	sq_wq;
1708 } __packed __aligned(4);
1709 
1710 struct mcx_sq_entry_seg {
1711 	uint32_t		sqs_byte_count;
1712 	uint32_t		sqs_lkey;
1713 	uint64_t		sqs_addr;
1714 } __packed __aligned(4);
1715 
1716 struct mcx_sq_entry {
1717 	/* control segment */
1718 	uint32_t		sqe_opcode_index;
1719 #define MCX_SQE_WQE_INDEX_SHIFT			8
1720 #define MCX_SQE_WQE_OPCODE_NOP			0x00
1721 #define MCX_SQE_WQE_OPCODE_SEND			0x0a
1722 	uint32_t		sqe_ds_sq_num;
1723 #define MCX_SQE_SQ_NUM_SHIFT			8
1724 	uint32_t		sqe_signature;
1725 #define MCX_SQE_SIGNATURE_SHIFT			24
1726 #define MCX_SQE_SOLICITED_EVENT			0x02
1727 #define MCX_SQE_CE_CQE_ON_ERR			0x00
1728 #define MCX_SQE_CE_CQE_FIRST_ERR		0x04
1729 #define MCX_SQE_CE_CQE_ALWAYS			0x08
1730 #define MCX_SQE_CE_CQE_SOLICIT			0x0C
1731 #define MCX_SQE_FM_NO_FENCE			0x00
1732 #define MCX_SQE_FM_SMALL_FENCE			0x40
1733 	uint32_t		sqe_mkey;
1734 
1735 	/* ethernet segment */
1736 	uint32_t		sqe_reserved1;
1737 	uint32_t		sqe_mss_csum;
1738 #define MCX_SQE_L4_CSUM				(1 << 31)
1739 #define MCX_SQE_L3_CSUM				(1 << 30)
1740 	uint32_t		sqe_reserved2;
1741 	uint16_t		sqe_inline_header_size;
1742 	uint16_t		sqe_inline_headers[9];
1743 
1744 	/* data segment */
1745 	struct mcx_sq_entry_seg sqe_segs[1];
1746 } __packed __aligned(64);
1747 
1748 CTASSERT(sizeof(struct mcx_sq_entry) == 64);
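
/*
 * Encoding sketch for sqe_ds_sq_num, assuming the usual ConnectX
 * layout: (sq_num << MCX_SQE_SQ_NUM_SHIFT) | ds, where "ds" counts the
 * 16-byte units the WQE occupies (4 for a minimal 64-byte entry).
 */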
1749 
1750 struct mcx_cmd_create_sq_in {
1751 	uint16_t		cmd_opcode;
1752 	uint8_t			cmd_reserved0[4];
1753 	uint16_t		cmd_op_mod;
1754 	uint8_t			cmd_reserved1[8];
1755 } __packed __aligned(4);
1756 
1757 struct mcx_cmd_create_sq_out {
1758 	uint8_t			cmd_status;
1759 	uint8_t			cmd_reserved0[3];
1760 	uint32_t		cmd_syndrome;
1761 	uint32_t		cmd_sqn;
1762 	uint8_t			cmd_reserved1[4];
1763 } __packed __aligned(4);
1764 
1765 struct mcx_cmd_modify_sq_in {
1766 	uint16_t		cmd_opcode;
1767 	uint8_t			cmd_reserved0[4];
1768 	uint16_t		cmd_op_mod;
1769 	uint32_t		cmd_sq_state;
1770 	uint8_t			cmd_reserved1[4];
1771 } __packed __aligned(4);
1772 
1773 struct mcx_cmd_modify_sq_mb_in {
1774 	uint32_t		cmd_modify_hi;
1775 	uint32_t		cmd_modify_lo;
1776 	uint8_t			cmd_reserved0[8];
1777 	struct mcx_sq_ctx	cmd_sq_ctx;
1778 } __packed __aligned(4);
1779 
1780 struct mcx_cmd_modify_sq_out {
1781 	uint8_t			cmd_status;
1782 	uint8_t			cmd_reserved0[3];
1783 	uint32_t		cmd_syndrome;
1784 	uint8_t			cmd_reserved1[8];
1785 } __packed __aligned(4);
1786 
1787 struct mcx_cmd_destroy_sq_in {
1788 	uint16_t		cmd_opcode;
1789 	uint8_t			cmd_reserved0[4];
1790 	uint16_t		cmd_op_mod;
1791 	uint32_t		cmd_sqn;
1792 	uint8_t			cmd_reserved1[4];
1793 } __packed __aligned(4);
1794 
1795 struct mcx_cmd_destroy_sq_out {
1796 	uint8_t			cmd_status;
1797 	uint8_t			cmd_reserved0[3];
1798 	uint32_t		cmd_syndrome;
1799 	uint8_t			cmd_reserved1[8];
1800 } __packed __aligned(4);
1801 
1802 
1803 struct mcx_rq_ctx {
1804 	uint32_t		rq_flags;
1805 #define MCX_RQ_CTX_RLKEY			(1U << 31)
1806 #define MCX_RQ_CTX_VLAN_STRIP_DIS		(1 << 28)
1807 #define MCX_RQ_CTX_MEM_RQ_TYPE_SHIFT		24
1808 #define MCX_RQ_CTX_STATE_SHIFT			20
1809 #define MCX_RQ_CTX_STATE_MASK			(0xf << 20)
1810 #define MCX_RQ_CTX_STATE_RST			0
1811 #define MCX_RQ_CTX_STATE_RDY			1
1812 #define MCX_RQ_CTX_STATE_ERR			3
1813 #define MCX_RQ_CTX_FLUSH_IN_ERROR		(1 << 18)
1814 	uint32_t		rq_user_index;
1815 	uint32_t		rq_cqn;
1816 	uint32_t		rq_reserved1;
1817 	uint32_t		rq_rmpn;
1818 	uint32_t		rq_reserved2[7];
1819 	struct mcx_wq_ctx	rq_wq;
1820 } __packed __aligned(4);
1821 
1822 struct mcx_rq_entry {
1823 	uint32_t		rqe_byte_count;
1824 	uint32_t		rqe_lkey;
1825 	uint64_t		rqe_addr;
1826 } __packed __aligned(16);
1827 
1828 struct mcx_cmd_create_rq_in {
1829 	uint16_t		cmd_opcode;
1830 	uint8_t			cmd_reserved0[4];
1831 	uint16_t		cmd_op_mod;
1832 	uint8_t			cmd_reserved1[8];
1833 } __packed __aligned(4);
1834 
1835 struct mcx_cmd_create_rq_out {
1836 	uint8_t			cmd_status;
1837 	uint8_t			cmd_reserved0[3];
1838 	uint32_t		cmd_syndrome;
1839 	uint32_t		cmd_rqn;
1840 	uint8_t			cmd_reserved1[4];
1841 } __packed __aligned(4);
1842 
1843 struct mcx_cmd_modify_rq_in {
1844 	uint16_t		cmd_opcode;
1845 	uint8_t			cmd_reserved0[4];
1846 	uint16_t		cmd_op_mod;
1847 	uint32_t		cmd_rq_state;
1848 	uint8_t			cmd_reserved1[4];
1849 } __packed __aligned(4);
1850 
1851 struct mcx_cmd_modify_rq_mb_in {
1852 	uint32_t		cmd_modify_hi;
1853 	uint32_t		cmd_modify_lo;
1854 	uint8_t			cmd_reserved0[8];
1855 	struct mcx_rq_ctx	cmd_rq_ctx;
1856 } __packed __aligned(4);
1857 
1858 struct mcx_cmd_modify_rq_out {
1859 	uint8_t			cmd_status;
1860 	uint8_t			cmd_reserved0[3];
1861 	uint32_t		cmd_syndrome;
1862 	uint8_t			cmd_reserved1[8];
1863 } __packed __aligned(4);
1864 
1865 struct mcx_cmd_destroy_rq_in {
1866 	uint16_t		cmd_opcode;
1867 	uint8_t			cmd_reserved0[4];
1868 	uint16_t		cmd_op_mod;
1869 	uint32_t		cmd_rqn;
1870 	uint8_t			cmd_reserved1[4];
1871 } __packed __aligned(4);
1872 
1873 struct mcx_cmd_destroy_rq_out {
1874 	uint8_t			cmd_status;
1875 	uint8_t			cmd_reserved0[3];
1876 	uint32_t		cmd_syndrome;
1877 	uint8_t			cmd_reserved1[8];
1878 } __packed __aligned(4);
1879 
1880 struct mcx_cmd_create_flow_table_in {
1881 	uint16_t		cmd_opcode;
1882 	uint8_t			cmd_reserved0[4];
1883 	uint16_t		cmd_op_mod;
1884 	uint8_t			cmd_reserved1[8];
1885 } __packed __aligned(4);
1886 
1887 struct mcx_flow_table_ctx {
1888 	uint8_t			ft_miss_action;
1889 	uint8_t			ft_level;
1890 	uint8_t			ft_reserved0;
1891 	uint8_t			ft_log_size;
1892 	uint32_t		ft_table_miss_id;
1893 	uint8_t			ft_reserved1[28];
1894 } __packed __aligned(4);
1895 
1896 struct mcx_cmd_create_flow_table_mb_in {
1897 	uint8_t			cmd_table_type;
1898 	uint8_t			cmd_reserved0[7];
1899 	struct mcx_flow_table_ctx cmd_ctx;
1900 } __packed __aligned(4);
1901 
1902 struct mcx_cmd_create_flow_table_out {
1903 	uint8_t			cmd_status;
1904 	uint8_t			cmd_reserved0[3];
1905 	uint32_t		cmd_syndrome;
1906 	uint32_t		cmd_table_id;
1907 	uint8_t			cmd_reserved1[4];
1908 } __packed __aligned(4);
1909 
1910 struct mcx_cmd_destroy_flow_table_in {
1911 	uint16_t		cmd_opcode;
1912 	uint8_t			cmd_reserved0[4];
1913 	uint16_t		cmd_op_mod;
1914 	uint8_t			cmd_reserved1[8];
1915 } __packed __aligned(4);
1916 
1917 struct mcx_cmd_destroy_flow_table_mb_in {
1918 	uint8_t			cmd_table_type;
1919 	uint8_t			cmd_reserved0[3];
1920 	uint32_t		cmd_table_id;
1921 	uint8_t			cmd_reserved1[40];
1922 } __packed __aligned(4);
1923 
1924 struct mcx_cmd_destroy_flow_table_out {
1925 	uint8_t			cmd_status;
1926 	uint8_t			cmd_reserved0[3];
1927 	uint32_t		cmd_syndrome;
1928 	uint8_t			cmd_reserved1[8];
1929 } __packed __aligned(4);
1930 
1931 struct mcx_cmd_set_flow_table_root_in {
1932 	uint16_t		cmd_opcode;
1933 	uint8_t			cmd_reserved0[4];
1934 	uint16_t		cmd_op_mod;
1935 	uint8_t			cmd_reserved1[8];
1936 } __packed __aligned(4);
1937 
1938 struct mcx_cmd_set_flow_table_root_mb_in {
1939 	uint8_t			cmd_table_type;
1940 	uint8_t			cmd_reserved0[3];
1941 	uint32_t		cmd_table_id;
1942 	uint8_t			cmd_reserved1[56];
1943 } __packed __aligned(4);
1944 
1945 struct mcx_cmd_set_flow_table_root_out {
1946 	uint8_t			cmd_status;
1947 	uint8_t			cmd_reserved0[3];
1948 	uint32_t		cmd_syndrome;
1949 	uint8_t			cmd_reserved1[8];
1950 } __packed __aligned(4);
1951 
1952 struct mcx_flow_match {
1953 	/* outer headers */
1954 	uint8_t			mc_src_mac[6];
1955 	uint16_t		mc_ethertype;
1956 	uint8_t			mc_dest_mac[6];
1957 	uint16_t		mc_first_vlan;
1958 	uint8_t			mc_ip_proto;
1959 	uint8_t			mc_ip_dscp_ecn;
1960 	uint8_t			mc_vlan_flags;
1961 #define MCX_FLOW_MATCH_IP_FRAG	(1 << 5)
1962 	uint8_t			mc_tcp_flags;
1963 	uint16_t		mc_tcp_sport;
1964 	uint16_t		mc_tcp_dport;
1965 	uint32_t		mc_reserved0;
1966 	uint16_t		mc_udp_sport;
1967 	uint16_t		mc_udp_dport;
1968 	uint8_t			mc_src_ip[16];
1969 	uint8_t			mc_dest_ip[16];
1970 
1971 	/* misc parameters */
1972 	uint8_t			mc_reserved1[8];
1973 	uint16_t		mc_second_vlan;
1974 	uint8_t			mc_reserved2[2];
1975 	uint8_t			mc_second_vlan_flags;
1976 	uint8_t			mc_reserved3[15];
1977 	uint32_t		mc_outer_ipv6_flow_label;
1978 	uint8_t			mc_reserved4[32];
1979 
1980 	uint8_t			mc_reserved[384];
1981 } __packed __aligned(4);
1982 
1983 CTASSERT(sizeof(struct mcx_flow_match) == 512);
1984 
1985 struct mcx_cmd_create_flow_group_in {
1986 	uint16_t		cmd_opcode;
1987 	uint8_t			cmd_reserved0[4];
1988 	uint16_t		cmd_op_mod;
1989 	uint8_t			cmd_reserved1[8];
1990 } __packed __aligned(4);
1991 
1992 struct mcx_cmd_create_flow_group_mb_in {
1993 	uint8_t			cmd_table_type;
1994 	uint8_t			cmd_reserved0[3];
1995 	uint32_t		cmd_table_id;
1996 	uint8_t			cmd_reserved1[4];
1997 	uint32_t		cmd_start_flow_index;
1998 	uint8_t			cmd_reserved2[4];
1999 	uint32_t		cmd_end_flow_index;
2000 	uint8_t			cmd_reserved3[23];
2001 	uint8_t			cmd_match_criteria_enable;
2002 #define MCX_CREATE_FLOW_GROUP_CRIT_OUTER	(1 << 0)
2003 #define MCX_CREATE_FLOW_GROUP_CRIT_MISC		(1 << 1)
2004 #define MCX_CREATE_FLOW_GROUP_CRIT_INNER	(1 << 2)
2005 	struct mcx_flow_match	cmd_match_criteria;
2006 	uint8_t			cmd_reserved4[448];
2007 } __packed __aligned(4);
2008 
2009 struct mcx_cmd_create_flow_group_out {
2010 	uint8_t			cmd_status;
2011 	uint8_t			cmd_reserved0[3];
2012 	uint32_t		cmd_syndrome;
2013 	uint32_t		cmd_group_id;
2014 	uint8_t			cmd_reserved1[4];
2015 } __packed __aligned(4);
2016 
2017 struct mcx_flow_ctx {
2018 	uint8_t			fc_reserved0[4];
2019 	uint32_t		fc_group_id;
2020 	uint32_t		fc_flow_tag;
2021 	uint32_t		fc_action;
2022 #define MCX_FLOW_CONTEXT_ACTION_ALLOW		(1 << 0)
2023 #define MCX_FLOW_CONTEXT_ACTION_DROP		(1 << 1)
2024 #define MCX_FLOW_CONTEXT_ACTION_FORWARD		(1 << 2)
2025 #define MCX_FLOW_CONTEXT_ACTION_COUNT		(1 << 3)
2026 	uint32_t		fc_dest_list_size;
2027 	uint32_t		fc_counter_list_size;
2028 	uint8_t			fc_reserved1[40];
2029 	struct mcx_flow_match	fc_match_value;
2030 	uint8_t			fc_reserved2[192];
2031 } __packed __aligned(4);
2032 
2033 #define MCX_FLOW_CONTEXT_DEST_TYPE_TABLE	(1 << 24)
2034 #define MCX_FLOW_CONTEXT_DEST_TYPE_TIR		(2 << 24)
2035 
2036 struct mcx_cmd_destroy_flow_group_in {
2037 	uint16_t		cmd_opcode;
2038 	uint8_t			cmd_reserved0[4];
2039 	uint16_t		cmd_op_mod;
2040 	uint8_t			cmd_reserved1[8];
2041 } __packed __aligned(4);
2042 
2043 struct mcx_cmd_destroy_flow_group_mb_in {
2044 	uint8_t			cmd_table_type;
2045 	uint8_t			cmd_reserved0[3];
2046 	uint32_t		cmd_table_id;
2047 	uint32_t		cmd_group_id;
2048 	uint8_t			cmd_reserved1[36];
2049 } __packed __aligned(4);
2050 
2051 struct mcx_cmd_destroy_flow_group_out {
2052 	uint8_t			cmd_status;
2053 	uint8_t			cmd_reserved0[3];
2054 	uint32_t		cmd_syndrome;
2055 	uint8_t			cmd_reserved1[8];
2056 } __packed __aligned(4);
2057 
2058 struct mcx_cmd_set_flow_table_entry_in {
2059 	uint16_t		cmd_opcode;
2060 	uint8_t			cmd_reserved0[4];
2061 	uint16_t		cmd_op_mod;
2062 	uint8_t			cmd_reserved1[8];
2063 } __packed __aligned(4);
2064 
2065 struct mcx_cmd_set_flow_table_entry_mb_in {
2066 	uint8_t			cmd_table_type;
2067 	uint8_t			cmd_reserved0[3];
2068 	uint32_t		cmd_table_id;
2069 	uint32_t		cmd_modify_enable_mask;
2070 	uint8_t			cmd_reserved1[4];
2071 	uint32_t		cmd_flow_index;
2072 	uint8_t			cmd_reserved2[28];
2073 	struct mcx_flow_ctx	cmd_flow_ctx;
2074 } __packed __aligned(4);
2075 
2076 struct mcx_cmd_set_flow_table_entry_out {
2077 	uint8_t			cmd_status;
2078 	uint8_t			cmd_reserved0[3];
2079 	uint32_t		cmd_syndrome;
2080 	uint8_t			cmd_reserved1[8];
2081 } __packed __aligned(4);
2082 
2083 struct mcx_cmd_query_flow_table_entry_in {
2084 	uint16_t		cmd_opcode;
2085 	uint8_t			cmd_reserved0[4];
2086 	uint16_t		cmd_op_mod;
2087 	uint8_t			cmd_reserved1[8];
2088 } __packed __aligned(4);
2089 
2090 struct mcx_cmd_query_flow_table_entry_mb_in {
2091 	uint8_t			cmd_table_type;
2092 	uint8_t			cmd_reserved0[3];
2093 	uint32_t		cmd_table_id;
2094 	uint8_t			cmd_reserved1[8];
2095 	uint32_t		cmd_flow_index;
2096 	uint8_t			cmd_reserved2[28];
2097 } __packed __aligned(4);
2098 
2099 struct mcx_cmd_query_flow_table_entry_out {
2100 	uint8_t			cmd_status;
2101 	uint8_t			cmd_reserved0[3];
2102 	uint32_t		cmd_syndrome;
2103 	uint8_t			cmd_reserved1[8];
2104 } __packed __aligned(4);
2105 
2106 struct mcx_cmd_query_flow_table_entry_mb_out {
2107 	uint8_t			cmd_reserved0[48];
2108 	struct mcx_flow_ctx	cmd_flow_ctx;
2109 } __packed __aligned(4);
2110 
2111 struct mcx_cmd_delete_flow_table_entry_in {
2112 	uint16_t		cmd_opcode;
2113 	uint8_t			cmd_reserved0[4];
2114 	uint16_t		cmd_op_mod;
2115 	uint8_t			cmd_reserved1[8];
2116 } __packed __aligned(4);
2117 
2118 struct mcx_cmd_delete_flow_table_entry_mb_in {
2119 	uint8_t			cmd_table_type;
2120 	uint8_t			cmd_reserved0[3];
2121 	uint32_t		cmd_table_id;
2122 	uint8_t			cmd_reserved1[8];
2123 	uint32_t		cmd_flow_index;
2124 	uint8_t			cmd_reserved2[28];
2125 } __packed __aligned(4);
2126 
2127 struct mcx_cmd_delete_flow_table_entry_out {
2128 	uint8_t			cmd_status;
2129 	uint8_t			cmd_reserved0[3];
2130 	uint32_t		cmd_syndrome;
2131 	uint8_t			cmd_reserved1[8];
2132 } __packed __aligned(4);
2133 
2134 struct mcx_cmd_query_flow_group_in {
2135 	uint16_t		cmd_opcode;
2136 	uint8_t			cmd_reserved0[4];
2137 	uint16_t		cmd_op_mod;
2138 	uint8_t			cmd_reserved1[8];
2139 } __packed __aligned(4);
2140 
2141 struct mcx_cmd_query_flow_group_mb_in {
2142 	uint8_t			cmd_table_type;
2143 	uint8_t			cmd_reserved0[3];
2144 	uint32_t		cmd_table_id;
2145 	uint32_t		cmd_group_id;
2146 	uint8_t			cmd_reserved1[36];
2147 } __packed __aligned(4);
2148 
2149 struct mcx_cmd_query_flow_group_out {
2150 	uint8_t			cmd_status;
2151 	uint8_t			cmd_reserved0[3];
2152 	uint32_t		cmd_syndrome;
2153 	uint8_t			cmd_reserved1[8];
2154 } __packed __aligned(4);
2155 
2156 struct mcx_cmd_query_flow_group_mb_out {
2157 	uint8_t			cmd_reserved0[12];
2158 	uint32_t		cmd_start_flow_index;
2159 	uint8_t			cmd_reserved1[4];
2160 	uint32_t		cmd_end_flow_index;
2161 	uint8_t			cmd_reserved2[20];
2162 	uint32_t		cmd_match_criteria_enable;
2163 	uint8_t			cmd_match_criteria[512];
2164 	uint8_t			cmd_reserved4[448];
2165 } __packed __aligned(4);
2166 
2167 struct mcx_cmd_query_flow_table_in {
2168 	uint16_t		cmd_opcode;
2169 	uint8_t			cmd_reserved0[4];
2170 	uint16_t		cmd_op_mod;
2171 	uint8_t			cmd_reserved1[8];
2172 } __packed __aligned(4);
2173 
2174 struct mcx_cmd_query_flow_table_mb_in {
2175 	uint8_t			cmd_table_type;
2176 	uint8_t			cmd_reserved0[3];
2177 	uint32_t		cmd_table_id;
2178 	uint8_t			cmd_reserved1[40];
2179 } __packed __aligned(4);
2180 
2181 struct mcx_cmd_query_flow_table_out {
2182 	uint8_t			cmd_status;
2183 	uint8_t			cmd_reserved0[3];
2184 	uint32_t		cmd_syndrome;
2185 	uint8_t			cmd_reserved1[8];
2186 } __packed __aligned(4);
2187 
2188 struct mcx_cmd_query_flow_table_mb_out {
2189 	uint8_t			cmd_reserved0[4];
2190 	struct mcx_flow_table_ctx cmd_ctx;
2191 } __packed __aligned(4);
2192 
2193 struct mcx_cmd_alloc_flow_counter_in {
2194 	uint16_t		cmd_opcode;
2195 	uint8_t			cmd_reserved0[4];
2196 	uint16_t		cmd_op_mod;
2197 	uint8_t			cmd_reserved1[8];
2198 } __packed __aligned(4);
2199 
2200 struct mcx_cmd_query_rq_in {
2201 	uint16_t		cmd_opcode;
2202 	uint8_t			cmd_reserved0[4];
2203 	uint16_t		cmd_op_mod;
2204 	uint32_t		cmd_rqn;
2205 	uint8_t			cmd_reserved1[4];
2206 } __packed __aligned(4);
2207 
2208 struct mcx_cmd_query_rq_out {
2209 	uint8_t			cmd_status;
2210 	uint8_t			cmd_reserved0[3];
2211 	uint32_t		cmd_syndrome;
2212 	uint8_t			cmd_reserved1[8];
2213 } __packed __aligned(4);
2214 
2215 struct mcx_cmd_query_rq_mb_out {
2216 	uint8_t			cmd_reserved0[16];
2217 	struct mcx_rq_ctx	cmd_ctx;
2218 };
2219 
2220 struct mcx_cmd_query_sq_in {
2221 	uint16_t		cmd_opcode;
2222 	uint8_t			cmd_reserved0[4];
2223 	uint16_t		cmd_op_mod;
2224 	uint32_t		cmd_sqn;
2225 	uint8_t			cmd_reserved1[4];
2226 } __packed __aligned(4);
2227 
2228 struct mcx_cmd_query_sq_out {
2229 	uint8_t			cmd_status;
2230 	uint8_t			cmd_reserved0[3];
2231 	uint32_t		cmd_syndrome;
2232 	uint8_t			cmd_reserved1[8];
2233 } __packed __aligned(4);
2234 
2235 struct mcx_cmd_query_sq_mb_out {
2236 	uint8_t			cmd_reserved0[16];
2237 	struct mcx_sq_ctx	cmd_ctx;
2238 };
2239 
2240 struct mcx_cmd_alloc_flow_counter_out {
2241 	uint8_t			cmd_status;
2242 	uint8_t			cmd_reserved0[3];
2243 	uint32_t		cmd_syndrome;
2244 	uint8_t			cmd_reserved1[2];
2245 	uint16_t		cmd_flow_counter_id;
2246 	uint8_t			cmd_reserved2[4];
2247 } __packed __aligned(4);
2248 
2249 struct mcx_wq_doorbell {
2250 	uint32_t		 db_recv_counter;
2251 	uint32_t		 db_send_counter;
2252 } __packed __aligned(8);
2253 
2254 struct mcx_dmamem {
2255 	bus_dmamap_t		 mxm_map;
2256 	bus_dma_segment_t	 mxm_seg;
2257 	int			 mxm_nsegs;
2258 	size_t			 mxm_size;
2259 	void			*mxm_kva;
2260 };
2261 #define MCX_DMA_MAP(_mxm)	((_mxm)->mxm_map)
2262 #define MCX_DMA_DVA(_mxm)	((_mxm)->mxm_map->dm_segs[0].ds_addr)
2263 #define MCX_DMA_KVA(_mxm)	((void *)(_mxm)->mxm_kva)
2264 #define MCX_DMA_OFF(_mxm, _off)	((void *)((char *)(_mxm)->mxm_kva + (_off)))
2265 #define MCX_DMA_LEN(_mxm)	((_mxm)->mxm_size)
2266 
2267 struct mcx_hwmem {
2268 	bus_dmamap_t		 mhm_map;
2269 	bus_dma_segment_t	*mhm_segs;
2270 	unsigned int		 mhm_seg_count;
2271 	unsigned int		 mhm_npages;
2272 };
2273 
2274 struct mcx_slot {
2275 	bus_dmamap_t		 ms_map;
2276 	struct mbuf		*ms_m;
2277 };
2278 
2279 struct mcx_eq {
2280 	int			 eq_n;
2281 	uint32_t		 eq_cons;
2282 	struct mcx_dmamem	 eq_mem;
2283 };
2284 
2285 struct mcx_cq {
2286 	int			 cq_n;
2287 	struct mcx_dmamem	 cq_mem;
2288 	bus_addr_t		 cq_doorbell;
2289 	uint32_t		 cq_cons;
2290 	uint32_t		 cq_count;
2291 };
2292 
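/*
 * Timestamp calibration state.  mcx_calibrate() periodically samples
 * the chip's free-running timer against the kernel's nanouptime and
 * derives c_ratio, which the rx path can use to convert completion
 * timestamps into host time.
 */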
2293 struct mcx_calibration {
2294 	uint64_t		 c_timestamp;	/* previous mcx chip time */
2295 	uint64_t		 c_uptime;	/* previous kernel nanouptime */
2296 	uint64_t		 c_tbase;	/* mcx chip time */
2297 	uint64_t		 c_ubase;	/* kernel nanouptime */
2298 	uint64_t		 c_ratio;
2299 };
2300 
2301 #define MCX_CALIBRATE_FIRST    2
2302 #define MCX_CALIBRATE_NORMAL   32
2303 
2304 struct mcx_rxring {
2305 	u_int			 rxr_total;
2306 	u_int			 rxr_inuse;
2307 };
2308 
2309 MBUFQ_HEAD(mcx_mbufq);
2310 
2311 struct mcx_rx {
2312 	struct mcx_softc	*rx_softc;
2313 
2314 	int			 rx_rqn;
2315 	struct mcx_dmamem	 rx_rq_mem;
2316 	struct mcx_slot		*rx_slots;
2317 	bus_addr_t		 rx_doorbell;
2318 
2319 	uint32_t		 rx_prod;
2320 	callout_t		 rx_refill;
2321 	struct mcx_rxring	 rx_rxr;
2322 } __aligned(64);
2323 
2324 struct mcx_tx {
2325 	struct mcx_softc	*tx_softc;
2326 	kmutex_t		 tx_lock;
2327 	pcq_t			*tx_pcq;
2328 	void			*tx_softint;
2329 
2330 	int			 tx_uar;
2331 	int			 tx_sqn;
2332 	struct mcx_dmamem	 tx_sq_mem;
2333 	struct mcx_slot		*tx_slots;
2334 	bus_addr_t		 tx_doorbell;
2335 	int			 tx_bf_offset;
2336 
2337 	uint32_t		 tx_cons;
2338 	uint32_t		 tx_prod;
2339 } __aligned(64);
2340 
2341 struct mcx_queues {
2342 	void			*q_ihc;
2343 	struct mcx_softc	*q_sc;
2344 	int			 q_uar;
2345 	int			 q_index;
2346 	struct mcx_rx		 q_rx;
2347 	struct mcx_tx		 q_tx;
2348 	struct mcx_cq		 q_cq;
2349 	struct mcx_eq		 q_eq;
2350 #if NKSTAT > 0
2351 	struct kstat		*q_kstat;
2352 #endif
2353 };
2354 
2355 struct mcx_flow_group {
2356 	int			 g_id;
2357 	int			 g_table;
2358 	int			 g_start;
2359 	int			 g_size;
2360 };
2361 
2362 #define MCX_FLOW_GROUP_PROMISC		0
2363 #define MCX_FLOW_GROUP_ALLMULTI		1
2364 #define MCX_FLOW_GROUP_MAC		2
2365 #define MCX_FLOW_GROUP_RSS_L4		3
2366 #define MCX_FLOW_GROUP_RSS_L3		4
2367 #define MCX_FLOW_GROUP_RSS_NONE		5
2368 #define	MCX_NUM_FLOW_GROUPS		6
2369 
2370 #define MCX_HASH_SEL_L3		MCX_TIR_CTX_HASH_SEL_SRC_IP | \
2371 				MCX_TIR_CTX_HASH_SEL_DST_IP
2372 #define MCX_HASH_SEL_L4		MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_SPORT | \
2373 				MCX_TIR_CTX_HASH_SEL_DPORT
2374 
2375 #define MCX_RSS_HASH_SEL_V4_TCP MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_TCP  |\
2376 				MCX_TIR_CTX_HASH_SEL_IPV4
2377 #define MCX_RSS_HASH_SEL_V6_TCP	MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_TCP | \
2378 				MCX_TIR_CTX_HASH_SEL_IPV6
2379 #define MCX_RSS_HASH_SEL_V4_UDP	MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_UDP | \
2380 				MCX_TIR_CTX_HASH_SEL_IPV4
2381 #define MCX_RSS_HASH_SEL_V6_UDP	MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_UDP | \
2382 				MCX_TIR_CTX_HASH_SEL_IPV6
2383 #define MCX_RSS_HASH_SEL_V4	MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_IPV4
2384 #define MCX_RSS_HASH_SEL_V6	MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_IPV6
2385 
2386 /*
2387  * There are a few different pieces involved in configuring RSS.
2388  * A Receive Queue Table (RQT) is the indirection table that maps packets to
2389  * different rx queues based on a hash value.  We only create one, because
2390  * we want to scatter any traffic we can apply RSS to across all our rx
2391  * queues.  Anything else will only be delivered to the first rx queue,
2392  * which doesn't require an RQT.
2393  *
2394  * A Transport Interface Receive (TIR) delivers packets to either a single rx
2395  * queue or an RQT, and in the latter case, specifies the set of fields
2396  * hashed, the hash function, and the hash key.  We need one of these for each
2397  * type of RSS traffic - v4 TCP, v6 TCP, v4 UDP, v6 UDP, other v4, other v6,
2398  * and one for non-RSS traffic.
2399  *
2400  * Flow tables hold flow table entries in sequence.  The first entry that
2401  * matches a packet is applied, sending the packet to either another flow
2402  * table or a TIR.  We use one flow table to select packets based on
2403  * destination MAC address, and a second to apply RSS.  The entries in the
2404  * first table send matching packets to the second, and the entries in the
2405  * RSS table send packets to RSS TIRs if possible, or the non-RSS TIR.
2406  *
2407  * The flow table entry that delivers packets to an RSS TIR must include match
2408  * criteria that ensure packets delivered to the TIR include all the fields
2409  * that the TIR hashes on - so for a v4 TCP TIR, the flow table entry must
2410  * only accept v4 TCP packets.  Accordingly, we need flow table entries for
2411  * each TIR.
2412  *
2413  * All of this is a lot more flexible than we need; a simple array is
2414  * enough to describe the configurations we use.
2415  *
2416  * An RSS config creates a TIR with hashing enabled on a set of fields,
2417  * pointing to either the first rx queue or the RQT containing all the rx
2418  * queues, and a flow table entry that matches on an ether type and
2419  * optionally an ip proto, that delivers packets to the TIR.
2420  */
2421 static struct mcx_rss_rule {
2422 	int			hash_sel;
2423 	int			flow_group;
2424 	int			ethertype;
2425 	int			ip_proto;
2426 } mcx_rss_config[] = {
2427 	/* udp and tcp for v4/v6 */
2428 	{ MCX_RSS_HASH_SEL_V4_TCP, MCX_FLOW_GROUP_RSS_L4,
2429 	  ETHERTYPE_IP, IPPROTO_TCP },
2430 	{ MCX_RSS_HASH_SEL_V6_TCP, MCX_FLOW_GROUP_RSS_L4,
2431 	  ETHERTYPE_IPV6, IPPROTO_TCP },
2432 	{ MCX_RSS_HASH_SEL_V4_UDP, MCX_FLOW_GROUP_RSS_L4,
2433 	  ETHERTYPE_IP, IPPROTO_UDP },
2434 	{ MCX_RSS_HASH_SEL_V6_UDP, MCX_FLOW_GROUP_RSS_L4,
2435 	  ETHERTYPE_IPV6, IPPROTO_UDP },
2436 
2437 	/* other v4/v6 */
2438 	{ MCX_RSS_HASH_SEL_V4, MCX_FLOW_GROUP_RSS_L3,
2439 	  ETHERTYPE_IP, 0 },
2440 	{ MCX_RSS_HASH_SEL_V6, MCX_FLOW_GROUP_RSS_L3,
2441 	  ETHERTYPE_IPV6, 0 },
2442 
2443 	/* non v4/v6 */
2444 	{ 0, MCX_FLOW_GROUP_RSS_NONE, 0, 0 }
2445 };
2446 
2447 struct mcx_softc {
2448 	device_t		 sc_dev;
2449 	struct ethercom		 sc_ec;
2450 	struct ifmedia		 sc_media;
2451 	uint64_t		 sc_media_status;
2452 	uint64_t		 sc_media_active;
2453 	kmutex_t		 sc_media_mutex;
2454 
2455 	pci_chipset_tag_t	 sc_pc;
2456 	pci_intr_handle_t	*sc_intrs;
2457 	void			*sc_ihc;
2458 	pcitag_t		 sc_tag;
2459 
2460 	bus_dma_tag_t		 sc_dmat;
2461 	bus_space_tag_t		 sc_memt;
2462 	bus_space_handle_t	 sc_memh;
2463 	bus_size_t		 sc_mems;
2464 
2465 	struct mcx_dmamem	 sc_cmdq_mem;
2466 	unsigned int		 sc_cmdq_mask;
2467 	unsigned int		 sc_cmdq_size;
2468 
2469 	unsigned int		 sc_cmdq_token;
2470 
2471 	struct mcx_hwmem	 sc_boot_pages;
2472 	struct mcx_hwmem	 sc_init_pages;
2473 	struct mcx_hwmem	 sc_regular_pages;
2474 
2475 	int			 sc_uar;
2476 	int			 sc_pd;
2477 	int			 sc_tdomain;
2478 	uint32_t		 sc_lkey;
2479 	int			 sc_tis;
2480 	int			 sc_tir[__arraycount(mcx_rss_config)];
2481 	int			 sc_rqt;
2482 
2483 	struct mcx_dmamem	 sc_doorbell_mem;
2484 
2485 	struct mcx_eq		 sc_admin_eq;
2486 	struct mcx_eq		 sc_queue_eq;
2487 
2488 	int			 sc_hardmtu;
2489 	int			 sc_rxbufsz;
2490 
2491 	int			 sc_bf_size;
2492 	int			 sc_max_rqt_size;
2493 
2494 	struct workqueue	*sc_workq;
2495 	struct work		 sc_port_change;
2496 
2497 	int			 sc_mac_flow_table_id;
2498 	int			 sc_rss_flow_table_id;
2499 	struct mcx_flow_group	 sc_flow_group[MCX_NUM_FLOW_GROUPS];
2500 	int			 sc_promisc_flow_enabled;
2501 	int			 sc_allmulti_flow_enabled;
2502 	int			 sc_mcast_flow_base;
2503 	int			 sc_extra_mcast;
2504 	uint8_t			 sc_mcast_flows[MCX_NUM_MCAST_FLOWS][ETHER_ADDR_LEN];
2505 
2506 	struct mcx_calibration	 sc_calibration[2];
2507 	unsigned int		 sc_calibration_gen;
2508 	callout_t		 sc_calibrate;
2509 	uint32_t		 sc_mhz;
2510 	uint32_t		 sc_khz;
2511 
2512 	struct mcx_queues	*sc_queues;
2513 	unsigned int		 sc_nqueues;
2514 
2515 	int			 sc_mcam_reg;
2516 
2517 #if NKSTAT > 0
2518 	struct kstat		*sc_kstat_ieee8023;
2519 	struct kstat		*sc_kstat_rfc2863;
2520 	struct kstat		*sc_kstat_rfc2819;
2521 	struct kstat		*sc_kstat_rfc3635;
2522 	unsigned int		 sc_kstat_mtmp_count;
2523 	struct kstat		**sc_kstat_mtmp;
2524 #endif
2525 
2526 	struct timecounter	 sc_timecounter;
2527 };
2528 #define DEVNAME(_sc) device_xname((_sc)->sc_dev)
2529 
2530 static int	mcx_match(device_t, cfdata_t, void *);
2531 static void	mcx_attach(device_t, device_t, void *);
2532 
2533 static void *	mcx_establish_intr(struct mcx_softc *, int, kcpuset_t *,
2534 		    int (*)(void *), void *, const char *);
2535 
2536 static void	mcx_rxr_init(struct mcx_rxring *, u_int, u_int);
2537 static u_int	mcx_rxr_get(struct mcx_rxring *, u_int);
2538 static void	mcx_rxr_put(struct mcx_rxring *, u_int);
2539 static u_int	mcx_rxr_inuse(struct mcx_rxring *);
2540 
2541 #if NKSTAT > 0
2542 static void	mcx_kstat_attach(struct mcx_softc *);
2543 #endif
2544 
2545 static void	mcx_timecounter_attach(struct mcx_softc *);
2546 
2547 static int	mcx_version(struct mcx_softc *);
2548 static int	mcx_init_wait(struct mcx_softc *);
2549 static int	mcx_enable_hca(struct mcx_softc *);
2550 static int	mcx_teardown_hca(struct mcx_softc *, uint16_t);
2551 static int	mcx_access_hca_reg(struct mcx_softc *, uint16_t, int, void *,
2552 		    int);
2553 static int	mcx_issi(struct mcx_softc *);
2554 static int	mcx_pages(struct mcx_softc *, struct mcx_hwmem *, uint16_t);
2555 static int	mcx_hca_max_caps(struct mcx_softc *);
2556 static int	mcx_hca_set_caps(struct mcx_softc *);
2557 static int	mcx_init_hca(struct mcx_softc *);
2558 static int	mcx_set_driver_version(struct mcx_softc *);
2559 static int	mcx_iff(struct mcx_softc *);
2560 static int	mcx_alloc_uar(struct mcx_softc *, int *);
2561 static int	mcx_alloc_pd(struct mcx_softc *);
2562 static int	mcx_alloc_tdomain(struct mcx_softc *);
2563 static int	mcx_create_eq(struct mcx_softc *, struct mcx_eq *, int,
2564 		    uint64_t, int);
2565 static int	mcx_query_nic_vport_context(struct mcx_softc *, uint8_t *);
2566 static int	mcx_query_special_contexts(struct mcx_softc *);
2567 static int	mcx_set_port_mtu(struct mcx_softc *, int);
2568 static int	mcx_create_cq(struct mcx_softc *, struct mcx_cq *, int, int,
2569 		    int);
2570 static int	mcx_destroy_cq(struct mcx_softc *, struct mcx_cq *);
2571 static int	mcx_create_sq(struct mcx_softc *, struct mcx_tx *, int, int,
2572 		    int);
2573 static int	mcx_destroy_sq(struct mcx_softc *, struct mcx_tx *);
2574 static int	mcx_ready_sq(struct mcx_softc *, struct mcx_tx *);
2575 static int	mcx_create_rq(struct mcx_softc *, struct mcx_rx *, int, int);
2576 static int	mcx_destroy_rq(struct mcx_softc *, struct mcx_rx *);
2577 static int	mcx_ready_rq(struct mcx_softc *, struct mcx_rx *);
2578 static int	mcx_create_tir_direct(struct mcx_softc *, struct mcx_rx *,
2579 		    int *);
2580 static int	mcx_create_tir_indirect(struct mcx_softc *, int, uint32_t,
2581 		    int *);
2582 static int	mcx_destroy_tir(struct mcx_softc *, int);
2583 static int	mcx_create_tis(struct mcx_softc *, int *);
2584 static int	mcx_destroy_tis(struct mcx_softc *, int);
2585 static int	mcx_create_rqt(struct mcx_softc *, int, int *, int *);
2586 static int	mcx_destroy_rqt(struct mcx_softc *, int);
2587 static int	mcx_create_flow_table(struct mcx_softc *, int, int, int *);
2588 static int	mcx_set_flow_table_root(struct mcx_softc *, int);
2589 static int	mcx_destroy_flow_table(struct mcx_softc *, int);
2590 static int	mcx_create_flow_group(struct mcx_softc *, int, int, int,
2591 		    int, int, struct mcx_flow_match *);
2592 static int	mcx_destroy_flow_group(struct mcx_softc *, int);
2593 static int	mcx_set_flow_table_entry_mac(struct mcx_softc *, int, int,
2594 		    const uint8_t *, uint32_t);
2595 static int	mcx_set_flow_table_entry_proto(struct mcx_softc *, int, int,
2596 		    int, int, uint32_t);
2597 static int	mcx_delete_flow_table_entry(struct mcx_softc *, int, int);
2598 
2599 #if NKSTAT > 0
2600 static int	mcx_query_rq(struct mcx_softc *, struct mcx_rx *, struct mcx_rq_ctx *);
2601 static int	mcx_query_sq(struct mcx_softc *, struct mcx_tx *, struct mcx_sq_ctx *);
2602 static int	mcx_query_cq(struct mcx_softc *, struct mcx_cq *, struct mcx_cq_ctx *);
2603 static int	mcx_query_eq(struct mcx_softc *, struct mcx_eq *, struct mcx_eq_ctx *);
2604 #endif
2605 
2606 #if 0
2607 static int	mcx_dump_flow_table(struct mcx_softc *, int);
2608 static int	mcx_dump_flow_table_entry(struct mcx_softc *, int, int);
2609 static int	mcx_dump_flow_group(struct mcx_softc *, int);
2610 #endif
2611 
2612 
2613 /*
2614 static void	mcx_cmdq_dump(const struct mcx_cmdq_entry *);
2615 static void	mcx_cmdq_mbox_dump(struct mcx_dmamem *, int);
2616 */
2617 static void	mcx_refill(void *);
2618 static int	mcx_process_rx(struct mcx_softc *, struct mcx_rx *,
2619 		    struct mcx_cq_entry *, struct mcx_mbufq *,
2620 		    const struct mcx_calibration *);
2621 static int	mcx_process_txeof(struct mcx_softc *, struct mcx_tx *,
2622 		    struct mcx_cq_entry *);
2623 static void	mcx_process_cq(struct mcx_softc *, struct mcx_queues *,
2624 		    struct mcx_cq *);
2625 
2626 static void	mcx_arm_cq(struct mcx_softc *, struct mcx_cq *, int);
2627 static void	mcx_arm_eq(struct mcx_softc *, struct mcx_eq *, int);
2628 static int	mcx_admin_intr(void *);
2629 static int	mcx_cq_intr(void *);
2630 
2631 static int	mcx_init(struct ifnet *);
2632 static void	mcx_stop(struct ifnet *, int);
2633 static int	mcx_ioctl(struct ifnet *, u_long, void *);
2634 static void	mcx_start(struct ifnet *);
2635 static int	mcx_transmit(struct ifnet *, struct mbuf *);
2636 static void	mcx_deferred_transmit(void *);
2637 static void	mcx_media_add_types(struct mcx_softc *);
2638 static void	mcx_media_status(struct ifnet *, struct ifmediareq *);
2639 static int	mcx_media_change(struct ifnet *);
2640 #if 0
2641 static int	mcx_get_sffpage(struct ifnet *, struct if_sffpage *);
2642 #endif
2643 static void	mcx_port_change(struct work *, void *);
2644 
2645 static void	mcx_calibrate_first(struct mcx_softc *);
2646 static void	mcx_calibrate(void *);
2647 
2648 static inline uint32_t
2649 		mcx_rd(struct mcx_softc *, bus_size_t);
2650 static inline void
2651 		mcx_wr(struct mcx_softc *, bus_size_t, uint32_t);
2652 static inline void
2653 		mcx_bar(struct mcx_softc *, bus_size_t, bus_size_t, int);
2654 
2655 static uint64_t	mcx_timer(struct mcx_softc *);
2656 
2657 static int	mcx_dmamem_alloc(struct mcx_softc *, struct mcx_dmamem *,
2658 		    bus_size_t, u_int align);
2659 static void	mcx_dmamem_zero(struct mcx_dmamem *);
2660 static void	mcx_dmamem_free(struct mcx_softc *, struct mcx_dmamem *);
2661 
2662 static int	mcx_hwmem_alloc(struct mcx_softc *, struct mcx_hwmem *,
2663 		    unsigned int);
2664 static void	mcx_hwmem_free(struct mcx_softc *, struct mcx_hwmem *);
2665 
2666 CFATTACH_DECL_NEW(mcx, sizeof(struct mcx_softc), mcx_match, mcx_attach, NULL, NULL);
2667 
2668 static const struct {
2669 	pci_vendor_id_t		vendor;
2670 	pci_product_id_t	product;
2671 } mcx_devices[] = {
2672 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27700 },
2673 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27700VF },
2674 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27710 },
2675 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27710VF },
2676 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27800 },
2677 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27800VF },
2678 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28800 },
2679 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28800VF },
2680 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28908 },
2681 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28908VF },
2682 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT2892 },
2683 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT2894 },
2684 };
2685 
2686 struct mcx_eth_proto_capability {
2687 	uint64_t	cap_media;
2688 	uint64_t	cap_baudrate;
2689 };
2690 
2691 static const struct mcx_eth_proto_capability mcx_eth_cap_map[] = {
2692 	[MCX_ETHER_CAP_SGMII]		= { IFM_1000_SGMII,	IF_Gbps(1) },
2693 	[MCX_ETHER_CAP_1000_KX]		= { IFM_1000_KX,	IF_Gbps(1) },
2694 	[MCX_ETHER_CAP_10G_CX4]		= { IFM_10G_CX4,	IF_Gbps(10) },
2695 	[MCX_ETHER_CAP_10G_KX4]		= { IFM_10G_KX4,	IF_Gbps(10) },
2696 	[MCX_ETHER_CAP_10G_KR]		= { IFM_10G_KR,		IF_Gbps(10) },
2697 	[MCX_ETHER_CAP_20G_KR2]		= { IFM_20G_KR2,	IF_Gbps(20) },
2698 	[MCX_ETHER_CAP_40G_CR4]		= { IFM_40G_CR4,	IF_Gbps(40) },
2699 	[MCX_ETHER_CAP_40G_KR4]		= { IFM_40G_KR4,	IF_Gbps(40) },
2700 	[MCX_ETHER_CAP_56G_R4]		= { IFM_56G_R4,		IF_Gbps(56) },
2701 	[MCX_ETHER_CAP_10G_CR]		= { IFM_10G_CR1,	IF_Gbps(10) },
2702 	[MCX_ETHER_CAP_10G_SR]		= { IFM_10G_SR,		IF_Gbps(10) },
2703 	[MCX_ETHER_CAP_10G_LR]		= { IFM_10G_LR,		IF_Gbps(10) },
2704 	[MCX_ETHER_CAP_40G_SR4]		= { IFM_40G_SR4,	IF_Gbps(40) },
2705 	[MCX_ETHER_CAP_40G_LR4]		= { IFM_40G_LR4,	IF_Gbps(40) },
2706 	[MCX_ETHER_CAP_50G_SR2]		= { IFM_50G_SR2,	IF_Gbps(50) },
2707 	[MCX_ETHER_CAP_100G_CR4]	= { IFM_100G_CR4,	IF_Gbps(100) },
2708 	[MCX_ETHER_CAP_100G_SR4]	= { IFM_100G_SR4,	IF_Gbps(100) },
2709 	[MCX_ETHER_CAP_100G_KR4]	= { IFM_100G_KR4,	IF_Gbps(100) },
2710 	[MCX_ETHER_CAP_100G_LR4]	= { IFM_100G_LR4,	IF_Gbps(100) },
2711 	[MCX_ETHER_CAP_100_TX]		= { IFM_100_TX,		IF_Mbps(100) },
2712 	[MCX_ETHER_CAP_1000_T]		= { IFM_1000_T,		IF_Gbps(1) },
2713 	[MCX_ETHER_CAP_10G_T]		= { IFM_10G_T,		IF_Gbps(10) },
2714 	[MCX_ETHER_CAP_25G_CR]		= { IFM_25G_CR,		IF_Gbps(25) },
2715 	[MCX_ETHER_CAP_25G_KR]		= { IFM_25G_KR,		IF_Gbps(25) },
2716 	[MCX_ETHER_CAP_25G_SR]		= { IFM_25G_SR,		IF_Gbps(25) },
2717 	[MCX_ETHER_CAP_50G_CR2]		= { IFM_50G_CR2,	IF_Gbps(50) },
2718 	[MCX_ETHER_CAP_50G_KR2]		= { IFM_50G_KR2,	IF_Gbps(50) },
2719 };
2720 
2721 static int
2722 mcx_get_id(uint32_t val)
2723 {
2724 	return be32toh(val) & 0x00ffffff;
2725 }
2726 
2727 static int
2728 mcx_match(device_t parent, cfdata_t cf, void *aux)
2729 {
2730 	struct pci_attach_args *pa = aux;
2731 	int n;
2732 
2733 	for (n = 0; n < __arraycount(mcx_devices); n++) {
2734 		if (PCI_VENDOR(pa->pa_id) == mcx_devices[n].vendor &&
2735 		    PCI_PRODUCT(pa->pa_id) == mcx_devices[n].product)
2736 			return 1;
2737 	}
2738 
2739 	return 0;
2740 }
2741 
2742 void
2743 mcx_attach(device_t parent, device_t self, void *aux)
2744 {
2745 	struct mcx_softc *sc = device_private(self);
2746 	struct ifnet *ifp = &sc->sc_ec.ec_if;
2747 	struct pci_attach_args *pa = aux;
2748 	struct ifcapreq ifcr;
2749 	uint8_t enaddr[ETHER_ADDR_LEN];
2750 	int counts[PCI_INTR_TYPE_SIZE];
2751 	char intrxname[32];
2752 	pcireg_t memtype;
2753 	uint32_t r;
2754 	unsigned int cq_stride;
2755 	unsigned int cq_size;
2756 	int i, msix;
2757 	kcpuset_t *affinity;
2758 
2759 	sc->sc_dev = self;
2760 	sc->sc_pc = pa->pa_pc;
2761 	sc->sc_tag = pa->pa_tag;
2762 	if (pci_dma64_available(pa))
2763 		sc->sc_dmat = pa->pa_dmat64;
2764 	else
2765 		sc->sc_dmat = pa->pa_dmat;
2766 
2767 	/* Map the PCI memory space */
2768 	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MCX_HCA_BAR);
2769 	if (pci_mapreg_map(pa, MCX_HCA_BAR, memtype,
2770 #ifdef __NetBSD__
2771 	    0,
2772 #else
2773 	    BUS_SPACE_MAP_PREFETCHABLE,
2774 #endif
2775 	    &sc->sc_memt, &sc->sc_memh,
2776 	    NULL, &sc->sc_mems)) {
2777 		aprint_error(": unable to map register memory\n");
2778 		return;
2779 	}
2780 
2781 	pci_aprint_devinfo(pa, "Ethernet controller");
2782 
2783 	mutex_init(&sc->sc_media_mutex, MUTEX_DEFAULT, IPL_SOFTNET);
2784 
2785 	if (mcx_version(sc) != 0) {
2786 		/* error printed by mcx_version */
2787 		goto unmap;
2788 	}
2789 
2790 	r = mcx_rd(sc, MCX_CMDQ_ADDR_LO);
2791 	cq_stride = 1 << MCX_CMDQ_LOG_STRIDE(r); /* size of the entries */
2792 	cq_size = 1 << MCX_CMDQ_LOG_SIZE(r); /* number of entries */
2793 	if (cq_size > MCX_MAX_CQE) {
2794 		aprint_error_dev(self,
2795 		    "command queue size overflow %u\n", cq_size);
2796 		goto unmap;
2797 	}
2798 	if (cq_stride < sizeof(struct mcx_cmdq_entry)) {
2799 		aprint_error_dev(self,
2800 		    "command queue entry size underflow %u\n", cq_stride);
2801 		goto unmap;
2802 	}
2803 	if (cq_stride * cq_size > MCX_PAGE_SIZE) {
2804 		aprint_error_dev(self, "command queue page overflow\n");
2805 		goto unmap;
2806 	}
2807 
2808 	if (mcx_dmamem_alloc(sc, &sc->sc_doorbell_mem, MCX_DOORBELL_AREA_SIZE,
2809 	    MCX_PAGE_SIZE) != 0) {
2810 		aprint_error_dev(self, "unable to allocate doorbell memory\n");
2811 		goto unmap;
2812 	}
2813 
2814 	if (mcx_dmamem_alloc(sc, &sc->sc_cmdq_mem, MCX_PAGE_SIZE,
2815 	    MCX_PAGE_SIZE) != 0) {
2816 		aprint_error_dev(self, "unable to allocate command queue\n");
2817 		goto dbfree;
2818 	}
2819 
2820 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
2821 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint32_t),
2822 	    BUS_SPACE_BARRIER_WRITE);
2823 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem));
2824 	mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint32_t),
2825 	    BUS_SPACE_BARRIER_WRITE);
2826 
2827 	if (mcx_init_wait(sc) != 0) {
2828 		aprint_error_dev(self, "timeout waiting for init\n");
2829 		goto cqfree;
2830 	}
2831 
2832 	sc->sc_cmdq_mask = cq_size - 1;
2833 	sc->sc_cmdq_size = cq_stride;
2834 
2835 	if (mcx_enable_hca(sc) != 0) {
2836 		/* error printed by mcx_enable_hca */
2837 		goto cqfree;
2838 	}
2839 
2840 	if (mcx_issi(sc) != 0) {
2841 		/* error printed by mcx_issi */
2842 		goto teardown;
2843 	}
2844 
2845 	if (mcx_pages(sc, &sc->sc_boot_pages,
2846 	    htobe16(MCX_CMD_QUERY_PAGES_BOOT)) != 0) {
2847 		/* error printed by mcx_pages */
2848 		goto teardown;
2849 	}
2850 
2851 	if (mcx_hca_max_caps(sc) != 0) {
2852 		/* error printed by mcx_hca_max_caps */
2853 		goto teardown;
2854 	}
2855 
2856 	if (mcx_hca_set_caps(sc) != 0) {
2857 		/* error printed by mcx_hca_set_caps */
2858 		goto teardown;
2859 	}
2860 
2861 	if (mcx_pages(sc, &sc->sc_init_pages,
2862 	    htobe16(MCX_CMD_QUERY_PAGES_INIT)) != 0) {
2863 		/* error printed by mcx_pages */
2864 		goto teardown;
2865 	}
2866 
2867 	if (mcx_init_hca(sc) != 0) {
2868 		/* error printed by mcx_init_hca */
2869 		goto teardown;
2870 	}
2871 
2872 	if (mcx_pages(sc, &sc->sc_regular_pages,
2873 	    htobe16(MCX_CMD_QUERY_PAGES_REGULAR)) != 0) {
2874 		/* error printed by mcx_pages */
2875 		goto teardown;
2876 	}
2877 
2878 	/* apparently not necessary? */
2879 	if (mcx_set_driver_version(sc) != 0) {
2880 		/* error printed by mcx_set_driver_version */
2881 		goto teardown;
2882 	}
2883 
2884 	if (mcx_iff(sc) != 0) {	/* modify nic vport context */
2885 		/* error printed by mcx_iff? */
2886 		goto teardown;
2887 	}
2888 
2889 	if (mcx_alloc_uar(sc, &sc->sc_uar) != 0) {
2890 		/* error printed by mcx_alloc_uar */
2891 		goto teardown;
2892 	}
2893 
2894 	if (mcx_alloc_pd(sc) != 0) {
2895 		/* error printed by mcx_alloc_pd */
2896 		goto teardown;
2897 	}
2898 
2899 	if (mcx_alloc_tdomain(sc) != 0) {
2900 		/* error printed by mcx_alloc_tdomain */
2901 		goto teardown;
2902 	}
2903 
2904 	/*
2905 	 * The PRM makes no mention of MSI interrupts, just legacy and MSI-X.
2906 	 * Mellanox support tells us legacy interrupts are not supported,
2907 	 * so we're stuck with MSI-X only.
2908 	 */
2909 	counts[PCI_INTR_TYPE_MSIX] = -1;
2910 	counts[PCI_INTR_TYPE_MSI] = 0;
2911 	counts[PCI_INTR_TYPE_INTX] = 0;
2912 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, PCI_INTR_TYPE_MSIX) != 0) {
2913 		aprint_error_dev(self, "unable to allocate interrupt\n");
2914 		goto teardown;
2915 	}
2916 	if (counts[PCI_INTR_TYPE_MSIX] < 2) {
2917 		aprint_error_dev(self, "not enough MSI-X vectors\n");
2918 		goto teardown;
2919 	}
2920 	KASSERT(pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX);
2921 	snprintf(intrxname, sizeof(intrxname), "%s adminq", DEVNAME(sc));
2922 	sc->sc_ihc = mcx_establish_intr(sc, 0, NULL, mcx_admin_intr, sc,
2923 	    intrxname);
2924 	if (sc->sc_ihc == NULL) {
2925 		aprint_error_dev(self, "couldn't establish adminq interrupt\n");
2926 		goto teardown;
2927 	}
2928 
2929 	if (mcx_create_eq(sc, &sc->sc_admin_eq, sc->sc_uar,
2930 	    (1ull << MCX_EVENT_TYPE_INTERNAL_ERROR) |
2931 	    (1ull << MCX_EVENT_TYPE_PORT_CHANGE) |
2932 	    (1ull << MCX_EVENT_TYPE_CMD_COMPLETION) |
2933 	    (1ull << MCX_EVENT_TYPE_PAGE_REQUEST), 0) != 0) {
2934 		/* error printed by mcx_create_eq */
2935 		goto teardown;
2936 	}
2937 
2938 	if (mcx_query_nic_vport_context(sc, enaddr) != 0) {
2939 		/* error printed by mcx_query_nic_vport_context */
2940 		goto teardown;
2941 	}
2942 
2943 	if (mcx_query_special_contexts(sc) != 0) {
2944 		/* error printed by mcx_query_special_contexts */
2945 		goto teardown;
2946 	}
2947 
2948 	if (mcx_set_port_mtu(sc, MCX_HARDMTU) != 0) {
2949 		/* error printed by mcx_set_port_mtu */
2950 		goto teardown;
2951 	}
2952 
2953 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2954 	    ether_sprintf(enaddr));
2955 
2956 	msix = counts[PCI_INTR_TYPE_MSIX];
2957 	msix--; /* admin ops took one */
2958 
2959 	sc->sc_nqueues = uimin(MCX_MAX_QUEUES, msix);
2960 	sc->sc_nqueues = uimin(sc->sc_nqueues, ncpu);
2961 	/* Round down to a power of two.  */
2962 	sc->sc_nqueues = 1U << ilog2(sc->sc_nqueues);
2963 	sc->sc_queues = kmem_zalloc(sc->sc_nqueues * sizeof(*sc->sc_queues),
2964 	    KM_SLEEP);
2965 
2966 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
2967 	ifp->if_softc = sc;
2968 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
2969 #ifdef MCX_MPSAFE
2970 	ifp->if_extflags = IFEF_MPSAFE;
2971 #endif
2972 	ifp->if_init = mcx_init;
2973 	ifp->if_stop = mcx_stop;
2974 	ifp->if_ioctl = mcx_ioctl;
2975 	ifp->if_start = mcx_start;
2976 	if (sc->sc_nqueues > 1) {
2977 		ifp->if_transmit = mcx_transmit;
2978 	}
2979 	ifp->if_mtu = sc->sc_hardmtu;
2980 	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_IPv4_Tx |
2981 	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx |
2982 	    IFCAP_CSUM_UDPv6_Rx | IFCAP_CSUM_UDPv6_Tx |
2983 	    IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
2984 	    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_TCPv6_Tx;
2985 	IFQ_SET_MAXLEN(&ifp->if_snd, 1024);
2986 	IFQ_SET_READY(&ifp->if_snd);
2987 
2988 	sc->sc_ec.ec_capabilities = ETHERCAP_JUMBO_MTU |
2989 	    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2990 	sc->sc_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
2991 
2992 	sc->sc_ec.ec_ifmedia = &sc->sc_media;
2993 	ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, mcx_media_change,
2994 	    mcx_media_status, &sc->sc_media_mutex);
2995 	mcx_media_add_types(sc);
2996 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
2997 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
2998 
2999 	if_attach(ifp);
3000 
3001 	/* Enable hardware offload by default */
3002 	memset(&ifcr, 0, sizeof(ifcr));
3003 	ifcr.ifcr_capenable = ifp->if_capabilities;
3004 	ifioctl_common(ifp, SIOCSIFCAP, &ifcr);
3005 
3006 	if_deferred_start_init(ifp, NULL);
3007 
3008 	ether_ifattach(ifp, enaddr);
3009 
3010 	kcpuset_create(&affinity, false);
3011 	kcpuset_set(affinity, 0);
3012 
3013 	for (i = 0; i < sc->sc_nqueues; i++) {
3014 		struct mcx_queues *q = &sc->sc_queues[i];
3015 		struct mcx_rx *rx = &q->q_rx;
3016 		struct mcx_tx *tx = &q->q_tx;
3017 		int vec;
3018 
3019 		vec = i + 1;
3020 		q->q_sc = sc;
3021 		q->q_index = i;
3022 
3023 		if (mcx_alloc_uar(sc, &q->q_uar) != 0) {
3024 			aprint_error_dev(self, "unable to alloc uar %d\n", i);
3025 			goto teardown;
3026 		}
3027 
3028 		if (mcx_create_eq(sc, &q->q_eq, q->q_uar, 0, vec) != 0) {
3029 			aprint_error_dev(self,
3030 			    "unable to create event queue %d\n", i);
3031 			goto teardown;
3032 		}
3033 
3034 		rx->rx_softc = sc;
3035 		callout_init(&rx->rx_refill, CALLOUT_FLAGS);
3036 		callout_setfunc(&rx->rx_refill, mcx_refill, rx);
3037 
3038 		tx->tx_softc = sc;
3039 		mutex_init(&tx->tx_lock, MUTEX_DEFAULT, IPL_NET);
3040 		tx->tx_pcq = pcq_create(MCX_TXQ_NUM, KM_SLEEP);
3041 		tx->tx_softint = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
3042 		    mcx_deferred_transmit, tx);
3043 
3044 		snprintf(intrxname, sizeof(intrxname), "%s queue %d",
3045 		    DEVNAME(sc), i);
3046 		q->q_ihc = mcx_establish_intr(sc, vec, affinity, mcx_cq_intr,
3047 		    q, intrxname);
3048 	}
3049 
3050 	callout_init(&sc->sc_calibrate, CALLOUT_FLAGS);
3051 	callout_setfunc(&sc->sc_calibrate, mcx_calibrate, sc);
3052 
3053 	if (workqueue_create(&sc->sc_workq, "mcxportchg", mcx_port_change, sc,
3054 	    PRI_NONE, IPL_NET, 0) != 0) {
3055 		aprint_error_dev(self, "couldn't create port change workq\n");
3056 		goto teardown;
3057 	}
3058 
3059 	mcx_port_change(&sc->sc_port_change, sc);
3060 
3061 	sc->sc_mac_flow_table_id = -1;
3062 	sc->sc_rss_flow_table_id = -1;
3063 	sc->sc_rqt = -1;
3064 	for (i = 0; i < MCX_NUM_FLOW_GROUPS; i++) {
3065 		struct mcx_flow_group *mfg = &sc->sc_flow_group[i];
3066 		mfg->g_id = -1;
3067 		mfg->g_table = -1;
3068 		mfg->g_size = 0;
3069 		mfg->g_start = 0;
3070 	}
3071 	sc->sc_extra_mcast = 0;
3072 	memset(sc->sc_mcast_flows, 0, sizeof(sc->sc_mcast_flows));
3073 
3074 #if NKSTAT > 0
3075 	mcx_kstat_attach(sc);
3076 #endif
3077 	mcx_timecounter_attach(sc);
3078 	return;
3079 
3080 teardown:
3081 	mcx_teardown_hca(sc, htobe16(MCX_CMD_TEARDOWN_HCA_GRACEFUL));
3082 	/* error printed by mcx_teardown_hca, and we're already unwinding */
3083 cqfree:
3084 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
3085 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t),
3086 	    BUS_SPACE_BARRIER_WRITE);
3087 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem) |
3088 	    MCX_CMDQ_INTERFACE_DISABLED);
3089 	mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint64_t),
3090 	    BUS_SPACE_BARRIER_WRITE);
3091 
3092 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, 0);
3093 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t),
3094 	    BUS_SPACE_BARRIER_WRITE);
3095 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_CMDQ_INTERFACE_DISABLED);
3096 
3097 	mcx_dmamem_free(sc, &sc->sc_cmdq_mem);
3098 dbfree:
3099 	mcx_dmamem_free(sc, &sc->sc_doorbell_mem);
3100 unmap:
3101 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
3102 	sc->sc_mems = 0;
3103 }
3104 
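/*
 * Establish an MSI-X vector.  Vector 0 carries admin queue events and
 * is left where the system placed it; queue vectors (index > 0) are
 * spread round-robin across CPUs via interrupt_distribute().
 */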
3105 static void *
3106 mcx_establish_intr(struct mcx_softc *sc, int index, kcpuset_t *affinity,
3107     int (*func)(void *), void *arg, const char *xname)
3108 {
3109 	char intrbuf[PCI_INTRSTR_LEN];
3110 	const char *intrstr;
3111 	void *ih;
3112 
3113 	pci_intr_setattr(sc->sc_pc, &sc->sc_intrs[index], PCI_INTR_MPSAFE,
3114 	    true);
3115 
3116 	intrstr = pci_intr_string(sc->sc_pc, sc->sc_intrs[index], intrbuf,
3117 	    sizeof(intrbuf));
3118 	ih = pci_intr_establish_xname(sc->sc_pc, sc->sc_intrs[index], IPL_NET,
3119 	    func, arg, xname);
3120 	if (ih == NULL) {
3121 		aprint_error_dev(sc->sc_dev,
3122 		    "unable to establish interrupt%s%s\n",
3123 		    intrstr ? " at " : "",
3124 		    intrstr ? intrstr : "");
3125 		return NULL;
3126 	}
3127 
3128 	if (affinity != NULL && index > 0) {
3129 		/* Round-robin affinity */
3130 		kcpuset_zero(affinity);
3131 		kcpuset_set(affinity, (index - 1) % ncpu);
3132 		interrupt_distribute(ih, affinity, NULL);
3133 	}
3134 
3135 	return ih;
3136 }
3137 
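/*
 * Minimal rx ring accounting, standing in for OpenBSD's if_rxr(9):
 * mcx_rxr_get() hands out up to "max" free slots without exceeding
 * the ring total, and mcx_rxr_put() returns them as mbufs complete.
 */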
3138 static void
3139 mcx_rxr_init(struct mcx_rxring *rxr, u_int lwm __unused, u_int hwm)
3140 {
3141 	rxr->rxr_total = hwm;
3142 	rxr->rxr_inuse = 0;
3143 }
3144 
3145 static u_int
3146 mcx_rxr_get(struct mcx_rxring *rxr, u_int max)
3147 {
3148 	const u_int taken = MIN(max, rxr->rxr_total - rxr->rxr_inuse);
3149 
3150 	rxr->rxr_inuse += taken;
3151 
3152 	return taken;
3153 }
3154 
3155 static void
3156 mcx_rxr_put(struct mcx_rxring *rxr, u_int n)
3157 {
3158 	rxr->rxr_inuse -= n;
3159 }
3160 
3161 static u_int
3162 mcx_rxr_inuse(struct mcx_rxring *rxr)
3163 {
3164 	return rxr->rxr_inuse;
3165 }
3166 
3167 static int
3168 mcx_version(struct mcx_softc *sc)
3169 {
3170 	uint32_t fw0, fw1;
3171 	uint16_t cmdif;
3172 
3173 	fw0 = mcx_rd(sc, MCX_FW_VER);
3174 	fw1 = mcx_rd(sc, MCX_CMDIF_FW_SUBVER);
3175 
3176 	aprint_normal_dev(sc->sc_dev, "FW %u.%u.%04u\n", MCX_FW_VER_MAJOR(fw0),
3177 	    MCX_FW_VER_MINOR(fw0), MCX_FW_VER_SUBMINOR(fw1));
3178 
3179 	cmdif = MCX_CMDIF(fw1);
3180 	if (cmdif != MCX_CMD_IF_SUPPORTED) {
3181 		aprint_error_dev(sc->sc_dev,
3182 		    "unsupported command interface %u\n", cmdif);
3183 		return (-1);
3184 	}
3185 
3186 	return (0);
3187 }
3188 
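/*
 * Poll MCX_STATE until the firmware reports ready after the command
 * queue address has been written; give up after roughly two seconds
 * (2000 polls, 1ms apart).
 */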
3189 static int
3190 mcx_init_wait(struct mcx_softc *sc)
3191 {
3192 	unsigned int i;
3193 	uint32_t r;
3194 
3195 	for (i = 0; i < 2000; i++) {
3196 		r = mcx_rd(sc, MCX_STATE);
3197 		if ((r & MCX_STATE_MASK) == MCX_STATE_READY)
3198 			return (0);
3199 
3200 		delay(1000);
3201 		mcx_bar(sc, MCX_STATE, sizeof(uint32_t),
3202 		    BUS_SPACE_BARRIER_READ);
3203 	}
3204 
3205 	return (-1);
3206 }
3207 
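/*
 * Poll a command queue entry until the hardware hands ownership of
 * the slot back to software, re-syncing the queue DMA memory before
 * each check; returns ETIMEDOUT after "msec" milliseconds.
 */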
3208 static uint8_t
3209 mcx_cmdq_poll(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3210     unsigned int msec)
3211 {
3212 	unsigned int i;
3213 
3214 	for (i = 0; i < msec; i++) {
3215 		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
3216 		    0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);
3217 
3218 		if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
3219 		    MCX_CQ_STATUS_OWN_SW)
3220 			return (0);
3221 
3222 		delay(1000);
3223 	}
3224 
3225 	return (ETIMEDOUT);
3226 }
3227 
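/*
 * The command interface checksums entries and mailboxes with an 8-bit
 * XOR over their contents, seeded with 0xff.  The mcx_mix_* helpers
 * fold fields of various widths into a 32-bit accumulator, and
 * mcx_mix_done() collapses the accumulator into the final byte.
 */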
3228 static uint32_t
3229 mcx_mix_u64(uint32_t xor, uint64_t u64)
3230 {
3231 	xor ^= u64 >> 32;
3232 	xor ^= u64;
3233 
3234 	return (xor);
3235 }
3236 
3237 static uint32_t
3238 mcx_mix_u32(uint32_t xor, uint32_t u32)
3239 {
3240 	xor ^= u32;
3241 
3242 	return (xor);
3243 }
3244 
3245 static uint32_t
3246 mcx_mix_u8(uint32_t xor, uint8_t u8)
3247 {
3248 	xor ^= u8;
3249 
3250 	return (xor);
3251 }
3252 
3253 static uint8_t
3254 mcx_mix_done(uint32_t xor)
3255 {
3256 	xor ^= xor >> 16;
3257 	xor ^= xor >> 8;
3258 
3259 	return (xor);
3260 }
3261 
3262 static uint8_t
3263 mcx_xor(const void *buf, size_t len)
3264 {
3265 	const uint32_t *dwords = buf;
3266 	uint32_t xor = 0xff;
3267 	size_t i;
3268 
3269 	len /= sizeof(*dwords);
3270 
3271 	for (i = 0; i < len; i++)
3272 		xor ^= dwords[i];
3273 
3274 	return (mcx_mix_done(xor));
3275 }
3276 
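/*
 * Tokens match completions back to commands and are also stamped into
 * each mailbox belonging to a command; zero is reserved, so skip it
 * when the counter wraps.
 */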
3277 static uint8_t
3278 mcx_cmdq_token(struct mcx_softc *sc)
3279 {
3280 	uint8_t token;
3281 
3282 	do {
3283 		token = ++sc->sc_cmdq_token;
3284 	} while (token == 0);
3285 
3286 	return (token);
3287 }
3288 
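/*
 * Prepare a command queue entry: clear the slot, record the input and
 * output lengths and the token, and mark the entry as owned by the
 * hardware so completion can be detected by mcx_cmdq_poll().
 */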
3289 static void
3290 mcx_cmdq_init(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3291     uint32_t ilen, uint32_t olen, uint8_t token)
3292 {
3293 	memset(cqe, 0, sc->sc_cmdq_size);
3294 
3295 	cqe->cq_type = MCX_CMDQ_TYPE_PCIE;
3296 	be32enc(&cqe->cq_input_length, ilen);
3297 	be32enc(&cqe->cq_output_length, olen);
3298 	cqe->cq_token = token;
3299 	cqe->cq_status = MCX_CQ_STATUS_OWN_HW;
3300 }
3301 
3302 static void
3303 mcx_cmdq_sign(struct mcx_cmdq_entry *cqe)
3304 {
3305 	cqe->cq_signature = ~mcx_xor(cqe, sizeof(*cqe));
3306 }
3307 
3308 static int
3309 mcx_cmdq_verify(const struct mcx_cmdq_entry *cqe)
3310 {
3311 	/* return (mcx_xor(cqe, sizeof(*cqe)) ? -1 :  0); */
3312 	return (0);
3313 }
3314 
3315 static void *
3316 mcx_cmdq_in(struct mcx_cmdq_entry *cqe)
3317 {
3318 	return (&cqe->cq_input_data);
3319 }
3320 
3321 static void *
3322 mcx_cmdq_out(struct mcx_cmdq_entry *cqe)
3323 {
3324 	return (&cqe->cq_output_data);
3325 }
3326 
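/*
 * Sign the entry, flush it to memory, and ring the command queue
 * doorbell bit for this slot so the firmware starts processing.
 */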
3327 static void
3328 mcx_cmdq_post(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3329     unsigned int slot)
3330 {
3331 	mcx_cmdq_sign(cqe);
3332 
3333 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
3334 	    0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_PRERW);
3335 
3336 	mcx_wr(sc, MCX_CMDQ_DOORBELL, 1U << slot);
3337 	mcx_bar(sc, MCX_CMDQ_DOORBELL, sizeof(uint32_t),
3338 	    BUS_SPACE_BARRIER_WRITE);
3339 }
3340 
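/*
 * mcx_enable_hca() shows the command flow used throughout the driver:
 * initialize the entry in slot 0, fill in the input fields, post the
 * doorbell, poll for ownership to return, verify the signature, and
 * check the status byte in the output data.
 */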
3341 static int
3342 mcx_enable_hca(struct mcx_softc *sc)
3343 {
3344 	struct mcx_cmdq_entry *cqe;
3345 	struct mcx_cmd_enable_hca_in *in;
3346 	struct mcx_cmd_enable_hca_out *out;
3347 	int error;
3348 	uint8_t status;
3349 
3350 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3351 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3352 
3353 	in = mcx_cmdq_in(cqe);
3354 	in->cmd_opcode = htobe16(MCX_CMD_ENABLE_HCA);
3355 	in->cmd_op_mod = htobe16(0);
3356 	in->cmd_function_id = htobe16(0);
3357 
3358 	mcx_cmdq_post(sc, cqe, 0);
3359 
3360 	error = mcx_cmdq_poll(sc, cqe, 1000);
3361 	if (error != 0) {
3362 		printf(", hca enable timeout\n");
3363 		return (-1);
3364 	}
3365 	if (mcx_cmdq_verify(cqe) != 0) {
3366 		printf(", hca enable command corrupt\n");
3367 		return (-1);
3368 	}
3369 
3370 	status = cqe->cq_output_data[0];
3371 	if (status != MCX_CQ_STATUS_OK) {
3372 		printf(", hca enable failed (%x)\n", status);
3373 		return (-1);
3374 	}
3375 
3376 	return (0);
3377 }
3378 
3379 static int
3380 mcx_teardown_hca(struct mcx_softc *sc, uint16_t profile)
3381 {
3382 	struct mcx_cmdq_entry *cqe;
3383 	struct mcx_cmd_teardown_hca_in *in;
3384 	struct mcx_cmd_teardown_hca_out *out;
3385 	int error;
3386 	uint8_t status;
3387 
3388 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3389 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3390 
3391 	in = mcx_cmdq_in(cqe);
3392 	in->cmd_opcode = htobe16(MCX_CMD_TEARDOWN_HCA);
3393 	in->cmd_op_mod = htobe16(0);
3394 	in->cmd_profile = profile;
3395 
3396 	mcx_cmdq_post(sc, cqe, 0);
3397 
3398 	error = mcx_cmdq_poll(sc, cqe, 1000);
3399 	if (error != 0) {
3400 		printf(", hca teardown timeout\n");
3401 		return (-1);
3402 	}
3403 	if (mcx_cmdq_verify(cqe) != 0) {
3404 		printf(", hca teardown command corrupt\n");
3405 		return (-1);
3406 	}
3407 
3408 	status = cqe->cq_output_data[0];
3409 	if (status != MCX_CQ_STATUS_OK) {
3410 		printf(", hca teardown failed (%x)\n", status);
3411 		return (-1);
3412 	}
3413 
3414 	return (0);
3415 }
3416 
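/*
 * Commands whose data doesn't fit in the entry's inline area spill
 * into a chain of mailboxes.  Allocate "nmb" mailboxes from a single
 * DMA region, patch the entry (or previous mailbox) pointer to the
 * first one, and link the rest together through mb_next_ptr, stamping
 * each with its block number and the command token.
 */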
3417 static int
3418 mcx_cmdq_mboxes_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
3419     unsigned int nmb, uint64_t *ptr, uint8_t token)
3420 {
3421 	uint8_t *kva;
3422 	uint64_t dva;
3423 	int i;
3424 	int error;
3425 
3426 	error = mcx_dmamem_alloc(sc, mxm,
3427 	    nmb * MCX_CMDQ_MAILBOX_SIZE, MCX_CMDQ_MAILBOX_ALIGN);
3428 	if (error != 0)
3429 		return (error);
3430 
3431 	mcx_dmamem_zero(mxm);
3432 
3433 	dva = MCX_DMA_DVA(mxm);
3434 	kva = MCX_DMA_KVA(mxm);
3435 	for (i = 0; i < nmb; i++) {
3436 		struct mcx_cmdq_mailbox *mbox = (struct mcx_cmdq_mailbox *)kva;
3437 
3438 		/* patch the cqe or mbox pointing at this one */
3439 		be64enc(ptr, dva);
3440 
3441 		/* fill in this mbox */
3442 		be32enc(&mbox->mb_block_number, i);
3443 		mbox->mb_token = token;
3444 
3445 		/* move to the next one */
3446 		ptr = &mbox->mb_next_ptr;
3447 
3448 		dva += MCX_CMDQ_MAILBOX_SIZE;
3449 		kva += MCX_CMDQ_MAILBOX_SIZE;
3450 	}
3451 
3452 	return (0);
3453 }
3454 
3455 static uint32_t
3456 mcx_cmdq_mbox_ctrl_sig(const struct mcx_cmdq_mailbox *mb)
3457 {
3458 	uint32_t xor = 0xff;
3459 
3460 	/* only 3 fields get set, so mix them directly */
3461 	xor = mcx_mix_u64(xor, mb->mb_next_ptr);
3462 	xor = mcx_mix_u32(xor, mb->mb_block_number);
3463 	xor = mcx_mix_u8(xor, mb->mb_token);
3464 
3465 	return (mcx_mix_done(xor));
3466 }
3467 
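/*
 * Each mailbox carries two checksums: the control signature covers
 * the header fields (next pointer, block number, token), and
 * mb_signature additionally mixes in the data area.
 */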
3468 static void
3469 mcx_cmdq_mboxes_sign(struct mcx_dmamem *mxm, unsigned int nmb)
3470 {
3471 	uint8_t *kva;
3472 	int i;
3473 
3474 	kva = MCX_DMA_KVA(mxm);
3475 
3476 	for (i = 0; i < nmb; i++) {
3477 		struct mcx_cmdq_mailbox *mb = (struct mcx_cmdq_mailbox *)kva;
3478 		uint8_t sig = mcx_cmdq_mbox_ctrl_sig(mb);
3479 		mb->mb_ctrl_signature = sig;
3480 		mb->mb_signature = sig ^
3481 		    mcx_xor(mb->mb_data, sizeof(mb->mb_data));
3482 
3483 		kva += MCX_CMDQ_MAILBOX_SIZE;
3484 	}
3485 }
3486 
3487 static void
3488 mcx_cmdq_mboxes_sync(struct mcx_softc *sc, struct mcx_dmamem *mxm, int ops)
3489 {
3490 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(mxm),
3491 	    0, MCX_DMA_LEN(mxm), ops);
3492 }
3493 
3494 static struct mcx_cmdq_mailbox *
3495 mcx_cq_mbox(struct mcx_dmamem *mxm, unsigned int i)
3496 {
3497 	uint8_t *kva;
3498 
3499 	kva = MCX_DMA_KVA(mxm);
3500 	kva += i * MCX_CMDQ_MAILBOX_SIZE;
3501 
3502 	return ((struct mcx_cmdq_mailbox *)kva);
3503 }
3504 
3505 static inline void *
3506 mcx_cq_mbox_data(struct mcx_cmdq_mailbox *mb)
3507 {
3508 	return (&mb->mb_data);
3509 }
3510 
3511 static void
3512 mcx_cmdq_mboxes_copyin(struct mcx_dmamem *mxm, unsigned int nmb,
3513     void *b, size_t len)
3514 {
3515 	uint8_t *buf = b;
3516 	struct mcx_cmdq_mailbox *mb;
3517 	int i;
3518 
3519 	mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
3520 	for (i = 0; i < nmb; i++) {
3521 
3522 		memcpy(mb->mb_data, buf, uimin(sizeof(mb->mb_data), len));
3523 
3524 		if (sizeof(mb->mb_data) >= len)
3525 			break;
3526 
3527 		buf += sizeof(mb->mb_data);
3528 		len -= sizeof(mb->mb_data);
3529 		mb++;
3530 	}
3531 }
3532 
3533 static void
3534 mcx_cmdq_mboxes_pas(struct mcx_dmamem *mxm, int offset, int npages,
3535     struct mcx_dmamem *buf)
3536 {
3537 	uint64_t *pas;
3538 	int mbox, mbox_pages, i;
3539 
3540 	mbox = offset / MCX_CMDQ_MAILBOX_DATASIZE;
3541 	offset %= MCX_CMDQ_MAILBOX_DATASIZE;
3542 
3543 	pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
3544 	pas += (offset / sizeof(*pas));
3545 	mbox_pages = (MCX_CMDQ_MAILBOX_DATASIZE - offset) / sizeof(*pas);
3546 	for (i = 0; i < npages; i++) {
3547 		if (i == mbox_pages) {
3548 			mbox++;
3549 			pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
3550 			mbox_pages += MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas);
3551 		}
3552 		*pas = htobe64(MCX_DMA_DVA(buf) + (i * MCX_PAGE_SIZE));
3553 		pas++;
3554 	}
3555 }
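
/*
 * Editor's sketch: callers lay a context structure at the front of
 * the mailbox data and append the page address array behind it, e.g.
 * in mcx_create_eq() below:
 *
 *	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &eq->eq_mem);
 *
 * The arithmetic above finds the first uint64_t slot after that
 * offset, and mbox_pages tracks where the array spills over into the
 * next mailbox in the chain.
 */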
3556 
3557 static void
3558 mcx_cmdq_mboxes_copyout(struct mcx_dmamem *mxm, int nmb, void *b, size_t len)
3559 {
3560 	uint8_t *buf = b;
3561 	struct mcx_cmdq_mailbox *mb;
3562 	int i;
3563 
3564 	mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
3565 	for (i = 0; i < nmb; i++) {
3566 		memcpy(buf, mb->mb_data, uimin(sizeof(mb->mb_data), len));
3567 
3568 		if (sizeof(mb->mb_data) >= len)
3569 			break;
3570 
3571 		buf += sizeof(mb->mb_data);
3572 		len -= sizeof(mb->mb_data);
3573 		mb++;
3574 	}
3575 }
3576 
3577 static void
3578 mcx_cq_mboxes_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
3579 {
3580 	mcx_dmamem_free(sc, mxm);
3581 }
3582 
3583 #if 0
3584 static void
3585 mcx_cmdq_dump(const struct mcx_cmdq_entry *cqe)
3586 {
3587 	unsigned int i;
3588 
3589 	printf(" type %02x, ilen %u, iptr %016llx", cqe->cq_type,
3590 	    be32dec(&cqe->cq_input_length), be64dec(&cqe->cq_input_ptr));
3591 
3592 	printf(", idata ");
3593 	for (i = 0; i < sizeof(cqe->cq_input_data); i++)
3594 		printf("%02x", cqe->cq_input_data[i]);
3595 
3596 	printf(", odata ");
3597 	for (i = 0; i < sizeof(cqe->cq_output_data); i++)
3598 		printf("%02x", cqe->cq_output_data[i]);
3599 
3600 	printf(", optr %016llx, olen %u, token %02x, sig %02x, status %02x",
3601 	    be64dec(&cqe->cq_output_ptr), be32dec(&cqe->cq_output_length),
3602 	    cqe->cq_token, cqe->cq_signature, cqe->cq_status);
3603 }
3604 
3605 static void
3606 mcx_cmdq_mbox_dump(struct mcx_dmamem *mboxes, int num)
3607 {
3608 	int i, j;
3609 	uint8_t *d;
3610 
3611 	for (i = 0; i < num; i++) {
3612 		struct mcx_cmdq_mailbox *mbox;
3613 		mbox = mcx_cq_mbox(mboxes, i);
3614 
3615 		d = mcx_cq_mbox_data(mbox);
3616 		for (j = 0; j < MCX_CMDQ_MAILBOX_DATASIZE; j++) {
3617 			if (j != 0 && (j % 16 == 0))
3618 				printf("\n");
3619 			printf("%.2x ", d[j]);
3620 		}
3621 	}
3622 }
3623 #endif
3624 
3625 static int
3626 mcx_access_hca_reg(struct mcx_softc *sc, uint16_t reg, int op, void *data,
3627     int len)
3628 {
3629 	struct mcx_dmamem mxm;
3630 	struct mcx_cmdq_entry *cqe;
3631 	struct mcx_cmd_access_reg_in *in;
3632 	struct mcx_cmd_access_reg_out *out;
3633 	uint8_t token = mcx_cmdq_token(sc);
3634 	int error, nmb;
3635 
3636 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3637 	mcx_cmdq_init(sc, cqe, sizeof(*in) + len, sizeof(*out) + len,
3638 	    token);
3639 
3640 	in = mcx_cmdq_in(cqe);
3641 	in->cmd_opcode = htobe16(MCX_CMD_ACCESS_REG);
3642 	in->cmd_op_mod = htobe16(op);
3643 	in->cmd_register_id = htobe16(reg);
3644 
3645 	nmb = howmany(len, MCX_CMDQ_MAILBOX_DATASIZE);
3646 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
3647 	    &cqe->cq_output_ptr, token) != 0) {
3648 		printf(", unable to allocate access reg mailboxen\n");
3649 		return (-1);
3650 	}
3651 	cqe->cq_input_ptr = cqe->cq_output_ptr;
3652 	mcx_cmdq_mboxes_copyin(&mxm, nmb, data, len);
3653 	mcx_cmdq_mboxes_sign(&mxm, nmb);
3654 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3655 
3656 	mcx_cmdq_post(sc, cqe, 0);
3657 	error = mcx_cmdq_poll(sc, cqe, 1000);
3658 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3659 
3660 	if (error != 0) {
3661 		printf("%s: access reg (%s %x) timeout\n", DEVNAME(sc),
3662 		    (op == MCX_REG_OP_WRITE ? "write" : "read"), reg);
3663 		goto free;
3664 	}
3665 	error = mcx_cmdq_verify(cqe);
3666 	if (error != 0) {
3667 		printf("%s: access reg (%s %x) reply corrupt\n",
3668 		    DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
3669 		    reg);
3670 		goto free;
3671 	}
3672 
3673 	out = mcx_cmdq_out(cqe);
3674 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3675 		printf("%s: access reg (%s %x) failed (%x, %.6x)\n",
3676 		    DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
3677 		    reg, out->cmd_status, be32toh(out->cmd_syndrome));
3678 		error = -1;
3679 		goto free;
3680 	}
3681 
3682 	mcx_cmdq_mboxes_copyout(&mxm, nmb, data, len);
3683 free:
3684 	mcx_dmamem_free(sc, &mxm);
3685 
3686 	return (error);
3687 }
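
/*
 * Usage sketch (mirrors mcx_set_port_mtu() below): the same helper
 * reads and writes a port register, selected by the op argument:
 *
 *	struct mcx_reg_pmtu pmtu;
 *
 *	memset(&pmtu, 0, sizeof(pmtu));
 *	pmtu.rp_local_port = 1;
 *	error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_READ,
 *	    &pmtu, sizeof(pmtu));
 */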
3688 
3689 static int
3690 mcx_set_issi(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3691     unsigned int slot)
3692 {
3693 	struct mcx_cmd_set_issi_in *in;
3694 	struct mcx_cmd_set_issi_out *out;
3695 	uint8_t status;
3696 
3697 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3698 
3699 	in = mcx_cmdq_in(cqe);
3700 	in->cmd_opcode = htobe16(MCX_CMD_SET_ISSI);
3701 	in->cmd_op_mod = htobe16(0);
3702 	in->cmd_current_issi = htobe16(MCX_ISSI);
3703 
3704 	mcx_cmdq_post(sc, cqe, slot);
3705 	if (mcx_cmdq_poll(sc, cqe, 1000) != 0)
3706 		return (-1);
3707 	if (mcx_cmdq_verify(cqe) != 0)
3708 		return (-1);
3709 
3710 	status = cqe->cq_output_data[0];
3711 	if (status != MCX_CQ_STATUS_OK)
3712 		return (-1);
3713 
3714 	return (0);
3715 }
3716 
3717 static int
3718 mcx_issi(struct mcx_softc *sc)
3719 {
3720 	struct mcx_dmamem mxm;
3721 	struct mcx_cmdq_entry *cqe;
3722 	struct mcx_cmd_query_issi_in *in;
3723 	struct mcx_cmd_query_issi_il_out *out;
3724 	struct mcx_cmd_query_issi_mb_out *mb;
3725 	uint8_t token = mcx_cmdq_token(sc);
3726 	uint8_t status;
3727 	int error;
3728 
3729 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3730 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mb), token);
3731 
3732 	in = mcx_cmdq_in(cqe);
3733 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_ISSI);
3734 	in->cmd_op_mod = htobe16(0);
3735 
3736 	CTASSERT(sizeof(*mb) <= MCX_CMDQ_MAILBOX_DATASIZE);
3737 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
3738 	    &cqe->cq_output_ptr, token) != 0) {
3739 		printf(", unable to allocate query issi mailbox\n");
3740 		return (-1);
3741 	}
3742 	mcx_cmdq_mboxes_sign(&mxm, 1);
3743 
3744 	mcx_cmdq_post(sc, cqe, 0);
3745 	error = mcx_cmdq_poll(sc, cqe, 1000);
3746 	if (error != 0) {
3747 		printf(", query issi timeout\n");
3748 		goto free;
3749 	}
3750 	error = mcx_cmdq_verify(cqe);
3751 	if (error != 0) {
3752 		printf(", query issi reply corrupt\n");
3753 		goto free;
3754 	}
3755 
3756 	status = cqe->cq_output_data[0];
3757 	switch (status) {
3758 	case MCX_CQ_STATUS_OK:
3759 		break;
3760 	case MCX_CQ_STATUS_BAD_OPCODE:
3761 		/* use ISSI 0 */
3762 		goto free;
3763 	default:
3764 		printf(", query issi failed (%x)\n", status);
3765 		error = -1;
3766 		goto free;
3767 	}
3768 
3769 	out = mcx_cmdq_out(cqe);
3770 	if (out->cmd_current_issi == htobe16(MCX_ISSI)) {
3771 		/* use ISSI 1 */
3772 		goto free;
3773 	}
3774 
3775 	/* don't need to read cqe anymore, can be used for SET ISSI */
3776 
3777 	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3778 	CTASSERT(MCX_ISSI < NBBY);
3779 	/* supported_issi is a big-endian bitmap; ISSI 0-7 sit in the last byte */
3780 	if (!ISSET(mb->cmd_supported_issi[79], 1 << MCX_ISSI)) {
3781 		/* use ISSI 0 */
3782 		goto free;
3783 	}
3784 
3785 	if (mcx_set_issi(sc, cqe, 0) != 0) {
3786 		/* ignore the error, just use ISSI 0 */
3787 	} else {
3788 		/* use ISSI 1 */
3789 	}
3790 
3791 free:
3792 	mcx_cq_mboxes_free(sc, &mxm);
3793 	return (error);
3794 }
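
/*
 * Editor's summary of the negotiation above: QUERY ISSI either fails
 * with BAD_OPCODE (pre-ISSI firmware, so the driver stays on ISSI 0)
 * or reports the current and supported interface step sequence ids.
 * Only when the firmware advertises MCX_ISSI does the driver attempt
 * SET ISSI, and a failure there is deliberately ignored, falling
 * back to ISSI 0.
 */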
3795 
3796 static int
3797 mcx_query_pages(struct mcx_softc *sc, uint16_t type,
3798     int32_t *npages, uint16_t *func_id)
3799 {
3800 	struct mcx_cmdq_entry *cqe;
3801 	struct mcx_cmd_query_pages_in *in;
3802 	struct mcx_cmd_query_pages_out *out;
3803 
3804 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3805 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3806 
3807 	in = mcx_cmdq_in(cqe);
3808 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_PAGES);
3809 	in->cmd_op_mod = type;
3810 
3811 	mcx_cmdq_post(sc, cqe, 0);
3812 	if (mcx_cmdq_poll(sc, cqe, 1000) != 0) {
3813 		printf(", query pages timeout\n");
3814 		return (-1);
3815 	}
3816 	if (mcx_cmdq_verify(cqe) != 0) {
3817 		printf(", query pages reply corrupt\n");
3818 		return (-1);
3819 	}
3820 
3821 	out = mcx_cmdq_out(cqe);
3822 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3823 		printf(", query pages failed (%x)\n", out->cmd_status);
3824 		return (-1);
3825 	}
3826 
3827 	*func_id = out->cmd_func_id;
3828 	*npages = be32dec(&out->cmd_num_pages);
3829 
3830 	return (0);
3831 }
3832 
3833 struct bus_dma_iter {
3834 	bus_dmamap_t		i_map;
3835 	bus_size_t		i_offset;
3836 	unsigned int		i_index;
3837 };
3838 
3839 static void
3840 bus_dma_iter_init(struct bus_dma_iter *i, bus_dmamap_t map)
3841 {
3842 	i->i_map = map;
3843 	i->i_offset = 0;
3844 	i->i_index = 0;
3845 }
3846 
3847 static bus_addr_t
3848 bus_dma_iter_addr(struct bus_dma_iter *i)
3849 {
3850 	return (i->i_map->dm_segs[i->i_index].ds_addr + i->i_offset);
3851 }
3852 
3853 static void
3854 bus_dma_iter_add(struct bus_dma_iter *i, bus_size_t size)
3855 {
3856 	bus_dma_segment_t *seg = i->i_map->dm_segs + i->i_index;
3857 	bus_size_t diff;
3858 
3859 	do {
3860 		diff = seg->ds_len - i->i_offset;
3861 		if (size < diff)
3862 			break;
3863 
3864 		size -= diff;
3865 
3866 		seg++;
3867 
3868 		i->i_offset = 0;
3869 		i->i_index++;
3870 	} while (size > 0);
3871 
3872 	i->i_offset += size;
3873 }
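
/*
 * Editor's sketch: the iterator walks a loaded DMA map in arbitrary
 * byte steps, crossing segment boundaries transparently.
 * mcx_add_pages() below uses it to emit one bus address per page:
 *
 *	bus_dma_iter_init(&iter, mhm->mhm_map);
 *	be64enc(&pas[j], bus_dma_iter_addr(&iter));
 *	bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
 */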
3874 
3875 static int
3876 mcx_add_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t func_id)
3877 {
3878 	struct mcx_dmamem mxm;
3879 	struct mcx_cmdq_entry *cqe;
3880 	struct mcx_cmd_manage_pages_in *in;
3881 	struct mcx_cmd_manage_pages_out *out;
3882 	unsigned int paslen, nmb, i, j, npages;
3883 	struct bus_dma_iter iter;
3884 	uint64_t *pas;
3885 	uint8_t status;
3886 	uint8_t token = mcx_cmdq_token(sc);
3887 	int error;
3888 
3889 	npages = mhm->mhm_npages;
3890 
3891 	paslen = sizeof(*pas) * npages;
3892 	nmb = howmany(paslen, MCX_CMDQ_MAILBOX_DATASIZE);
3893 
3894 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3895 	mcx_cmdq_init(sc, cqe, sizeof(*in) + paslen, sizeof(*out), token);
3896 
3897 	in = mcx_cmdq_in(cqe);
3898 	in->cmd_opcode = htobe16(MCX_CMD_MANAGE_PAGES);
3899 	in->cmd_op_mod = htobe16(MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS);
3900 	in->cmd_func_id = func_id;
3901 	be32enc(&in->cmd_input_num_entries, npages);
3902 
3903 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
3904 	    &cqe->cq_input_ptr, token) != 0) {
3905 		printf(", unable to allocate manage pages mailboxen\n");
3906 		return (-1);
3907 	}
3908 
3909 	bus_dma_iter_init(&iter, mhm->mhm_map);
3910 	for (i = 0; i < nmb; i++) {
3911 		unsigned int lim;
3912 
3913 		pas = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, i));
3914 		lim = uimin(MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas), npages);
3915 
3916 		for (j = 0; j < lim; j++) {
3917 			be64enc(&pas[j], bus_dma_iter_addr(&iter));
3918 			bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
3919 		}
3920 
3921 		npages -= lim;
3922 	}
3923 
3924 	mcx_cmdq_mboxes_sign(&mxm, nmb);
3925 
3926 	mcx_cmdq_post(sc, cqe, 0);
3927 	error = mcx_cmdq_poll(sc, cqe, 1000);
3928 	if (error != 0) {
3929 		printf(", manage pages timeout\n");
3930 		goto free;
3931 	}
3932 	error = mcx_cmdq_verify(cqe);
3933 	if (error != 0) {
3934 		printf(", manage pages reply corrupt\n");
3935 		goto free;
3936 	}
3937 
3938 	status = cqe->cq_output_data[0];
3939 	if (status != MCX_CQ_STATUS_OK) {
3940 		printf(", manage pages failed (%x)\n", status);
3941 		error = -1;
3942 		goto free;
3943 	}
3944 
3945 free:
3946 	mcx_dmamem_free(sc, &mxm);
3947 
3948 	return (error);
3949 }
3950 
3951 static int
3952 mcx_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t type)
3953 {
3954 	int32_t npages;
3955 	uint16_t func_id;
3956 
3957 	if (mcx_query_pages(sc, type, &npages, &func_id) != 0) {
3958 		/* error printed by mcx_query_pages */
3959 		return (-1);
3960 	}
3961 
3962 	if (npages < 1)
3963 		return (0);
3964 
3965 	if (mcx_hwmem_alloc(sc, mhm, npages) != 0) {
3966 		printf(", unable to allocate hwmem\n");
3967 		return (-1);
3968 	}
3969 
3970 	if (mcx_add_pages(sc, mhm, func_id) != 0) {
3971 		printf(", unable to add hwmem\n");
3972 		goto free;
3973 	}
3974 
3975 	return (0);
3976 
3977 free:
3978 	mcx_hwmem_free(sc, mhm);
3979 
3980 	return (-1);
3981 }
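
/*
 * Editor's note: this is the firmware page-donation handshake.
 * QUERY_PAGES reports how many pages the device wants for this opmod
 * type; if it wants any, the driver allocates host memory and hands
 * it over with MANAGE_PAGES.  (The count is signed; a negative
 * value, which this driver simply ignores, would presumably mean the
 * firmware has pages to give back.)
 */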
3982 
3983 static int
3984 mcx_hca_max_caps(struct mcx_softc *sc)
3985 {
3986 	struct mcx_dmamem mxm;
3987 	struct mcx_cmdq_entry *cqe;
3988 	struct mcx_cmd_query_hca_cap_in *in;
3989 	struct mcx_cmd_query_hca_cap_out *out;
3990 	struct mcx_cmdq_mailbox *mb;
3991 	struct mcx_cap_device *hca;
3992 	uint8_t status;
3993 	uint8_t token = mcx_cmdq_token(sc);
3994 	int error;
3995 
3996 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3997 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3998 	    token);
3999 
4000 	in = mcx_cmdq_in(cqe);
4001 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
4002 	in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_MAX |
4003 	    MCX_CMD_QUERY_HCA_CAP_DEVICE);
4004 
4005 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
4006 	    &cqe->cq_output_ptr, token) != 0) {
4007 		printf(", unable to allocate query hca caps mailboxen\n");
4008 		return (-1);
4009 	}
4010 	mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
4011 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
4012 
4013 	mcx_cmdq_post(sc, cqe, 0);
4014 	error = mcx_cmdq_poll(sc, cqe, 1000);
4015 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
4016 
4017 	if (error != 0) {
4018 		printf(", query hca caps timeout\n");
4019 		goto free;
4020 	}
4021 	error = mcx_cmdq_verify(cqe);
4022 	if (error != 0) {
4023 		printf(", query hca caps reply corrupt\n");
4024 		goto free;
4025 	}
4026 
4027 	status = cqe->cq_output_data[0];
4028 	if (status != MCX_CQ_STATUS_OK) {
4029 		printf(", query hca caps failed (%x)\n", status);
4030 		error = -1;
4031 		goto free;
4032 	}
4033 
4034 	mb = mcx_cq_mbox(&mxm, 0);
4035 	hca = mcx_cq_mbox_data(mb);
4036 
4037 	if ((hca->port_type & MCX_CAP_DEVICE_PORT_TYPE)
4038 	    != MCX_CAP_DEVICE_PORT_TYPE_ETH) {
4039 		printf(", not in ethernet mode\n");
4040 		error = -1;
4041 		goto free;
4042 	}
4043 	if (hca->log_pg_sz > PAGE_SHIFT) {
4044 		printf(", device minimum page shift %u exceeds the system page shift\n",
4045 		    hca->log_pg_sz);
4046 		error = -1;
4047 		goto free;
4048 	}
4049 	/*
4050 	 * blueflame register is split into two buffers, and we must alternate
4051 	 * between the two of them.
4052 	 */
4053 	sc->sc_bf_size = (1 << hca->log_bf_reg_size) / 2;
4054 	sc->sc_max_rqt_size = (1 << hca->log_max_rqt_size);
4055 
4056 	if (hca->local_ca_ack_delay & MCX_CAP_DEVICE_MCAM_REG)
4057 		sc->sc_mcam_reg = 1;
4058 
4059 	sc->sc_mhz = be32dec(&hca->device_frequency_mhz);
4060 	sc->sc_khz = be32dec(&hca->device_frequency_khz);
4061 
4062 free:
4063 	mcx_dmamem_free(sc, &mxm);
4064 
4065 	return (error);
4066 }
4067 
4068 static int
4069 mcx_hca_set_caps(struct mcx_softc *sc)
4070 {
4071 	struct mcx_dmamem mxm;
4072 	struct mcx_cmdq_entry *cqe;
4073 	struct mcx_cmd_query_hca_cap_in *in;
4074 	struct mcx_cmd_query_hca_cap_out *out;
4075 	struct mcx_cmdq_mailbox *mb;
4076 	struct mcx_cap_device *hca;
4077 	uint8_t status;
4078 	uint8_t token = mcx_cmdq_token(sc);
4079 	int error;
4080 
4081 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4082 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
4083 	    token);
4084 
4085 	in = mcx_cmdq_in(cqe);
4086 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
4087 	in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_CURRENT |
4088 	    MCX_CMD_QUERY_HCA_CAP_DEVICE);
4089 
4090 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
4091 	    &cqe->cq_output_ptr, token) != 0) {
4092 		printf(", unable to allocate query hca caps mailboxen\n");
4093 		return (-1);
4094 	}
4095 	mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
4096 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
4097 
4098 	mcx_cmdq_post(sc, cqe, 0);
4099 	error = mcx_cmdq_poll(sc, cqe, 1000);
4100 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
4101 
4102 	if (error != 0) {
4103 		printf(", query hca caps timeout\n");
4104 		goto free;
4105 	}
4106 	error = mcx_cmdq_verify(cqe);
4107 	if (error != 0) {
4108 		printf(", query hca caps reply corrupt\n");
4109 		goto free;
4110 	}
4111 
4112 	status = cqe->cq_output_data[0];
4113 	if (status != MCX_CQ_STATUS_OK) {
4114 		printf(", query hca caps failed (%x)\n", status);
4115 		error = -1;
4116 		goto free;
4117 	}
4118 
4119 	mb = mcx_cq_mbox(&mxm, 0);
4120 	hca = mcx_cq_mbox_data(mb);
4121 
4122 	hca->log_pg_sz = PAGE_SHIFT;
4123 
4124 free:
4125 	mcx_dmamem_free(sc, &mxm);
4126 
4127 	return (error);
4128 }
4129 
4130 
4131 static int
4132 mcx_init_hca(struct mcx_softc *sc)
4133 {
4134 	struct mcx_cmdq_entry *cqe;
4135 	struct mcx_cmd_init_hca_in *in;
4136 	struct mcx_cmd_init_hca_out *out;
4137 	int error;
4138 	uint8_t status;
4139 
4140 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4141 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4142 
4143 	in = mcx_cmdq_in(cqe);
4144 	in->cmd_opcode = htobe16(MCX_CMD_INIT_HCA);
4145 	in->cmd_op_mod = htobe16(0);
4146 
4147 	mcx_cmdq_post(sc, cqe, 0);
4148 
4149 	error = mcx_cmdq_poll(sc, cqe, 1000);
4150 	if (error != 0) {
4151 		printf(", hca init timeout\n");
4152 		return (-1);
4153 	}
4154 	if (mcx_cmdq_verify(cqe) != 0) {
4155 		printf(", hca init command corrupt\n");
4156 		return (-1);
4157 	}
4158 
4159 	status = cqe->cq_output_data[0];
4160 	if (status != MCX_CQ_STATUS_OK) {
4161 		printf(", hca init failed (%x)\n", status);
4162 		return (-1);
4163 	}
4164 
4165 	return (0);
4166 }
4167 
4168 static int
4169 mcx_set_driver_version(struct mcx_softc *sc)
4170 {
4171 	struct mcx_dmamem mxm;
4172 	struct mcx_cmdq_entry *cqe;
4173 	struct mcx_cmd_set_driver_version_in *in;
4174 	struct mcx_cmd_set_driver_version_out *out;
4175 	int error;
4176 	int token;
4177 	uint8_t status;
4178 
4179 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4180 	token = mcx_cmdq_token(sc);
4181 	mcx_cmdq_init(sc, cqe, sizeof(*in) +
4182 	    sizeof(struct mcx_cmd_set_driver_version), sizeof(*out), token);
4183 
4184 	in = mcx_cmdq_in(cqe);
4185 	in->cmd_opcode = htobe16(MCX_CMD_SET_DRIVER_VERSION);
4186 	in->cmd_op_mod = htobe16(0);
4187 
4188 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4189 	    &cqe->cq_input_ptr, token) != 0) {
4190 		printf(", unable to allocate set driver version mailboxen\n");
4191 		return (-1);
4192 	}
4193 	strlcpy(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)),
4194 	    "OpenBSD,mcx,1.000.000000", MCX_CMDQ_MAILBOX_DATASIZE);
4195 
4196 	mcx_cmdq_mboxes_sign(&mxm, 1);
4197 	mcx_cmdq_post(sc, cqe, 0);
4198 
4199 	error = mcx_cmdq_poll(sc, cqe, 1000);
4200 	if (error != 0) {
4201 		printf(", set driver version timeout\n");
4202 		goto free;
4203 	}
4204 	if (mcx_cmdq_verify(cqe) != 0) {
4205 		printf(", set driver version command corrupt\n");
4206 		goto free;
4207 	}
4208 
4209 	status = cqe->cq_output_data[0];
4210 	if (status != MCX_CQ_STATUS_OK) {
4211 		printf(", set driver version failed (%x)\n", status);
4212 		error = -1;
4213 		goto free;
4214 	}
4215 
4216 free:
4217 	mcx_dmamem_free(sc, &mxm);
4218 
4219 	return (error);
4220 }
4221 
4222 static int
4223 mcx_iff(struct mcx_softc *sc)
4224 {
4225 	struct ifnet *ifp = &sc->sc_ec.ec_if;
4226 	struct mcx_dmamem mxm;
4227 	struct mcx_cmdq_entry *cqe;
4228 	struct mcx_cmd_modify_nic_vport_context_in *in;
4229 	struct mcx_cmd_modify_nic_vport_context_out *out;
4230 	struct mcx_nic_vport_ctx *ctx;
4231 	int error;
4232 	int token;
4233 	int insize;
4234 	uint32_t dest;
4235 
4236 	dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
4237 	    sc->sc_rss_flow_table_id;
4238 
4239 	/* enable or disable the promisc flow */
4240 	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
4241 		if (sc->sc_promisc_flow_enabled == 0) {
4242 			mcx_set_flow_table_entry_mac(sc,
4243 			    MCX_FLOW_GROUP_PROMISC, 0, NULL, dest);
4244 			sc->sc_promisc_flow_enabled = 1;
4245 		}
4246 	} else if (sc->sc_promisc_flow_enabled != 0) {
4247 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
4248 		sc->sc_promisc_flow_enabled = 0;
4249 	}
4250 
4251 	/* enable or disable the all-multicast flow */
4252 	if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
4253 		if (sc->sc_allmulti_flow_enabled == 0) {
4254 			uint8_t mcast[ETHER_ADDR_LEN];
4255 
4256 			memset(mcast, 0, sizeof(mcast));
4257 			mcast[0] = 0x01;
4258 			mcx_set_flow_table_entry_mac(sc,
4259 			    MCX_FLOW_GROUP_ALLMULTI, 0, mcast, dest);
4260 			sc->sc_allmulti_flow_enabled = 1;
4261 		}
4262 	} else if (sc->sc_allmulti_flow_enabled != 0) {
4263 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
4264 		sc->sc_allmulti_flow_enabled = 0;
4265 	}
4266 
	/* the vport context is laid out 240 bytes into the mailbox data */
4267 	insize = sizeof(struct mcx_nic_vport_ctx) + 240;
4268 
4269 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4270 	token = mcx_cmdq_token(sc);
4271 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4272 
4273 	in = mcx_cmdq_in(cqe);
4274 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_NIC_VPORT_CONTEXT);
4275 	in->cmd_op_mod = htobe16(0);
4276 	in->cmd_field_select = htobe32(
4277 	    MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC |
4278 	    MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU);
4279 
4280 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4281 		printf(", unable to allocate modify "
4282 		    "nic vport context mailboxen\n");
4283 		return (-1);
4284 	}
4285 	ctx = (struct mcx_nic_vport_ctx *)
4286 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 240);
4287 	ctx->vp_mtu = htobe32(sc->sc_hardmtu);
4288 	/*
4289 	 * always leave promisc-all enabled on the vport since we
4290 	 * can't give it a vlan list, and we're already doing multicast
4291 	 * filtering in the flow table.
4292 	 */
4293 	ctx->vp_flags = htobe16(MCX_NIC_VPORT_CTX_PROMISC_ALL);
4294 
4295 	mcx_cmdq_mboxes_sign(&mxm, 1);
4296 	mcx_cmdq_post(sc, cqe, 0);
4297 
4298 	error = mcx_cmdq_poll(sc, cqe, 1000);
4299 	if (error != 0) {
4300 		printf(", modify nic vport context timeout\n");
4301 		goto free;
4302 	}
4303 	if (mcx_cmdq_verify(cqe) != 0) {
4304 		printf(", modify nic vport context command corrupt\n");
4305 		goto free;
4306 	}
4307 
4308 	out = mcx_cmdq_out(cqe);
4309 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4310 		printf(", modify nic vport context failed (%x, %x)\n",
4311 		    out->cmd_status, be32toh(out->cmd_syndrome));
4312 		error = -1;
4313 		goto free;
4314 	}
4315 
4316 free:
4317 	mcx_dmamem_free(sc, &mxm);
4318 
4319 	return (error);
4320 }
4321 
4322 static int
4323 mcx_alloc_uar(struct mcx_softc *sc, int *uar)
4324 {
4325 	struct mcx_cmdq_entry *cqe;
4326 	struct mcx_cmd_alloc_uar_in *in;
4327 	struct mcx_cmd_alloc_uar_out *out;
4328 	int error;
4329 
4330 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4331 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4332 
4333 	in = mcx_cmdq_in(cqe);
4334 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_UAR);
4335 	in->cmd_op_mod = htobe16(0);
4336 
4337 	mcx_cmdq_post(sc, cqe, 0);
4338 
4339 	error = mcx_cmdq_poll(sc, cqe, 1000);
4340 	if (error != 0) {
4341 		printf(", alloc uar timeout\n");
4342 		return (-1);
4343 	}
4344 	if (mcx_cmdq_verify(cqe) != 0) {
4345 		printf(", alloc uar command corrupt\n");
4346 		return (-1);
4347 	}
4348 
4349 	out = mcx_cmdq_out(cqe);
4350 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4351 		printf(", alloc uar failed (%x)\n", out->cmd_status);
4352 		return (-1);
4353 	}
4354 
4355 	*uar = mcx_get_id(out->cmd_uar);
4356 	return (0);
4357 }
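
/*
 * Editor's note: mcx_alloc_uar() shows the common shape of the small
 * commands in this driver (mcx_alloc_pd() and mcx_alloc_tdomain()
 * below follow it exactly): the input and output both fit inline in
 * the command queue entry, so no mailboxes are needed; post, poll
 * for completion, verify the entry, check the status byte, then pull
 * the new object id out with mcx_get_id().
 */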
4358 
4359 static int
4360 mcx_create_eq(struct mcx_softc *sc, struct mcx_eq *eq, int uar,
4361     uint64_t events, int vector)
4362 {
4363 	struct mcx_cmdq_entry *cqe;
4364 	struct mcx_dmamem mxm;
4365 	struct mcx_cmd_create_eq_in *in;
4366 	struct mcx_cmd_create_eq_mb_in *mbin;
4367 	struct mcx_cmd_create_eq_out *out;
4368 	struct mcx_eq_entry *eqe;
4369 	int error;
4370 	uint64_t *pas;
4371 	int insize, npages, paslen, i, token;
4372 
4373 	eq->eq_cons = 0;
4374 
4375 	npages = howmany((1 << MCX_LOG_EQ_SIZE) * sizeof(struct mcx_eq_entry),
4376 	    MCX_PAGE_SIZE);
4377 	paslen = npages * sizeof(*pas);
4378 	insize = sizeof(struct mcx_cmd_create_eq_mb_in) + paslen;
4379 
4380 	if (mcx_dmamem_alloc(sc, &eq->eq_mem, npages * MCX_PAGE_SIZE,
4381 	    MCX_PAGE_SIZE) != 0) {
4382 		printf(", unable to allocate event queue memory\n");
4383 		return (-1);
4384 	}
4385 
4386 	eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&eq->eq_mem);
4387 	for (i = 0; i < (1 << MCX_LOG_EQ_SIZE); i++) {
4388 		eqe[i].eq_owner = MCX_EQ_ENTRY_OWNER_INIT;
4389 	}
4390 
4391 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4392 	token = mcx_cmdq_token(sc);
4393 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4394 
4395 	in = mcx_cmdq_in(cqe);
4396 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_EQ);
4397 	in->cmd_op_mod = htobe16(0);
4398 
4399 	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4400 	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4401 	    &cqe->cq_input_ptr, token) != 0) {
4402 		printf(", unable to allocate create eq mailboxen\n");
4403 		goto free_eq;
4404 	}
4405 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4406 	mbin->cmd_eq_ctx.eq_uar_size = htobe32(
4407 	    (MCX_LOG_EQ_SIZE << MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT) | uar);
4408 	mbin->cmd_eq_ctx.eq_intr = vector;
4409 	mbin->cmd_event_bitmask = htobe64(events);
4410 
4411 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
4412 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
4413 
4414 	/* physical addresses follow the mailbox in data */
4415 	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &eq->eq_mem);
4416 	mcx_cmdq_mboxes_sign(&mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE));
4417 	mcx_cmdq_post(sc, cqe, 0);
4418 
4419 	error = mcx_cmdq_poll(sc, cqe, 1000);
4420 	if (error != 0) {
4421 		printf(", create eq timeout\n");
4422 		goto free_mxm;
4423 	}
4424 	if (mcx_cmdq_verify(cqe) != 0) {
4425 		printf(", create eq command corrupt\n");
4426 		goto free_mxm;
4427 	}
4428 
4429 	out = mcx_cmdq_out(cqe);
4430 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4431 		printf(", create eq failed (%x, %x)\n", out->cmd_status,
4432 		    be32toh(out->cmd_syndrome));
4433 		goto free_mxm;
4434 	}
4435 
4436 	eq->eq_n = mcx_get_id(out->cmd_eqn);
4437 
4438 	mcx_dmamem_free(sc, &mxm);
4439 
4440 	mcx_arm_eq(sc, eq, uar);
4441 
4442 	return (0);
4443 
4444 free_mxm:
4445 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
4446 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
4447 	mcx_dmamem_free(sc, &mxm);
4448 free_eq:
4449 	mcx_dmamem_free(sc, &eq->eq_mem);
4450 	return (-1);
4451 }
4452 
4453 static int
4454 mcx_alloc_pd(struct mcx_softc *sc)
4455 {
4456 	struct mcx_cmdq_entry *cqe;
4457 	struct mcx_cmd_alloc_pd_in *in;
4458 	struct mcx_cmd_alloc_pd_out *out;
4459 	int error;
4460 
4461 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4462 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4463 
4464 	in = mcx_cmdq_in(cqe);
4465 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_PD);
4466 	in->cmd_op_mod = htobe16(0);
4467 
4468 	mcx_cmdq_post(sc, cqe, 0);
4469 
4470 	error = mcx_cmdq_poll(sc, cqe, 1000);
4471 	if (error != 0) {
4472 		printf(", alloc pd timeout\n");
4473 		return (-1);
4474 	}
4475 	if (mcx_cmdq_verify(cqe) != 0) {
4476 		printf(", alloc pd command corrupt\n");
4477 		return (-1);
4478 	}
4479 
4480 	out = mcx_cmdq_out(cqe);
4481 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4482 		printf(", alloc pd failed (%x)\n", out->cmd_status);
4483 		return (-1);
4484 	}
4485 
4486 	sc->sc_pd = mcx_get_id(out->cmd_pd);
4487 	return (0);
4488 }
4489 
4490 static int
4491 mcx_alloc_tdomain(struct mcx_softc *sc)
4492 {
4493 	struct mcx_cmdq_entry *cqe;
4494 	struct mcx_cmd_alloc_td_in *in;
4495 	struct mcx_cmd_alloc_td_out *out;
4496 	int error;
4497 
4498 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4499 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4500 
4501 	in = mcx_cmdq_in(cqe);
4502 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_TRANSPORT_DOMAIN);
4503 	in->cmd_op_mod = htobe16(0);
4504 
4505 	mcx_cmdq_post(sc, cqe, 0);
4506 
4507 	error = mcx_cmdq_poll(sc, cqe, 1000);
4508 	if (error != 0) {
4509 		printf(", alloc transport domain timeout\n");
4510 		return (-1);
4511 	}
4512 	if (mcx_cmdq_verify(cqe) != 0) {
4513 		printf(", alloc transport domain command corrupt\n");
4514 		return (-1);
4515 	}
4516 
4517 	out = mcx_cmdq_out(cqe);
4518 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4519 		printf(", alloc transport domain failed (%x)\n",
4520 		    out->cmd_status);
4521 		return (-1);
4522 	}
4523 
4524 	sc->sc_tdomain = mcx_get_id(out->cmd_tdomain);
4525 	return (0);
4526 }
4527 
4528 static int
4529 mcx_query_nic_vport_context(struct mcx_softc *sc, uint8_t *enaddr)
4530 {
4531 	struct mcx_dmamem mxm;
4532 	struct mcx_cmdq_entry *cqe;
4533 	struct mcx_cmd_query_nic_vport_context_in *in;
4534 	struct mcx_cmd_query_nic_vport_context_out *out;
4535 	struct mcx_nic_vport_ctx *ctx;
4536 	uint8_t *addr;
4537 	int error, token, i;
4538 
4539 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4540 	token = mcx_cmdq_token(sc);
4541 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx), token);
4542 
4543 	in = mcx_cmdq_in(cqe);
4544 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_NIC_VPORT_CONTEXT);
4545 	in->cmd_op_mod = htobe16(0);
4546 	in->cmd_allowed_list_type = 0;
4547 
4548 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4549 	    &cqe->cq_output_ptr, token) != 0) {
4550 		printf(", unable to allocate "
4551 		    "query nic vport context mailboxen\n");
4552 		return (-1);
4553 	}
4554 	mcx_cmdq_mboxes_sign(&mxm, 1);
4555 	mcx_cmdq_post(sc, cqe, 0);
4556 
4557 	error = mcx_cmdq_poll(sc, cqe, 1000);
4558 	if (error != 0) {
4559 		printf(", query nic vport context timeout\n");
4560 		goto free;
4561 	}
4562 	if (mcx_cmdq_verify(cqe) != 0) {
4563 		printf(", query nic vport context command corrupt\n");
4564 		goto free;
4565 	}
4566 
4567 	out = mcx_cmdq_out(cqe);
4568 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4569 		printf(", query nic vport context failed (%x, %x)\n",
4570 		    out->cmd_status, be32toh(out->cmd_syndrome));
4571 		error = -1;
4572 		goto free;
4573 	}
4574 
4575 	ctx = (struct mcx_nic_vport_ctx *)
4576 	    mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4577 	addr = (uint8_t *)&ctx->vp_perm_addr;
4578 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
4579 		enaddr[i] = addr[i + 2];
4580 	}
4581 free:
4582 	mcx_dmamem_free(sc, &mxm);
4583 
4584 	return (error);
4585 }
4586 
4587 static int
4588 mcx_query_special_contexts(struct mcx_softc *sc)
4589 {
4590 	struct mcx_cmdq_entry *cqe;
4591 	struct mcx_cmd_query_special_ctx_in *in;
4592 	struct mcx_cmd_query_special_ctx_out *out;
4593 	int error;
4594 
4595 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4596 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4597 
4598 	in = mcx_cmdq_in(cqe);
4599 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_SPECIAL_CONTEXTS);
4600 	in->cmd_op_mod = htobe16(0);
4601 
4602 	mcx_cmdq_post(sc, cqe, 0);
4603 
4604 	error = mcx_cmdq_poll(sc, cqe, 1000);
4605 	if (error != 0) {
4606 		printf(", query special contexts timeout\n");
4607 		return (-1);
4608 	}
4609 	if (mcx_cmdq_verify(cqe) != 0) {
4610 		printf(", query special contexts command corrupt\n");
4611 		return (-1);
4612 	}
4613 
4614 	out = mcx_cmdq_out(cqe);
4615 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4616 		printf(", query special contexts failed (%x)\n",
4617 		    out->cmd_status);
4618 		return (-1);
4619 	}
4620 
4621 	sc->sc_lkey = be32toh(out->cmd_resd_lkey);
4622 	return (0);
4623 }
4624 
4625 static int
4626 mcx_set_port_mtu(struct mcx_softc *sc, int mtu)
4627 {
4628 	struct mcx_reg_pmtu pmtu;
4629 	int error;
4630 
4631 	/* read max mtu */
4632 	memset(&pmtu, 0, sizeof(pmtu));
4633 	pmtu.rp_local_port = 1;
4634 	error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_READ, &pmtu,
4635 	    sizeof(pmtu));
4636 	if (error != 0) {
4637 		printf(", unable to get port MTU\n");
4638 		return error;
4639 	}
4640 
4641 	mtu = uimin(mtu, be16toh(pmtu.rp_max_mtu));
4642 	pmtu.rp_admin_mtu = htobe16(mtu);
4643 	error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_WRITE, &pmtu,
4644 	    sizeof(pmtu));
4645 	if (error != 0) {
4646 		printf(", unable to set port MTU\n");
4647 		return error;
4648 	}
4649 
4650 	sc->sc_hardmtu = mtu;
4651 	sc->sc_rxbufsz = roundup(mtu + ETHER_ALIGN, sizeof(long));
4652 	return 0;
4653 }
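
/*
 * Editor's worked example: with ETHER_ALIGN == 2 and an LP64
 * sizeof(long) == 8, an admin MTU of 9500 gives a receive buffer of
 * roundup(9502, 8) == 9504 bytes.
 */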
4654 
4655 static int
4656 mcx_create_cq(struct mcx_softc *sc, struct mcx_cq *cq, int uar, int db, int eqn)
4657 {
4658 	struct mcx_cmdq_entry *cmde;
4659 	struct mcx_cq_entry *cqe;
4660 	struct mcx_dmamem mxm;
4661 	struct mcx_cmd_create_cq_in *in;
4662 	struct mcx_cmd_create_cq_mb_in *mbin;
4663 	struct mcx_cmd_create_cq_out *out;
4664 	int error;
4665 	uint64_t *pas;
4666 	int insize, npages, paslen, i, token;
4667 
4668 	cq->cq_doorbell = MCX_CQ_DOORBELL_BASE + (MCX_CQ_DOORBELL_STRIDE * db);
4669 
4670 	npages = howmany((1 << MCX_LOG_CQ_SIZE) * sizeof(struct mcx_cq_entry),
4671 	    MCX_PAGE_SIZE);
4672 	paslen = npages * sizeof(*pas);
4673 	insize = sizeof(struct mcx_cmd_create_cq_mb_in) + paslen;
4674 
4675 	if (mcx_dmamem_alloc(sc, &cq->cq_mem, npages * MCX_PAGE_SIZE,
4676 	    MCX_PAGE_SIZE) != 0) {
4677 		printf("%s: unable to allocate completion queue memory\n",
4678 		    DEVNAME(sc));
4679 		return (-1);
4680 	}
4681 	cqe = MCX_DMA_KVA(&cq->cq_mem);
4682 	for (i = 0; i < (1 << MCX_LOG_CQ_SIZE); i++) {
4683 		cqe[i].cq_opcode_owner = MCX_CQ_ENTRY_FLAG_OWNER;
4684 	}
4685 
4686 	cmde = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4687 	token = mcx_cmdq_token(sc);
4688 	mcx_cmdq_init(sc, cmde, sizeof(*in) + insize, sizeof(*out), token);
4689 
4690 	in = mcx_cmdq_in(cmde);
4691 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_CQ);
4692 	in->cmd_op_mod = htobe16(0);
4693 
4694 	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4695 	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4696 	    &cmde->cq_input_ptr, token) != 0) {
4697 		printf("%s: unable to allocate create cq mailboxen\n",
4698 		    DEVNAME(sc));
4699 		goto free_cq;
4700 	}
4701 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4702 	mbin->cmd_cq_ctx.cq_uar_size = htobe32(
4703 	    (MCX_LOG_CQ_SIZE << MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT) | uar);
4704 	mbin->cmd_cq_ctx.cq_eqn = htobe32(eqn);
4705 	mbin->cmd_cq_ctx.cq_period_max_count = htobe32(
4706 	    (MCX_CQ_MOD_PERIOD << MCX_CQ_CTX_PERIOD_SHIFT) |
4707 	    MCX_CQ_MOD_COUNTER);
4708 	mbin->cmd_cq_ctx.cq_doorbell = htobe64(
4709 	    MCX_DMA_DVA(&sc->sc_doorbell_mem) + cq->cq_doorbell);
4710 
4711 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4712 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_PREREAD);
4713 
4714 	/* physical addresses follow the mailbox in data */
4715 	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &cq->cq_mem);
4716 	mcx_cmdq_post(sc, cmde, 0);
4717 
4718 	error = mcx_cmdq_poll(sc, cmde, 1000);
4719 	if (error != 0) {
4720 		printf("%s: create cq timeout\n", DEVNAME(sc));
4721 		goto free_mxm;
4722 	}
4723 	if (mcx_cmdq_verify(cmde) != 0) {
4724 		printf("%s: create cq command corrupt\n", DEVNAME(sc));
4725 		goto free_mxm;
4726 	}
4727 
4728 	out = mcx_cmdq_out(cmde);
4729 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4730 		printf("%s: create cq failed (%x, %x)\n", DEVNAME(sc),
4731 		    out->cmd_status, be32toh(out->cmd_syndrome));
4732 		goto free_mxm;
4733 	}
4734 
4735 	cq->cq_n = mcx_get_id(out->cmd_cqn);
4736 	cq->cq_cons = 0;
4737 	cq->cq_count = 0;
4738 
4739 	mcx_dmamem_free(sc, &mxm);
4740 
4741 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4742 	    cq->cq_doorbell, sizeof(struct mcx_cq_doorbell),
4743 	    BUS_DMASYNC_PREWRITE);
4744 
4745 	mcx_arm_cq(sc, cq, uar);
4746 
4747 	return (0);
4748 
4749 free_mxm:
4750 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4751 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
4752 	mcx_dmamem_free(sc, &mxm);
4753 free_cq:
4754 	mcx_dmamem_free(sc, &cq->cq_mem);
4755 	return (-1);
4756 }
4757 
4758 static int
4759 mcx_destroy_cq(struct mcx_softc *sc, struct mcx_cq *cq)
4760 {
4761 	struct mcx_cmdq_entry *cqe;
4762 	struct mcx_cmd_destroy_cq_in *in;
4763 	struct mcx_cmd_destroy_cq_out *out;
4764 	int error;
4765 	int token;
4766 
4767 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4768 	token = mcx_cmdq_token(sc);
4769 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4770 
4771 	in = mcx_cmdq_in(cqe);
4772 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_CQ);
4773 	in->cmd_op_mod = htobe16(0);
4774 	in->cmd_cqn = htobe32(cq->cq_n);
4775 
4776 	mcx_cmdq_post(sc, cqe, 0);
4777 	error = mcx_cmdq_poll(sc, cqe, 1000);
4778 	if (error != 0) {
4779 		printf("%s: destroy cq timeout\n", DEVNAME(sc));
4780 		return error;
4781 	}
4782 	if (mcx_cmdq_verify(cqe) != 0) {
4783 		printf("%s: destroy cq command corrupt\n", DEVNAME(sc));
4784 		return -1;
4785 	}
4786 
4787 	out = mcx_cmdq_out(cqe);
4788 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4789 		printf("%s: destroy cq failed (%x, %x)\n", DEVNAME(sc),
4790 		    out->cmd_status, be32toh(out->cmd_syndrome));
4791 		return -1;
4792 	}
4793 
4794 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4795 	    cq->cq_doorbell, sizeof(struct mcx_cq_doorbell),
4796 	    BUS_DMASYNC_POSTWRITE);
4797 
4798 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4799 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
4800 	mcx_dmamem_free(sc, &cq->cq_mem);
4801 
4802 	cq->cq_n = 0;
4803 	cq->cq_cons = 0;
4804 	cq->cq_count = 0;
4805 	return 0;
4806 }
4807 
4808 static int
4809 mcx_create_rq(struct mcx_softc *sc, struct mcx_rx *rx, int db, int cqn)
4810 {
4811 	struct mcx_cmdq_entry *cqe;
4812 	struct mcx_dmamem mxm;
4813 	struct mcx_cmd_create_rq_in *in;
4814 	struct mcx_cmd_create_rq_out *out;
4815 	struct mcx_rq_ctx *mbin;
4816 	int error;
4817 	uint64_t *pas;
4818 	uint32_t rq_flags;
4819 	int insize, npages, paslen, token;
4820 
4821 	rx->rx_doorbell = MCX_WQ_DOORBELL_BASE +
4822 	    (db * MCX_WQ_DOORBELL_STRIDE);
4823 
4824 	npages = howmany((1 << MCX_LOG_RQ_SIZE) * sizeof(struct mcx_rq_entry),
4825 	    MCX_PAGE_SIZE);
4826 	paslen = npages * sizeof(*pas);
4827 	insize = 0x10 + sizeof(struct mcx_rq_ctx) + paslen;
4828 
4829 	if (mcx_dmamem_alloc(sc, &rx->rx_rq_mem, npages * MCX_PAGE_SIZE,
4830 	    MCX_PAGE_SIZE) != 0) {
4831 		printf("%s: unable to allocate receive queue memory\n",
4832 		    DEVNAME(sc));
4833 		return (-1);
4834 	}
4835 
4836 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4837 	token = mcx_cmdq_token(sc);
4838 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4839 
4840 	in = mcx_cmdq_in(cqe);
4841 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQ);
4842 	in->cmd_op_mod = htobe16(0);
4843 
4844 	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4845 	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4846 	    &cqe->cq_input_ptr, token) != 0) {
4847 		printf("%s: unable to allocate create rq mailboxen\n",
4848 		    DEVNAME(sc));
4849 		goto free_rq;
4850 	}
4851 	mbin = (struct mcx_rq_ctx *)
4852 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4853 	rq_flags = MCX_RQ_CTX_RLKEY;
4854 	mbin->rq_flags = htobe32(rq_flags);
4855 	mbin->rq_cqn = htobe32(cqn);
4856 	mbin->rq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4857 	mbin->rq_wq.wq_pd = htobe32(sc->sc_pd);
4858 	mbin->rq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4859 	    rx->rx_doorbell);
4860 	mbin->rq_wq.wq_log_stride = htobe16(4);
4861 	mbin->rq_wq.wq_log_size = MCX_LOG_RQ_SIZE;
4862 
4863 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
4864 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_PREWRITE);
4865 
4866 	/* physical addresses follow the mailbox in data */
4867 	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10, npages, &rx->rx_rq_mem);
4868 	mcx_cmdq_post(sc, cqe, 0);
4869 
4870 	error = mcx_cmdq_poll(sc, cqe, 1000);
4871 	if (error != 0) {
4872 		printf("%s: create rq timeout\n", DEVNAME(sc));
4873 		goto free_mxm;
4874 	}
4875 	if (mcx_cmdq_verify(cqe) != 0) {
4876 		printf("%s: create rq command corrupt\n", DEVNAME(sc));
4877 		goto free_mxm;
4878 	}
4879 
4880 	out = mcx_cmdq_out(cqe);
4881 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4882 		printf("%s: create rq failed (%x, %x)\n", DEVNAME(sc),
4883 		    out->cmd_status, be32toh(out->cmd_syndrome));
4884 		goto free_mxm;
4885 	}
4886 
4887 	rx->rx_rqn = mcx_get_id(out->cmd_rqn);
4888 
4889 	mcx_dmamem_free(sc, &mxm);
4890 
4891 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4892 	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
4893 
4894 	return (0);
4895 
4896 free_mxm:
4897 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
4898 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
4899 	mcx_dmamem_free(sc, &mxm);
4900 free_rq:
4901 	mcx_dmamem_free(sc, &rx->rx_rq_mem);
4902 	return (-1);
4903 }
4904 
4905 static int
4906 mcx_ready_rq(struct mcx_softc *sc, struct mcx_rx *rx)
4907 {
4908 	struct mcx_cmdq_entry *cqe;
4909 	struct mcx_dmamem mxm;
4910 	struct mcx_cmd_modify_rq_in *in;
4911 	struct mcx_cmd_modify_rq_mb_in *mbin;
4912 	struct mcx_cmd_modify_rq_out *out;
4913 	int error;
4914 	int token;
4915 
4916 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4917 	token = mcx_cmdq_token(sc);
4918 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
4919 	    sizeof(*out), token);
4920 
4921 	in = mcx_cmdq_in(cqe);
4922 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_RQ);
4923 	in->cmd_op_mod = htobe16(0);
4924 	in->cmd_rq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | rx->rx_rqn);
4925 
4926 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4927 	    &cqe->cq_input_ptr, token) != 0) {
4928 		printf("%s: unable to allocate modify rq mailbox\n",
4929 		    DEVNAME(sc));
4930 		return (-1);
4931 	}
4932 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4933 	mbin->cmd_rq_ctx.rq_flags = htobe32(
4934 	    MCX_QUEUE_STATE_RDY << MCX_RQ_CTX_STATE_SHIFT);
4935 
4936 	mcx_cmdq_mboxes_sign(&mxm, 1);
4937 	mcx_cmdq_post(sc, cqe, 0);
4938 	error = mcx_cmdq_poll(sc, cqe, 1000);
4939 	if (error != 0) {
4940 		printf("%s: modify rq timeout\n", DEVNAME(sc));
4941 		goto free;
4942 	}
4943 	if (mcx_cmdq_verify(cqe) != 0) {
4944 		printf("%s: modify rq command corrupt\n", DEVNAME(sc));
4945 		goto free;
4946 	}
4947 
4948 	out = mcx_cmdq_out(cqe);
4949 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4950 		printf("%s: modify rq failed (%x, %x)\n", DEVNAME(sc),
4951 		    out->cmd_status, be32toh(out->cmd_syndrome));
4952 		error = -1;
4953 		goto free;
4954 	}
4955 
4956 free:
4957 	mcx_dmamem_free(sc, &mxm);
4958 	return (error);
4959 }
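
/*
 * Editor's note: MODIFY_RQ encodes the queue's current state in the
 * top nibble of cmd_rq_state (MCX_QUEUE_STATE_RST << 28, alongside
 * the rqn) and the requested state in the mailbox context, so this
 * moves a freshly created RQ from RST to RDY before it is used.
 * mcx_ready_sq() below performs the same transition for send queues.
 */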
4960 
4961 static int
4962 mcx_destroy_rq(struct mcx_softc *sc, struct mcx_rx *rx)
4963 {
4964 	struct mcx_cmdq_entry *cqe;
4965 	struct mcx_cmd_destroy_rq_in *in;
4966 	struct mcx_cmd_destroy_rq_out *out;
4967 	int error;
4968 	int token;
4969 
4970 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4971 	token = mcx_cmdq_token(sc);
4972 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4973 
4974 	in = mcx_cmdq_in(cqe);
4975 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQ);
4976 	in->cmd_op_mod = htobe16(0);
4977 	in->cmd_rqn = htobe32(rx->rx_rqn);
4978 
4979 	mcx_cmdq_post(sc, cqe, 0);
4980 	error = mcx_cmdq_poll(sc, cqe, 1000);
4981 	if (error != 0) {
4982 		printf("%s: destroy rq timeout\n", DEVNAME(sc));
4983 		return error;
4984 	}
4985 	if (mcx_cmdq_verify(cqe) != 0) {
4986 		printf("%s: destroy rq command corrupt\n", DEVNAME(sc));
4987 		return -1;
4988 	}
4989 
4990 	out = mcx_cmdq_out(cqe);
4991 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4992 		printf("%s: destroy rq failed (%x, %x)\n", DEVNAME(sc),
4993 		    out->cmd_status, be32toh(out->cmd_syndrome));
4994 		return -1;
4995 	}
4996 
4997 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4998 	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
4999 
5000 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
5001 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
5002 	mcx_dmamem_free(sc, &rx->rx_rq_mem);
5003 
5004 	rx->rx_rqn = 0;
5005 	return 0;
5006 }
5007 
5008 static int
5009 mcx_create_tir_direct(struct mcx_softc *sc, struct mcx_rx *rx, int *tirn)
5010 {
5011 	struct mcx_cmdq_entry *cqe;
5012 	struct mcx_dmamem mxm;
5013 	struct mcx_cmd_create_tir_in *in;
5014 	struct mcx_cmd_create_tir_mb_in *mbin;
5015 	struct mcx_cmd_create_tir_out *out;
5016 	int error;
5017 	int token;
5018 
5019 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5020 	token = mcx_cmdq_token(sc);
5021 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5022 	    sizeof(*out), token);
5023 
5024 	in = mcx_cmdq_in(cqe);
5025 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
5026 	in->cmd_op_mod = htobe16(0);
5027 
5028 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5029 	    &cqe->cq_input_ptr, token) != 0) {
5030 		printf("%s: unable to allocate create tir mailbox\n",
5031 		    DEVNAME(sc));
5032 		return (-1);
5033 	}
5034 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5035 	/* leave disp_type = 0, so packets get sent to the inline rqn */
5036 	mbin->cmd_inline_rqn = htobe32(rx->rx_rqn);
5037 	mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
5038 
5039 	mcx_cmdq_post(sc, cqe, 0);
5040 	error = mcx_cmdq_poll(sc, cqe, 1000);
5041 	if (error != 0) {
5042 		printf("%s: create tir timeout\n", DEVNAME(sc));
5043 		goto free;
5044 	}
5045 	if (mcx_cmdq_verify(cqe) != 0) {
5046 		printf("%s: create tir command corrupt\n", DEVNAME(sc));
5047 		goto free;
5048 	}
5049 
5050 	out = mcx_cmdq_out(cqe);
5051 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5052 		printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
5053 		    out->cmd_status, be32toh(out->cmd_syndrome));
5054 		error = -1;
5055 		goto free;
5056 	}
5057 
5058 	*tirn = mcx_get_id(out->cmd_tirn);
5059 free:
5060 	mcx_dmamem_free(sc, &mxm);
5061 	return (error);
5062 }
5063 
5064 static int
5065 mcx_create_tir_indirect(struct mcx_softc *sc, int rqtn, uint32_t hash_sel,
5066     int *tirn)
5067 {
5068 	struct mcx_cmdq_entry *cqe;
5069 	struct mcx_dmamem mxm;
5070 	struct mcx_cmd_create_tir_in *in;
5071 	struct mcx_cmd_create_tir_mb_in *mbin;
5072 	struct mcx_cmd_create_tir_out *out;
5073 	int error;
5074 	int token;
5075 
5076 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5077 	token = mcx_cmdq_token(sc);
5078 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5079 	    sizeof(*out), token);
5080 
5081 	in = mcx_cmdq_in(cqe);
5082 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
5083 	in->cmd_op_mod = htobe16(0);
5084 
5085 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5086 	    &cqe->cq_input_ptr, token) != 0) {
5087 		printf("%s: unable to allocate create tir mailbox\n",
5088 		    DEVNAME(sc));
5089 		return (-1);
5090 	}
5091 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5092 	mbin->cmd_disp_type = htobe32(MCX_TIR_CTX_DISP_TYPE_INDIRECT
5093 	    << MCX_TIR_CTX_DISP_TYPE_SHIFT);
5094 	mbin->cmd_indir_table = htobe32(rqtn);
5095 	mbin->cmd_tdomain = htobe32(sc->sc_tdomain |
5096 	    MCX_TIR_CTX_HASH_TOEPLITZ << MCX_TIR_CTX_HASH_SHIFT);
5097 	mbin->cmd_rx_hash_sel_outer = htobe32(hash_sel);
5098 	stoeplitz_to_key(&mbin->cmd_rx_hash_key,
5099 	    sizeof(mbin->cmd_rx_hash_key));
5100 
5101 	mcx_cmdq_post(sc, cqe, 0);
5102 	error = mcx_cmdq_poll(sc, cqe, 1000);
5103 	if (error != 0) {
5104 		printf("%s: create tir timeout\n", DEVNAME(sc));
5105 		goto free;
5106 	}
5107 	if (mcx_cmdq_verify(cqe) != 0) {
5108 		printf("%s: create tir command corrupt\n", DEVNAME(sc));
5109 		goto free;
5110 	}
5111 
5112 	out = mcx_cmdq_out(cqe);
5113 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5114 		printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
5115 		    out->cmd_status, be32toh(out->cmd_syndrome));
5116 		error = -1;
5117 		goto free;
5118 	}
5119 
5120 	*tirn = mcx_get_id(out->cmd_tirn);
5121 free:
5122 	mcx_dmamem_free(sc, &mxm);
5123 	return (error);
5124 }
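
/*
 * Editor's note contrasting the two TIR flavours above: the direct
 * TIR leaves disp_type at 0 so every packet is delivered to a single
 * inline RQ, while the indirect TIR points at an RQ table (rqtn) and
 * enables Toeplitz RSS hashing, with hash_sel choosing which outer
 * header fields feed the hash.
 */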
5125 
5126 static int
5127 mcx_destroy_tir(struct mcx_softc *sc, int tirn)
5128 {
5129 	struct mcx_cmdq_entry *cqe;
5130 	struct mcx_cmd_destroy_tir_in *in;
5131 	struct mcx_cmd_destroy_tir_out *out;
5132 	int error;
5133 	int token;
5134 
5135 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5136 	token = mcx_cmdq_token(sc);
5137 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5138 
5139 	in = mcx_cmdq_in(cqe);
5140 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIR);
5141 	in->cmd_op_mod = htobe16(0);
5142 	in->cmd_tirn = htobe32(tirn);
5143 
5144 	mcx_cmdq_post(sc, cqe, 0);
5145 	error = mcx_cmdq_poll(sc, cqe, 1000);
5146 	if (error != 0) {
5147 		printf("%s: destroy tir timeout\n", DEVNAME(sc));
5148 		return error;
5149 	}
5150 	if (mcx_cmdq_verify(cqe) != 0) {
5151 		printf("%s: destroy tir command corrupt\n", DEVNAME(sc));
5152 		return -1;
5153 	}
5154 
5155 	out = mcx_cmdq_out(cqe);
5156 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5157 		printf("%s: destroy tir failed (%x, %x)\n", DEVNAME(sc),
5158 		    out->cmd_status, be32toh(out->cmd_syndrome));
5159 		return -1;
5160 	}
5161 
5162 	return (0);
5163 }
5164 
5165 static int
5166 mcx_create_sq(struct mcx_softc *sc, struct mcx_tx *tx, int uar, int db,
5167     int cqn)
5168 {
5169 	struct mcx_cmdq_entry *cqe;
5170 	struct mcx_dmamem mxm;
5171 	struct mcx_cmd_create_sq_in *in;
5172 	struct mcx_sq_ctx *mbin;
5173 	struct mcx_cmd_create_sq_out *out;
5174 	int error;
5175 	uint64_t *pas;
5176 	int insize, npages, paslen, token;
5177 
5178 	tx->tx_doorbell = MCX_WQ_DOORBELL_BASE +
5179 	    (db * MCX_WQ_DOORBELL_STRIDE) + 4;
5180 
5181 	npages = howmany((1 << MCX_LOG_SQ_SIZE) * sizeof(struct mcx_sq_entry),
5182 	    MCX_PAGE_SIZE);
5183 	paslen = npages * sizeof(*pas);
5184 	insize = 0x10 + sizeof(struct mcx_sq_ctx) + paslen;
5185 
5186 	if (mcx_dmamem_alloc(sc, &tx->tx_sq_mem, npages * MCX_PAGE_SIZE,
5187 	    MCX_PAGE_SIZE) != 0) {
5188 		printf("%s: unable to allocate send queue memory\n",
5189 		    DEVNAME(sc));
5190 		return (-1);
5191 	}
5192 
5193 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5194 	token = mcx_cmdq_token(sc);
5195 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out),
5196 	    token);
5197 
5198 	in = mcx_cmdq_in(cqe);
5199 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_SQ);
5200 	in->cmd_op_mod = htobe16(0);
5201 
5202 	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
5203 	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
5204 	    &cqe->cq_input_ptr, token) != 0) {
5205 		printf("%s: unable to allocate create sq mailboxen\n",
5206 		    DEVNAME(sc));
5207 		goto free_sq;
5208 	}
5209 	mbin = (struct mcx_sq_ctx *)
5210 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
5211 	mbin->sq_flags = htobe32(MCX_SQ_CTX_RLKEY |
5212 	    (1 << MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT));
5213 	mbin->sq_cqn = htobe32(cqn);
5214 	mbin->sq_tis_lst_sz = htobe32(1 << MCX_SQ_CTX_TIS_LST_SZ_SHIFT);
5215 	mbin->sq_tis_num = htobe32(sc->sc_tis);
5216 	mbin->sq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
5217 	mbin->sq_wq.wq_pd = htobe32(sc->sc_pd);
5218 	mbin->sq_wq.wq_uar_page = htobe32(uar);
5219 	mbin->sq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
5220 	    tx->tx_doorbell);
5221 	mbin->sq_wq.wq_log_stride = htobe16(MCX_LOG_SQ_ENTRY_SIZE);
5222 	mbin->sq_wq.wq_log_size = MCX_LOG_SQ_SIZE;
5223 
5224 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5225 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_PREWRITE);
5226 
5227 	/* physical addresses follow the mailbox in data */
5228 	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10,
5229 	    npages, &tx->tx_sq_mem);
5230 	mcx_cmdq_post(sc, cqe, 0);
5231 
5232 	error = mcx_cmdq_poll(sc, cqe, 1000);
5233 	if (error != 0) {
5234 		printf("%s: create sq timeout\n", DEVNAME(sc));
5235 		goto free_mxm;
5236 	}
5237 	if (mcx_cmdq_verify(cqe) != 0) {
5238 		printf("%s: create sq command corrupt\n", DEVNAME(sc));
5239 		goto free_mxm;
5240 	}
5241 
5242 	out = mcx_cmdq_out(cqe);
5243 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5244 		printf("%s: create sq failed (%x, %x)\n", DEVNAME(sc),
5245 		    out->cmd_status, be32toh(out->cmd_syndrome));
5246 		goto free_mxm;
5247 	}
5248 
5249 	tx->tx_uar = uar;
5250 	tx->tx_sqn = mcx_get_id(out->cmd_sqn);
5251 
5252 	mcx_dmamem_free(sc, &mxm);
5253 
5254 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
5255 	    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
5256 
5257 	return (0);
5258 
5259 free_mxm:
5260 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5261 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
5262 	mcx_dmamem_free(sc, &mxm);
5263 free_sq:
5264 	mcx_dmamem_free(sc, &tx->tx_sq_mem);
5265 	return (-1);
5266 }
5267 
5268 static int
5269 mcx_destroy_sq(struct mcx_softc *sc, struct mcx_tx *tx)
5270 {
5271 	struct mcx_cmdq_entry *cqe;
5272 	struct mcx_cmd_destroy_sq_in *in;
5273 	struct mcx_cmd_destroy_sq_out *out;
5274 	int error;
5275 	int token;
5276 
5277 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5278 	token = mcx_cmdq_token(sc);
5279 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5280 
5281 	in = mcx_cmdq_in(cqe);
5282 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_SQ);
5283 	in->cmd_op_mod = htobe16(0);
5284 	in->cmd_sqn = htobe32(tx->tx_sqn);
5285 
5286 	mcx_cmdq_post(sc, cqe, 0);
5287 	error = mcx_cmdq_poll(sc, cqe, 1000);
5288 	if (error != 0) {
5289 		printf("%s: destroy sq timeout\n", DEVNAME(sc));
5290 		return error;
5291 	}
5292 	if (mcx_cmdq_verify(cqe) != 0) {
5293 		printf("%s: destroy sq command corrupt\n", DEVNAME(sc));
5294 		return -1;
5295 	}
5296 
5297 	out = mcx_cmdq_out(cqe);
5298 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5299 		printf("%s: destroy sq failed (%x, %x)\n", DEVNAME(sc),
5300 		    out->cmd_status, be32toh(out->cmd_syndrome));
5301 		return -1;
5302 	}
5303 
5304 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
5305 	    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
5306 
5307 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5308 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
5309 	mcx_dmamem_free(sc, &tx->tx_sq_mem);
5310 
5311 	tx->tx_sqn = 0;
5312 	return 0;
5313 }
5314 
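/*
 * Move a send queue from RST to RDY via MODIFY_SQ so the hardware
 * starts processing posted work queue entries.
 */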
5315 static int
5316 mcx_ready_sq(struct mcx_softc *sc, struct mcx_tx *tx)
5317 {
5318 	struct mcx_cmdq_entry *cqe;
5319 	struct mcx_dmamem mxm;
5320 	struct mcx_cmd_modify_sq_in *in;
5321 	struct mcx_cmd_modify_sq_mb_in *mbin;
5322 	struct mcx_cmd_modify_sq_out *out;
5323 	int error;
5324 	int token;
5325 
5326 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5327 	token = mcx_cmdq_token(sc);
5328 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5329 	    sizeof(*out), token);
5330 
5331 	in = mcx_cmdq_in(cqe);
5332 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_SQ);
5333 	in->cmd_op_mod = htobe16(0);
5334 	in->cmd_sq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | tx->tx_sqn);
5335 
5336 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5337 	    &cqe->cq_input_ptr, token) != 0) {
5338 		printf("%s: unable to allocate modify sq mailbox\n",
5339 		    DEVNAME(sc));
5340 		return (-1);
5341 	}
5342 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5343 	mbin->cmd_sq_ctx.sq_flags = htobe32(
5344 	    MCX_QUEUE_STATE_RDY << MCX_SQ_CTX_STATE_SHIFT);
5345 
5346 	mcx_cmdq_mboxes_sign(&mxm, 1);
5347 	mcx_cmdq_post(sc, cqe, 0);
5348 	error = mcx_cmdq_poll(sc, cqe, 1000);
5349 	if (error != 0) {
5350 		printf("%s: modify sq timeout\n", DEVNAME(sc));
5351 		goto free;
5352 	}
5353 	if (mcx_cmdq_verify(cqe) != 0) {
5354 		printf("%s: modify sq command corrupt\n", DEVNAME(sc));
5355 		goto free;
5356 	}
5357 
5358 	out = mcx_cmdq_out(cqe);
5359 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5360 		printf("%s: modify sq failed (%x, %x)\n", DEVNAME(sc),
5361 		    out->cmd_status, be32toh(out->cmd_syndrome));
5362 		error = -1;
5363 		goto free;
5364 	}
5365 
5366 free:
5367 	mcx_dmamem_free(sc, &mxm);
5368 	return (error);
5369 }
5370 
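/*
 * Create a transport interface send (TIS) context in our transport
 * domain; send queues attach to it through sq_tis_num.
 */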
5371 static int
5372 mcx_create_tis(struct mcx_softc *sc, int *tis)
5373 {
5374 	struct mcx_cmdq_entry *cqe;
5375 	struct mcx_dmamem mxm;
5376 	struct mcx_cmd_create_tis_in *in;
5377 	struct mcx_cmd_create_tis_mb_in *mbin;
5378 	struct mcx_cmd_create_tis_out *out;
5379 	int error;
5380 	int token;
5381 
5382 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5383 	token = mcx_cmdq_token(sc);
5384 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5385 	    sizeof(*out), token);
5386 
5387 	in = mcx_cmdq_in(cqe);
5388 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIS);
5389 	in->cmd_op_mod = htobe16(0);
5390 
5391 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5392 	    &cqe->cq_input_ptr, token) != 0) {
5393 		printf("%s: unable to allocate create tis mailbox\n",
5394 		    DEVNAME(sc));
5395 		return (-1);
5396 	}
5397 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5398 	mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
5399 
5400 	mcx_cmdq_mboxes_sign(&mxm, 1);
5401 	mcx_cmdq_post(sc, cqe, 0);
5402 	error = mcx_cmdq_poll(sc, cqe, 1000);
5403 	if (error != 0) {
5404 		printf("%s: create tis timeout\n", DEVNAME(sc));
5405 		goto free;
5406 	}
5407 	if (mcx_cmdq_verify(cqe) != 0) {
5408 		printf("%s: create tis command corrupt\n", DEVNAME(sc));
5409 		goto free;
5410 	}
5411 
5412 	out = mcx_cmdq_out(cqe);
5413 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5414 		printf("%s: create tis failed (%x, %x)\n", DEVNAME(sc),
5415 		    out->cmd_status, be32toh(out->cmd_syndrome));
5416 		error = -1;
5417 		goto free;
5418 	}
5419 
5420 	*tis = mcx_get_id(out->cmd_tisn);
5421 free:
5422 	mcx_dmamem_free(sc, &mxm);
5423 	return (error);
5424 }
5425 
5426 static int
5427 mcx_destroy_tis(struct mcx_softc *sc, int tis)
5428 {
5429 	struct mcx_cmdq_entry *cqe;
5430 	struct mcx_cmd_destroy_tis_in *in;
5431 	struct mcx_cmd_destroy_tis_out *out;
5432 	int error;
5433 	int token;
5434 
5435 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5436 	token = mcx_cmdq_token(sc);
5437 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5438 
5439 	in = mcx_cmdq_in(cqe);
5440 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIS);
5441 	in->cmd_op_mod = htobe16(0);
5442 	in->cmd_tisn = htobe32(tis);
5443 
5444 	mcx_cmdq_post(sc, cqe, 0);
5445 	error = mcx_cmdq_poll(sc, cqe, 1000);
5446 	if (error != 0) {
5447 		printf("%s: destroy tis timeout\n", DEVNAME(sc));
5448 		return error;
5449 	}
5450 	if (mcx_cmdq_verify(cqe) != 0) {
5451 		printf("%s: destroy tis command corrupt\n", DEVNAME(sc));
5452 		return error;
5453 	}
5454 
5455 	out = mcx_cmdq_out(cqe);
5456 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5457 		printf("%s: destroy tis failed (%x, %x)\n", DEVNAME(sc),
5458 		    out->cmd_status, be32toh(out->cmd_syndrome));
5459 		return -1;
5460 	}
5461 
5462 	return 0;
5463 }
5464 
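/*
 * Create an RQ table (RQT): the indirection table of receive queue
 * numbers used to spread received packets across queues.
 */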
5465 static int
5466 mcx_create_rqt(struct mcx_softc *sc, int size, int *rqns, int *rqt)
5467 {
5468 	struct mcx_cmdq_entry *cqe;
5469 	struct mcx_dmamem mxm;
5470 	struct mcx_cmd_create_rqt_in *in;
5471 	struct mcx_cmd_create_rqt_mb_in *mbin;
5472 	struct mcx_cmd_create_rqt_out *out;
5473 	struct mcx_rqt_ctx *rqt_ctx;
5474 	int *rqtn;
5475 	int error;
5476 	int token;
5477 	int i;
5478 
5479 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5480 	token = mcx_cmdq_token(sc);
5481 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) +
5482 	    (size * sizeof(int)), sizeof(*out), token);
5483 
5484 	in = mcx_cmdq_in(cqe);
5485 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQT);
5486 	in->cmd_op_mod = htobe16(0);
5487 
5488 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5489 	    &cqe->cq_input_ptr, token) != 0) {
5490 		printf("%s: unable to allocate create rqt mailbox\n",
5491 		    DEVNAME(sc));
5492 		return (-1);
5493 	}
5494 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5495 	rqt_ctx = &mbin->cmd_rqt;
5496 	rqt_ctx->cmd_rqt_max_size = htobe16(sc->sc_max_rqt_size);
5497 	rqt_ctx->cmd_rqt_actual_size = htobe16(size);
5498 
5499 	/* rqt list follows the rqt context */
5500 	rqtn = (int *)(rqt_ctx + 1);
5501 	for (i = 0; i < size; i++) {
5502 		rqtn[i] = htobe32(rqns[i]);
5503 	}
5504 
5505 	mcx_cmdq_mboxes_sign(&mxm, 1);
5506 	mcx_cmdq_post(sc, cqe, 0);
5507 	error = mcx_cmdq_poll(sc, cqe, 1000);
5508 	if (error != 0) {
5509 		printf("%s: create rqt timeout\n", DEVNAME(sc));
5510 		goto free;
5511 	}
5512 	if (mcx_cmdq_verify(cqe) != 0) {
5513 		printf("%s: create rqt command corrupt\n", DEVNAME(sc));
5514 		goto free;
5515 	}
5516 
5517 	out = mcx_cmdq_out(cqe);
5518 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5519 		printf("%s: create rqt failed (%x, %x)\n", DEVNAME(sc),
5520 		    out->cmd_status, be32toh(out->cmd_syndrome));
5521 		error = -1;
5522 		goto free;
5523 	}
5524 
5525 	*rqt = mcx_get_id(out->cmd_rqtn);
5526 	return (0);
5527 free:
5528 	mcx_dmamem_free(sc, &mxm);
5529 	return (error);
5530 }
5531 
5532 static int
5533 mcx_destroy_rqt(struct mcx_softc *sc, int rqt)
5534 {
5535 	struct mcx_cmdq_entry *cqe;
5536 	struct mcx_cmd_destroy_rqt_in *in;
5537 	struct mcx_cmd_destroy_rqt_out *out;
5538 	int error;
5539 	int token;
5540 
5541 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5542 	token = mcx_cmdq_token(sc);
5543 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5544 
5545 	in = mcx_cmdq_in(cqe);
5546 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQT);
5547 	in->cmd_op_mod = htobe16(0);
5548 	in->cmd_rqtn = htobe32(rqt);
5549 
5550 	mcx_cmdq_post(sc, cqe, 0);
5551 	error = mcx_cmdq_poll(sc, cqe, 1000);
5552 	if (error != 0) {
5553 		printf("%s: destroy rqt timeout\n", DEVNAME(sc));
5554 		return error;
5555 	}
5556 	if (mcx_cmdq_verify(cqe) != 0) {
5557 		printf("%s: destroy rqt command corrupt\n", DEVNAME(sc));
5558 		return error;
5559 	}
5560 
5561 	out = mcx_cmdq_out(cqe);
5562 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5563 		printf("%s: destroy rqt failed (%x, %x)\n", DEVNAME(sc),
5564 		    out->cmd_status, be32toh(out->cmd_syndrome));
5565 		return -1;
5566 	}
5567 
5568 	return 0;
5569 }
5570 
5571 #if 0
5572 static int
5573 mcx_alloc_flow_counter(struct mcx_softc *sc, int i)
5574 {
5575 	struct mcx_cmdq_entry *cqe;
5576 	struct mcx_cmd_alloc_flow_counter_in *in;
5577 	struct mcx_cmd_alloc_flow_counter_out *out;
5578 	int error;
5579 
5580 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5581 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
5582 
5583 	in = mcx_cmdq_in(cqe);
5584 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_FLOW_COUNTER);
5585 	in->cmd_op_mod = htobe16(0);
5586 
5587 	mcx_cmdq_post(sc, cqe, 0);
5588 
5589 	error = mcx_cmdq_poll(sc, cqe, 1000);
5590 	if (error != 0) {
5591 		printf("%s: alloc flow counter timeout\n", DEVNAME(sc));
5592 		return (-1);
5593 	}
5594 	if (mcx_cmdq_verify(cqe) != 0) {
5595 		printf("%s: alloc flow counter command corrupt\n", DEVNAME(sc));
5596 		return (-1);
5597 	}
5598 
5599 	out = (struct mcx_cmd_alloc_flow_counter_out *)cqe->cq_output_data;
5600 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5601 		printf("%s: alloc flow counter failed (%x)\n", DEVNAME(sc),
5602 		    out->cmd_status);
5603 		return (-1);
5604 	}
5605 
5606 	sc->sc_flow_counter_id[i] = be16toh(out->cmd_flow_counter_id);
5607 	printf("flow counter id %d = %d\n", i, sc->sc_flow_counter_id[i]);
5608 
5609 	return (0);
5610 }
5611 #endif
5612 
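/*
 * Create an RX flow table of 2^log_size entries at the given level;
 * flow groups and entries are added to it separately below.
 */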
5613 static int
5614 mcx_create_flow_table(struct mcx_softc *sc, int log_size, int level,
5615     int *flow_table_id)
5616 {
5617 	struct mcx_cmdq_entry *cqe;
5618 	struct mcx_dmamem mxm;
5619 	struct mcx_cmd_create_flow_table_in *in;
5620 	struct mcx_cmd_create_flow_table_mb_in *mbin;
5621 	struct mcx_cmd_create_flow_table_out *out;
5622 	int error;
5623 	int token;
5624 
5625 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5626 	token = mcx_cmdq_token(sc);
5627 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5628 	    sizeof(*out), token);
5629 
5630 	in = mcx_cmdq_in(cqe);
5631 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_TABLE);
5632 	in->cmd_op_mod = htobe16(0);
5633 
5634 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5635 	    &cqe->cq_input_ptr, token) != 0) {
5636 		printf("%s: unable to allocate create flow table mailbox\n",
5637 		    DEVNAME(sc));
5638 		return (-1);
5639 	}
5640 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5641 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5642 	mbin->cmd_ctx.ft_log_size = log_size;
5643 	mbin->cmd_ctx.ft_level = level;
5644 
5645 	mcx_cmdq_mboxes_sign(&mxm, 1);
5646 	mcx_cmdq_post(sc, cqe, 0);
5647 	error = mcx_cmdq_poll(sc, cqe, 1000);
5648 	if (error != 0) {
5649 		printf("%s: create flow table timeout\n", DEVNAME(sc));
5650 		goto free;
5651 	}
5652 	if (mcx_cmdq_verify(cqe) != 0) {
5653 		printf("%s: create flow table command corrupt\n", DEVNAME(sc));
5654 		goto free;
5655 	}
5656 
5657 	out = mcx_cmdq_out(cqe);
5658 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5659 		printf("%s: create flow table failed (%x, %x)\n", DEVNAME(sc),
5660 		    out->cmd_status, be32toh(out->cmd_syndrome));
5661 		error = -1;
5662 		goto free;
5663 	}
5664 
5665 	*flow_table_id = mcx_get_id(out->cmd_table_id);
5666 free:
5667 	mcx_dmamem_free(sc, &mxm);
5668 	return (error);
5669 }
5670 
5671 static int
5672 mcx_set_flow_table_root(struct mcx_softc *sc, int flow_table_id)
5673 {
5674 	struct mcx_cmdq_entry *cqe;
5675 	struct mcx_dmamem mxm;
5676 	struct mcx_cmd_set_flow_table_root_in *in;
5677 	struct mcx_cmd_set_flow_table_root_mb_in *mbin;
5678 	struct mcx_cmd_set_flow_table_root_out *out;
5679 	int error;
5680 	int token;
5681 
5682 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5683 	token = mcx_cmdq_token(sc);
5684 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5685 	    sizeof(*out), token);
5686 
5687 	in = mcx_cmdq_in(cqe);
5688 	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ROOT);
5689 	in->cmd_op_mod = htobe16(0);
5690 
5691 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5692 	    &cqe->cq_input_ptr, token) != 0) {
5693 		printf("%s: unable to allocate set flow table root mailbox\n",
5694 		    DEVNAME(sc));
5695 		return (-1);
5696 	}
5697 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5698 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5699 	mbin->cmd_table_id = htobe32(flow_table_id);
5700 
5701 	mcx_cmdq_mboxes_sign(&mxm, 1);
5702 	mcx_cmdq_post(sc, cqe, 0);
5703 	error = mcx_cmdq_poll(sc, cqe, 1000);
5704 	if (error != 0) {
5705 		printf("%s: set flow table root timeout\n", DEVNAME(sc));
5706 		goto free;
5707 	}
5708 	if (mcx_cmdq_verify(cqe) != 0) {
5709 		printf("%s: set flow table root command corrupt\n",
5710 		    DEVNAME(sc));
5711 		goto free;
5712 	}
5713 
5714 	out = mcx_cmdq_out(cqe);
5715 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5716 		printf("%s: set flow table root failed (%x, %x)\n",
5717 		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5718 		error = -1;
5719 		goto free;
5720 	}
5721 
5722 free:
5723 	mcx_dmamem_free(sc, &mxm);
5724 	return (error);
5725 }
5726 
5727 static int
5728 mcx_destroy_flow_table(struct mcx_softc *sc, int flow_table_id)
5729 {
5730 	struct mcx_cmdq_entry *cqe;
5731 	struct mcx_dmamem mxm;
5732 	struct mcx_cmd_destroy_flow_table_in *in;
5733 	struct mcx_cmd_destroy_flow_table_mb_in *mb;
5734 	struct mcx_cmd_destroy_flow_table_out *out;
5735 	int error;
5736 	int token;
5737 
5738 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5739 	token = mcx_cmdq_token(sc);
5740 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
5741 
5742 	in = mcx_cmdq_in(cqe);
5743 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_TABLE);
5744 	in->cmd_op_mod = htobe16(0);
5745 
5746 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5747 	    &cqe->cq_input_ptr, token) != 0) {
5748 		printf("%s: unable to allocate destroy flow table mailbox\n",
5749 		    DEVNAME(sc));
5750 		return (-1);
5751 	}
5752 	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5753 	mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5754 	mb->cmd_table_id = htobe32(flow_table_id);
5755 
5756 	mcx_cmdq_mboxes_sign(&mxm, 1);
5757 	mcx_cmdq_post(sc, cqe, 0);
5758 	error = mcx_cmdq_poll(sc, cqe, 1000);
5759 	if (error != 0) {
5760 		printf("%s: destroy flow table timeout\n", DEVNAME(sc));
5761 		goto free;
5762 	}
5763 	if (mcx_cmdq_verify(cqe) != 0) {
5764 		printf("%s: destroy flow table command corrupt\n",
5765 		    DEVNAME(sc));
5766 		goto free;
5767 	}
5768 
5769 	out = mcx_cmdq_out(cqe);
5770 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5771 		printf("%s: destroy flow table failed (%x, %x)\n", DEVNAME(sc),
5772 		    out->cmd_status, be32toh(out->cmd_syndrome));
5773 		error = -1;
5774 		goto free;
5775 	}
5776 
5777 free:
5778 	mcx_dmamem_free(sc, &mxm);
5779 	return (error);
5780 }
5781 
5782 
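/*
 * Create a flow group: a contiguous range of flow table entries that
 * share one set of match criteria. The group's id and extent are
 * cached in sc_flow_group for later entry updates.
 */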
5783 static int
5784 mcx_create_flow_group(struct mcx_softc *sc, int flow_table_id, int group,
5785     int start, int size, int match_enable, struct mcx_flow_match *match)
5786 {
5787 	struct mcx_cmdq_entry *cqe;
5788 	struct mcx_dmamem mxm;
5789 	struct mcx_cmd_create_flow_group_in *in;
5790 	struct mcx_cmd_create_flow_group_mb_in *mbin;
5791 	struct mcx_cmd_create_flow_group_out *out;
5792 	struct mcx_flow_group *mfg;
5793 	int error;
5794 	int token;
5795 
5796 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5797 	token = mcx_cmdq_token(sc);
5798 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
5799 	    token);
5800 
5801 	in = mcx_cmdq_in(cqe);
5802 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_GROUP);
5803 	in->cmd_op_mod = htobe16(0);
5804 
5805 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
5806 	    != 0) {
5807 		printf("%s: unable to allocate create flow group mailbox\n",
5808 		    DEVNAME(sc));
5809 		return (-1);
5810 	}
5811 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5812 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5813 	mbin->cmd_table_id = htobe32(flow_table_id);
5814 	mbin->cmd_start_flow_index = htobe32(start);
5815 	mbin->cmd_end_flow_index = htobe32(start + (size - 1));
5816 
5817 	mbin->cmd_match_criteria_enable = match_enable;
5818 	memcpy(&mbin->cmd_match_criteria, match, sizeof(*match));
5819 
5820 	mcx_cmdq_mboxes_sign(&mxm, 2);
5821 	mcx_cmdq_post(sc, cqe, 0);
5822 	error = mcx_cmdq_poll(sc, cqe, 1000);
5823 	if (error != 0) {
5824 		printf("%s: create flow group timeout\n", DEVNAME(sc));
5825 		goto free;
5826 	}
5827 	if (mcx_cmdq_verify(cqe) != 0) {
5828 		printf("%s: create flow group command corrupt\n", DEVNAME(sc));
5829 		goto free;
5830 	}
5831 
5832 	out = mcx_cmdq_out(cqe);
5833 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5834 		printf("%s: create flow group failed (%x, %x)\n", DEVNAME(sc),
5835 		    out->cmd_status, be32toh(out->cmd_syndrome));
5836 		error = -1;
5837 		goto free;
5838 	}
5839 
5840 	mfg = &sc->sc_flow_group[group];
5841 	mfg->g_id = mcx_get_id(out->cmd_group_id);
5842 	mfg->g_table = flow_table_id;
5843 	mfg->g_start = start;
5844 	mfg->g_size = size;
5845 
5846 free:
5847 	mcx_dmamem_free(sc, &mxm);
5848 	return (error);
5849 }
5850 
5851 static int
5852 mcx_destroy_flow_group(struct mcx_softc *sc, int group)
5853 {
5854 	struct mcx_cmdq_entry *cqe;
5855 	struct mcx_dmamem mxm;
5856 	struct mcx_cmd_destroy_flow_group_in *in;
5857 	struct mcx_cmd_destroy_flow_group_mb_in *mb;
5858 	struct mcx_cmd_destroy_flow_group_out *out;
5859 	struct mcx_flow_group *mfg;
5860 	int error;
5861 	int token;
5862 
5863 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5864 	token = mcx_cmdq_token(sc);
5865 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
5866 
5867 	in = mcx_cmdq_in(cqe);
5868 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_GROUP);
5869 	in->cmd_op_mod = htobe16(0);
5870 
5871 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5872 	    &cqe->cq_input_ptr, token) != 0) {
5873 		printf("%s: unable to allocate destroy flow group mailbox\n",
5874 		    DEVNAME(sc));
5875 		return (-1);
5876 	}
5877 	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5878 	mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5879 	mfg = &sc->sc_flow_group[group];
5880 	mb->cmd_table_id = htobe32(mfg->g_table);
5881 	mb->cmd_group_id = htobe32(mfg->g_id);
5882 
5883 	mcx_cmdq_mboxes_sign(&mxm, 2);
5884 	mcx_cmdq_post(sc, cqe, 0);
5885 	error = mcx_cmdq_poll(sc, cqe, 1000);
5886 	if (error != 0) {
5887 		printf("%s: destroy flow group timeout\n", DEVNAME(sc));
5888 		goto free;
5889 	}
5890 	if (mcx_cmdq_verify(cqe) != 0) {
5891 		printf("%s: destroy flow group command corrupt\n", DEVNAME(sc));
5892 		goto free;
5893 	}
5894 
5895 	out = mcx_cmdq_out(cqe);
5896 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5897 		printf("%s: destroy flow group failed (%x, %x)\n", DEVNAME(sc),
5898 		    out->cmd_status, be32toh(out->cmd_syndrome));
5899 		error = -1;
5900 		goto free;
5901 	}
5902 
5903 	mfg->g_id = -1;
5904 	mfg->g_table = -1;
5905 	mfg->g_size = 0;
5906 	mfg->g_start = 0;
5907 free:
5908 	mcx_dmamem_free(sc, &mxm);
5909 	return (error);
5910 }
5911 
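/*
 * Install a flow table entry that matches on destination MAC address
 * (or matches everything if macaddr is NULL) and forwards matching
 * packets to the encoded destination supplied by the caller.
 */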
5912 static int
5913 mcx_set_flow_table_entry_mac(struct mcx_softc *sc, int group, int index,
5914     const uint8_t *macaddr, uint32_t dest)
5915 {
5916 	struct mcx_cmdq_entry *cqe;
5917 	struct mcx_dmamem mxm;
5918 	struct mcx_cmd_set_flow_table_entry_in *in;
5919 	struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
5920 	struct mcx_cmd_set_flow_table_entry_out *out;
5921 	struct mcx_flow_group *mfg;
5922 	uint32_t *pdest;
5923 	int error;
5924 	int token;
5925 
5926 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5927 	token = mcx_cmdq_token(sc);
5928 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*pdest),
5929 	    sizeof(*out), token);
5930 
5931 	in = mcx_cmdq_in(cqe);
5932 	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
5933 	in->cmd_op_mod = htobe16(0);
5934 
5935 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
5936 	    != 0) {
5937 		printf("%s: unable to allocate set flow table entry mailbox\n",
5938 		    DEVNAME(sc));
5939 		return (-1);
5940 	}
5941 
5942 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5943 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5944 
5945 	mfg = &sc->sc_flow_group[group];
5946 	mbin->cmd_table_id = htobe32(mfg->g_table);
5947 	mbin->cmd_flow_index = htobe32(mfg->g_start + index);
5948 	mbin->cmd_flow_ctx.fc_group_id = htobe32(mfg->g_id);
5949 
5950 	/* flow context ends at offset 0x330, 0x130 into the second mbox */
5951 	pdest = (uint32_t *)
5952 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
5953 	mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
5954 	mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
5955 	*pdest = htobe32(dest);
5956 
5957 	/* the only thing we match on at the moment is the dest mac address */
5958 	if (macaddr != NULL) {
5959 		memcpy(mbin->cmd_flow_ctx.fc_match_value.mc_dest_mac, macaddr,
5960 		    ETHER_ADDR_LEN);
5961 	}
5962 
5963 	mcx_cmdq_mboxes_sign(&mxm, 2);
5964 	mcx_cmdq_post(sc, cqe, 0);
5965 	error = mcx_cmdq_poll(sc, cqe, 1000);
5966 	if (error != 0) {
5967 		printf("%s: set flow table entry timeout\n", DEVNAME(sc));
5968 		goto free;
5969 	}
5970 	if (mcx_cmdq_verify(cqe) != 0) {
5971 		printf("%s: set flow table entry command corrupt\n",
5972 		    DEVNAME(sc));
5973 		goto free;
5974 	}
5975 
5976 	out = mcx_cmdq_out(cqe);
5977 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5978 		printf("%s: set flow table entry failed (%x, %x)\n",
5979 		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5980 		error = -1;
5981 		goto free;
5982 	}
5983 
5984 free:
5985 	mcx_dmamem_free(sc, &mxm);
5986 	return (error);
5987 }
5988 
5989 static int
5990 mcx_set_flow_table_entry_proto(struct mcx_softc *sc, int group, int index,
5991     int ethertype, int ip_proto, uint32_t dest)
5992 {
5993 	struct mcx_cmdq_entry *cqe;
5994 	struct mcx_dmamem mxm;
5995 	struct mcx_cmd_set_flow_table_entry_in *in;
5996 	struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
5997 	struct mcx_cmd_set_flow_table_entry_out *out;
5998 	struct mcx_flow_group *mfg;
5999 	uint32_t *pdest;
6000 	int error;
6001 	int token;
6002 
6003 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6004 	token = mcx_cmdq_token(sc);
6005 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*pdest),
6006 	    sizeof(*out), token);
6007 
6008 	in = mcx_cmdq_in(cqe);
6009 	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
6010 	in->cmd_op_mod = htobe16(0);
6011 
6012 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
6013 	    != 0) {
6014 		printf("%s: unable to allocate set flow table entry mailbox\n",
6015 		    DEVNAME(sc));
6016 		return (-1);
6017 	}
6018 
6019 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6020 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
6021 
6022 	mfg = &sc->sc_flow_group[group];
6023 	mbin->cmd_table_id = htobe32(mfg->g_table);
6024 	mbin->cmd_flow_index = htobe32(mfg->g_start + index);
6025 	mbin->cmd_flow_ctx.fc_group_id = htobe32(mfg->g_id);
6026 
6027 	/* flow context ends at offset 0x330, 0x130 into the second mbox */
6028 	pdest = (uint32_t *)
6029 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
6030 	mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
6031 	mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
6032 	*pdest = htobe32(dest);
6033 
6034 	mbin->cmd_flow_ctx.fc_match_value.mc_ethertype = htobe16(ethertype);
6035 	mbin->cmd_flow_ctx.fc_match_value.mc_ip_proto = ip_proto;
6036 
6037 	mcx_cmdq_mboxes_sign(&mxm, 2);
6038 	mcx_cmdq_post(sc, cqe, 0);
6039 	error = mcx_cmdq_poll(sc, cqe, 1000);
6040 	if (error != 0) {
6041 		printf("%s: set flow table entry timeout\n", DEVNAME(sc));
6042 		goto free;
6043 	}
6044 	if (mcx_cmdq_verify(cqe) != 0) {
6045 		printf("%s: set flow table entry command corrupt\n",
6046 		    DEVNAME(sc));
6047 		goto free;
6048 	}
6049 
6050 	out = mcx_cmdq_out(cqe);
6051 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
6052 		printf("%s: set flow table entry failed (%x, %x)\n",
6053 		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
6054 		error = -1;
6055 		goto free;
6056 	}
6057 
6058 free:
6059 	mcx_dmamem_free(sc, &mxm);
6060 	return (error);
6061 }
6062 
6063 static int
6064 mcx_delete_flow_table_entry(struct mcx_softc *sc, int group, int index)
6065 {
6066 	struct mcx_cmdq_entry *cqe;
6067 	struct mcx_dmamem mxm;
6068 	struct mcx_cmd_delete_flow_table_entry_in *in;
6069 	struct mcx_cmd_delete_flow_table_entry_mb_in *mbin;
6070 	struct mcx_cmd_delete_flow_table_entry_out *out;
6071 	struct mcx_flow_group *mfg;
6072 	int error;
6073 	int token;
6074 
6075 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6076 	token = mcx_cmdq_token(sc);
6077 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
6078 	    token);
6079 
6080 	in = mcx_cmdq_in(cqe);
6081 	in->cmd_opcode = htobe16(MCX_CMD_DELETE_FLOW_TABLE_ENTRY);
6082 	in->cmd_op_mod = htobe16(0);
6083 
6084 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6085 	    &cqe->cq_input_ptr, token) != 0) {
6086 		printf("%s: unable to allocate "
6087 		    "delete flow table entry mailbox\n", DEVNAME(sc));
6088 		return (-1);
6089 	}
6090 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6091 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
6092 
6093 	mfg = &sc->sc_flow_group[group];
6094 	mbin->cmd_table_id = htobe32(mfg->g_table);
6095 	mbin->cmd_flow_index = htobe32(mfg->g_start + index);
6096 
6097 	mcx_cmdq_mboxes_sign(&mxm, 2);
6098 	mcx_cmdq_post(sc, cqe, 0);
6099 	error = mcx_cmdq_poll(sc, cqe, 1000);
6100 	if (error != 0) {
6101 		printf("%s: delete flow table entry timeout\n", DEVNAME(sc));
6102 		goto free;
6103 	}
6104 	if (mcx_cmdq_verify(cqe) != 0) {
6105 		printf("%s: delete flow table entry command corrupt\n",
6106 		    DEVNAME(sc));
6107 		goto free;
6108 	}
6109 
6110 	out = mcx_cmdq_out(cqe);
6111 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
6112 		printf("%s: delete flow table entry %d:%d failed (%x, %x)\n",
6113 		    DEVNAME(sc), group, index, out->cmd_status,
6114 		    be32toh(out->cmd_syndrome));
6115 		error = -1;
6116 		goto free;
6117 	}
6118 
6119 free:
6120 	mcx_dmamem_free(sc, &mxm);
6121 	return (error);
6122 }
6123 
6124 #if 0
6125 int
6126 mcx_dump_flow_table(struct mcx_softc *sc, int flow_table_id)
6127 {
6128 	struct mcx_dmamem mxm;
6129 	struct mcx_cmdq_entry *cqe;
6130 	struct mcx_cmd_query_flow_table_in *in;
6131 	struct mcx_cmd_query_flow_table_mb_in *mbin;
6132 	struct mcx_cmd_query_flow_table_out *out;
6133 	struct mcx_cmd_query_flow_table_mb_out *mbout;
6134 	uint8_t token = mcx_cmdq_token(sc);
6135 	int error;
6136 	int i;
6137 	uint8_t *dump;
6138 
6139 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6140 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6141 	    sizeof(*out) + sizeof(*mbout) + 16, token);
6142 
6143 	in = mcx_cmdq_in(cqe);
6144 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE);
6145 	in->cmd_op_mod = htobe16(0);
6146 
6147 	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
6148 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE);
6149 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6150 	    &cqe->cq_output_ptr, token) != 0) {
6151 		printf(", unable to allocate query flow table mailboxes\n");
6152 		return (-1);
6153 	}
6154 	cqe->cq_input_ptr = cqe->cq_output_ptr;
6155 
6156 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6157 	mbin->cmd_table_type = 0;
6158 	mbin->cmd_table_id = htobe32(flow_table_id);
6159 
6160 	mcx_cmdq_mboxes_sign(&mxm, 1);
6161 
6162 	mcx_cmdq_post(sc, cqe, 0);
6163 	error = mcx_cmdq_poll(sc, cqe, 1000);
6164 	if (error != 0) {
6165 		printf("%s: query flow table timeout\n", DEVNAME(sc));
6166 		goto free;
6167 	}
6168 	error = mcx_cmdq_verify(cqe);
6169 	if (error != 0) {
6170 		printf("%s: query flow table reply corrupt\n", DEVNAME(sc));
6171 		goto free;
6172 	}
6173 
6174 	out = mcx_cmdq_out(cqe);
6175 	switch (out->cmd_status) {
6176 	case MCX_CQ_STATUS_OK:
6177 		break;
6178 	default:
6179 		printf("%s: query flow table failed (%x/%x)\n", DEVNAME(sc),
6180 		    out->cmd_status, be32toh(out->cmd_syndrome));
6181 		error = -1;
6182 		goto free;
6183 	}
6184 
6185 	mbout = (struct mcx_cmd_query_flow_table_mb_out *)
6186 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6187 	dump = (uint8_t *)mbout + 8;
6188 	for (i = 0; i < sizeof(struct mcx_flow_table_ctx); i++) {
6189 		printf("%.2x ", dump[i]);
6190 		if (i % 16 == 15)
6191 			printf("\n");
6192 	}
6193 free:
6194 	mcx_cq_mboxes_free(sc, &mxm);
6195 	return (error);
6196 }
6197 int
6198 mcx_dump_flow_table_entry(struct mcx_softc *sc, int flow_table_id, int index)
6199 {
6200 	struct mcx_dmamem mxm;
6201 	struct mcx_cmdq_entry *cqe;
6202 	struct mcx_cmd_query_flow_table_entry_in *in;
6203 	struct mcx_cmd_query_flow_table_entry_mb_in *mbin;
6204 	struct mcx_cmd_query_flow_table_entry_out *out;
6205 	struct mcx_cmd_query_flow_table_entry_mb_out *mbout;
6206 	uint8_t token = mcx_cmdq_token(sc);
6207 	int error;
6208 	int i;
6209 	uint8_t *dump;
6210 
6211 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6212 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6213 	    sizeof(*out) + sizeof(*mbout) + 16, token);
6214 
6215 	in = mcx_cmdq_in(cqe);
6216 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE_ENTRY);
6217 	in->cmd_op_mod = htobe16(0);
6218 
6219 	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
6220 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6221 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6222 	    &cqe->cq_output_ptr, token) != 0) {
6223 		printf(", unable to allocate "
6224 		    "query flow table entry mailboxes\n");
6225 		return (-1);
6226 	}
6227 	cqe->cq_input_ptr = cqe->cq_output_ptr;
6228 
6229 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6230 	mbin->cmd_table_type = 0;
6231 	mbin->cmd_table_id = htobe32(flow_table_id);
6232 	mbin->cmd_flow_index = htobe32(index);
6233 
6234 	mcx_cmdq_mboxes_sign(&mxm, 1);
6235 
6236 	mcx_cmdq_post(sc, cqe, 0);
6237 	error = mcx_cmdq_poll(sc, cqe, 1000);
6238 	if (error != 0) {
6239 		printf("%s: query flow table entry timeout\n", DEVNAME(sc));
6240 		goto free;
6241 	}
6242 	error = mcx_cmdq_verify(cqe);
6243 	if (error != 0) {
6244 		printf("%s: query flow table entry reply corrupt\n",
6245 		    DEVNAME(sc));
6246 		goto free;
6247 	}
6248 
6249 	out = mcx_cmdq_out(cqe);
6250 	switch (out->cmd_status) {
6251 	case MCX_CQ_STATUS_OK:
6252 		break;
6253 	default:
6254 		printf("%s: query flow table entry failed (%x/%x)\n",
6255 		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
6256 		error = -1;
6257 		goto free;
6258 	}
6259 
6260 	mbout = (struct mcx_cmd_query_flow_table_entry_mb_out *)
6261 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6262 	dump = (uint8_t *)mbout;
6263 	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
6264 		printf("%.2x ", dump[i]);
6265 		if (i % 16 == 15)
6266 			printf("\n");
6267 	}
6268 
6269 free:
6270 	mcx_cq_mboxes_free(sc, &mxm);
6271 	return (error);
6272 }
6273 
6274 int
6275 mcx_dump_flow_group(struct mcx_softc *sc, int flow_table_id)
6276 {
6277 	struct mcx_dmamem mxm;
6278 	struct mcx_cmdq_entry *cqe;
6279 	struct mcx_cmd_query_flow_group_in *in;
6280 	struct mcx_cmd_query_flow_group_mb_in *mbin;
6281 	struct mcx_cmd_query_flow_group_out *out;
6282 	struct mcx_cmd_query_flow_group_mb_out *mbout;
6283 	uint8_t token = mcx_cmdq_token(sc);
6284 	int error;
6285 	int i;
6286 	uint8_t *dump;
6287 
6288 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6289 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6290 	    sizeof(*out) + sizeof(*mbout) + 16, token);
6291 
6292 	in = mcx_cmdq_in(cqe);
6293 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_GROUP);
6294 	in->cmd_op_mod = htobe16(0);
6295 
6296 	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
6297 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6298 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6299 	    &cqe->cq_output_ptr, token) != 0) {
6300 		printf(", unable to allocate query flow group mailboxes\n");
6301 		return (-1);
6302 	}
6303 	cqe->cq_input_ptr = cqe->cq_output_ptr;
6304 
6305 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6306 	mbin->cmd_table_type = 0;
6307 	mbin->cmd_table_id = htobe32(flow_table_id);
6308 	mbin->cmd_group_id = htobe32(sc->sc_flow_group_id);
6309 
6310 	mcx_cmdq_mboxes_sign(&mxm, 1);
6311 
6312 	mcx_cmdq_post(sc, cqe, 0);
6313 	error = mcx_cmdq_poll(sc, cqe, 1000);
6314 	if (error != 0) {
6315 		printf("%s: query flow group timeout\n", DEVNAME(sc));
6316 		goto free;
6317 	}
6318 	error = mcx_cmdq_verify(cqe);
6319 	if (error != 0) {
6320 		printf("%s: query flow group reply corrupt\n", DEVNAME(sc));
6321 		goto free;
6322 	}
6323 
6324 	out = mcx_cmdq_out(cqe);
6325 	switch (out->cmd_status) {
6326 	case MCX_CQ_STATUS_OK:
6327 		break;
6328 	default:
6329 		printf("%s: query flow group failed (%x/%x)\n", DEVNAME(sc),
6330 		    out->cmd_status, be32toh(out->cmd_syndrome));
6331 		error = -1;
6332 		goto free;
6333 	}
6334 
6335 	mbout = (struct mcx_cmd_query_flow_group_mb_out *)
6336 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6337 	dump = (uint8_t *)mbout;
6338 	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
6339 		printf("%.2x ", dump[i]);
6340 		if (i % 16 == 15)
6341 			printf("\n");
6342 	}
6343 	dump = (uint8_t *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1)));
6344 	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
6345 		printf("%.2x ", dump[i]);
6346 		if (i % 16 == 15)
6347 			printf("\n");
6348 	}
6349 
6350 free:
6351 	mcx_cq_mboxes_free(sc, &mxm);
6352 	return (error);
6353 }
6354 
6355 static int
6356 mcx_dump_counters(struct mcx_softc *sc)
6357 {
6358 	struct mcx_dmamem mxm;
6359 	struct mcx_cmdq_entry *cqe;
6360 	struct mcx_cmd_query_vport_counters_in *in;
6361 	struct mcx_cmd_query_vport_counters_mb_in *mbin;
6362 	struct mcx_cmd_query_vport_counters_out *out;
6363 	struct mcx_nic_vport_counters *counters;
6364 	int error, token;
6365 
6366 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6367 	token = mcx_cmdq_token(sc);
6368 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6369 	    sizeof(*out) + sizeof(*counters), token);
6370 
6371 	in = mcx_cmdq_in(cqe);
6372 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_VPORT_COUNTERS);
6373 	in->cmd_op_mod = htobe16(0);
6374 
6375 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
6376 	    &cqe->cq_output_ptr, token) != 0) {
6377 		printf(", unable to allocate "
6378 		    "query nic vport counters mailboxen\n");
6379 		return (-1);
6380 	}
6381 	cqe->cq_input_ptr = cqe->cq_output_ptr;
6382 
6383 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6384 	mbin->cmd_clear = 0x80;
6385 
6386 	mcx_cmdq_mboxes_sign(&mxm, 1);
6387 	mcx_cmdq_post(sc, cqe, 0);
6388 
6389 	error = mcx_cmdq_poll(sc, cqe, 1000);
6390 	if (error != 0) {
6391 		printf("%s: query nic vport counters timeout\n", DEVNAME(sc));
6392 		goto free;
6393 	}
6394 	if (mcx_cmdq_verify(cqe) != 0) {
6395 		printf("%s: query nic vport counters command corrupt\n",
6396 		    DEVNAME(sc));
6397 		goto free;
6398 	}
6399 
6400 	out = mcx_cmdq_out(cqe);
6401 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
6402 		printf("%s: query nic vport counters failed (%x, %x)\n",
6403 		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
6404 		error = -1;
6405 		goto free;
6406 	}
6407 
6408 	counters = (struct mcx_nic_vport_counters *)
6409 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6410 	if (counters->rx_bcast.packets + counters->tx_bcast.packets +
6411 	    counters->rx_ucast.packets + counters->tx_ucast.packets +
6412 	    counters->rx_err.packets + counters->tx_err.packets)
6413 		printf("%s: err %llx/%llx uc %llx/%llx bc %llx/%llx\n",
6414 		    DEVNAME(sc),
6415 		    be64toh(counters->tx_err.packets),
6416 		    be64toh(counters->rx_err.packets),
6417 		    be64toh(counters->tx_ucast.packets),
6418 		    be64toh(counters->rx_ucast.packets),
6419 		    be64toh(counters->tx_bcast.packets),
6420 		    be64toh(counters->rx_bcast.packets));
6421 free:
6422 	mcx_dmamem_free(sc, &mxm);
6423 
6424 	return (error);
6425 }
6426 
6427 static int
6428 mcx_dump_flow_counter(struct mcx_softc *sc, int index, const char *what)
6429 {
6430 	struct mcx_dmamem mxm;
6431 	struct mcx_cmdq_entry *cqe;
6432 	struct mcx_cmd_query_flow_counter_in *in;
6433 	struct mcx_cmd_query_flow_counter_mb_in *mbin;
6434 	struct mcx_cmd_query_flow_counter_out *out;
6435 	struct mcx_counter *counters;
6436 	int error, token;
6437 
6438 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6439 	token = mcx_cmdq_token(sc);
6440 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out) +
6441 	    sizeof(*counters), token);
6442 
6443 	in = mcx_cmdq_in(cqe);
6444 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_COUNTER);
6445 	in->cmd_op_mod = htobe16(0);
6446 
6447 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
6448 	    &cqe->cq_output_ptr, token) != 0) {
6449 		printf(", unable to allocate query flow counter mailboxen\n");
6450 		return (-1);
6451 	}
6452 	cqe->cq_input_ptr = cqe->cq_output_ptr;
6453 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6454 	mbin->cmd_flow_counter_id = htobe16(sc->sc_flow_counter_id[index]);
6455 	mbin->cmd_clear = 0x80;
6456 
6457 	mcx_cmdq_mboxes_sign(&mxm, 1);
6458 	mcx_cmdq_post(sc, cqe, 0);
6459 
6460 	error = mcx_cmdq_poll(sc, cqe, 1000);
6461 	if (error != 0) {
6462 		printf("%s: query flow counter timeout\n", DEVNAME(sc));
6463 		goto free;
6464 	}
6465 	if (mcx_cmdq_verify(cqe) != 0) {
6466 		printf("%s: query flow counter command corrupt\n", DEVNAME(sc));
6467 		goto free;
6468 	}
6469 
6470 	out = mcx_cmdq_out(cqe);
6471 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
6472 		printf("%s: query flow counter failed (%x, %x)\n", DEVNAME(sc),
6473 		    out->cmd_status, be32toh(out->cmd_syndrome));
6474 		error = -1;
6475 		goto free;
6476 	}
6477 
6478 	counters = (struct mcx_counter *)
6479 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6480 	if (counters->packets)
6481 		printf("%s: %s inflow %llx\n", DEVNAME(sc), what,
6482 		    be64toh(counters->packets));
6483 free:
6484 	mcx_dmamem_free(sc, &mxm);
6485 
6486 	return (error);
6487 }
6488 
6489 #endif
6490 
6491 #if NKSTAT > 0
6492 
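/*
 * The QUERY_{RQ,SQ,CQ,EQ} helpers below read hardware queue contexts
 * back into host structures; they are only built when kstat support
 * is enabled (NKSTAT > 0).
 */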
6493 int
6494 mcx_query_rq(struct mcx_softc *sc, struct mcx_rx *rx, struct mcx_rq_ctx *rq_ctx)
6495 {
6496 	struct mcx_dmamem mxm;
6497 	struct mcx_cmdq_entry *cqe;
6498 	struct mcx_cmd_query_rq_in *in;
6499 	struct mcx_cmd_query_rq_out *out;
6500 	struct mcx_cmd_query_rq_mb_out *mbout;
6501 	uint8_t token = mcx_cmdq_token(sc);
6502 	int error;
6503 
6504 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6505 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
6506 	    token);
6507 
6508 	in = mcx_cmdq_in(cqe);
6509 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_RQ);
6510 	in->cmd_op_mod = htobe16(0);
6511 	in->cmd_rqn = htobe32(rx->rx_rqn);
6512 
6513 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6514 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6515 	    &cqe->cq_output_ptr, token) != 0) {
6516 		printf("%s: unable to allocate query rq mailboxes\n", DEVNAME(sc));
6517 		return (-1);
6518 	}
6519 
6520 	mcx_cmdq_mboxes_sign(&mxm, 1);
6521 
6522 	mcx_cmdq_post(sc, cqe, 0);
6523 	error = mcx_cmdq_poll(sc, cqe, 1000);
6524 	if (error != 0) {
6525 		printf("%s: query rq timeout\n", DEVNAME(sc));
6526 		goto free;
6527 	}
6528 	error = mcx_cmdq_verify(cqe);
6529 	if (error != 0) {
6530 		printf("%s: query rq reply corrupt\n", DEVNAME(sc));
6531 		goto free;
6532 	}
6533 
6534 	out = mcx_cmdq_out(cqe);
6535 	switch (out->cmd_status) {
6536 	case MCX_CQ_STATUS_OK:
6537 		break;
6538 	default:
6539 		printf("%s: query rq failed (%x/%x)\n", DEVNAME(sc),
6540 		    out->cmd_status, be32toh(out->cmd_syndrome));
6541 		error = -1;
6542 		goto free;
6543 	}
6544 
6545 	mbout = (struct mcx_cmd_query_rq_mb_out *)
6546 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6547 	memcpy(rq_ctx, &mbout->cmd_ctx, sizeof(*rq_ctx));
6548 
6549 free:
6550 	mcx_cq_mboxes_free(sc, &mxm);
6551 	return (error);
6552 }
6553 
6554 int
6555 mcx_query_sq(struct mcx_softc *sc, struct mcx_tx *tx, struct mcx_sq_ctx *sq_ctx)
6556 {
6557 	struct mcx_dmamem mxm;
6558 	struct mcx_cmdq_entry *cqe;
6559 	struct mcx_cmd_query_sq_in *in;
6560 	struct mcx_cmd_query_sq_out *out;
6561 	struct mcx_cmd_query_sq_mb_out *mbout;
6562 	uint8_t token = mcx_cmdq_token(sc);
6563 	int error;
6564 
6565 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6566 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
6567 	    token);
6568 
6569 	in = mcx_cmdq_in(cqe);
6570 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_SQ);
6571 	in->cmd_op_mod = htobe16(0);
6572 	in->cmd_sqn = htobe32(tx->tx_sqn);
6573 
6574 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6575 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6576 	    &cqe->cq_output_ptr, token) != 0) {
6577 		printf("%s: unable to allocate query sq mailboxes\n", DEVNAME(sc));
6578 		return (-1);
6579 	}
6580 
6581 	mcx_cmdq_mboxes_sign(&mxm, 1);
6582 
6583 	mcx_cmdq_post(sc, cqe, 0);
6584 	error = mcx_cmdq_poll(sc, cqe, 1000);
6585 	if (error != 0) {
6586 		printf("%s: query sq timeout\n", DEVNAME(sc));
6587 		goto free;
6588 	}
6589 	error = mcx_cmdq_verify(cqe);
6590 	if (error != 0) {
6591 		printf("%s: query sq reply corrupt\n", DEVNAME(sc));
6592 		goto free;
6593 	}
6594 
6595 	out = mcx_cmdq_out(cqe);
6596 	switch (out->cmd_status) {
6597 	case MCX_CQ_STATUS_OK:
6598 		break;
6599 	default:
6600 		printf("%s: query sq failed (%x/%x)\n", DEVNAME(sc),
6601 		    out->cmd_status, be32toh(out->cmd_syndrome));
6602 		error = -1;
6603 		goto free;
6604 	}
6605 
6606 	mbout = (struct mcx_cmd_query_sq_mb_out *)
6607 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6608 	memcpy(sq_ctx, &mbout->cmd_ctx, sizeof(*sq_ctx));
6609 
6610 free:
6611 	mcx_cq_mboxes_free(sc, &mxm);
6612 	return (error);
6613 }
6614 
6615 int
6616 mcx_query_cq(struct mcx_softc *sc, struct mcx_cq *cq, struct mcx_cq_ctx *cq_ctx)
6617 {
6618 	struct mcx_dmamem mxm;
6619 	struct mcx_cmdq_entry *cqe;
6620 	struct mcx_cmd_query_cq_in *in;
6621 	struct mcx_cmd_query_cq_out *out;
6622 	struct mcx_cq_ctx *ctx;
6623 	uint8_t token = mcx_cmdq_token(sc);
6624 	int error;
6625 
6626 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6627 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx) + 16,
6628 	    token);
6629 
6630 	in = mcx_cmdq_in(cqe);
6631 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_CQ);
6632 	in->cmd_op_mod = htobe16(0);
6633 	in->cmd_cqn = htobe32(cq->cq_n);
6634 
6635 	CTASSERT(sizeof(*ctx) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6636 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6637 	    &cqe->cq_output_ptr, token) != 0) {
6638 		printf("%s: unable to allocate query cq mailboxes\n",
6639 		    DEVNAME(sc));
6640 		return (-1);
6641 	}
6642 
6643 	mcx_cmdq_mboxes_sign(&mxm, 1);
6644 
6645 	mcx_cmdq_post(sc, cqe, 0);
6646 	error = mcx_cmdq_poll(sc, cqe, 1000);
6647 	if (error != 0) {
6648 		printf("%s: query cq timeout\n", DEVNAME(sc));
6649 		goto free;
6650 	}
6651 	if (mcx_cmdq_verify(cqe) != 0) {
6652 		printf("%s: query cq reply corrupt\n", DEVNAME(sc));
6653 		goto free;
6654 	}
6655 
6656 	out = mcx_cmdq_out(cqe);
6657 	switch (out->cmd_status) {
6658 	case MCX_CQ_STATUS_OK:
6659 		break;
6660 	default:
6661 		printf("%s: query cq failed (%x/%x)\n", DEVNAME(sc),
6662 		    out->cmd_status, be32toh(out->cmd_syndrome));
6663 		error = -1;
6664 		goto free;
6665 	}
6666 
6667 	ctx = (struct mcx_cq_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6668 	memcpy(cq_ctx, ctx, sizeof(*cq_ctx));
6669 free:
6670 	mcx_dmamem_free(sc, &mxm);
6671 	return (error);
6672 }
6673 
6674 int
6675 mcx_query_eq(struct mcx_softc *sc, struct mcx_eq *eq, struct mcx_eq_ctx *eq_ctx)
6676 {
6677 	struct mcx_dmamem mxm;
6678 	struct mcx_cmdq_entry *cqe;
6679 	struct mcx_cmd_query_eq_in *in;
6680 	struct mcx_cmd_query_eq_out *out;
6681 	struct mcx_eq_ctx *ctx;
6682 	uint8_t token = mcx_cmdq_token(sc);
6683 	int error;
6684 
6685 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6686 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx) + 16,
6687 	    token);
6688 
6689 	in = mcx_cmdq_in(cqe);
6690 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_EQ);
6691 	in->cmd_op_mod = htobe16(0);
6692 	in->cmd_eqn = htobe32(eq->eq_n);
6693 
6694 	CTASSERT(sizeof(*ctx) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6695 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6696 	    &cqe->cq_output_ptr, token) != 0) {
6697 		printf("%s: unable to allocate query eq mailboxes\n",
6698 		    DEVNAME(sc));
6699 		return (-1);
6700 	}
6701 
6702 	mcx_cmdq_mboxes_sign(&mxm, 1);
6703 
6704 	mcx_cmdq_post(sc, cqe, 0);
6705 	error = mcx_cmdq_poll(sc, cqe, 1000);
6706 	if (error != 0) {
6707 		printf("%s: query eq timeout\n", DEVNAME(sc));
6708 		goto free;
6709 	}
6710 	if (mcx_cmdq_verify(cqe) != 0) {
6711 		printf("%s: query eq reply corrupt\n", DEVNAME(sc));
6712 		goto free;
6713 	}
6714 
6715 	out = mcx_cmdq_out(cqe);
6716 	switch (out->cmd_status) {
6717 	case MCX_CQ_STATUS_OK:
6718 		break;
6719 	default:
6720 		printf("%s: query eq failed (%x/%x)\n", DEVNAME(sc),
6721 		    out->cmd_status, be32toh(out->cmd_syndrome));
6722 		error = -1;
6723 		goto free;
6724 	}
6725 
6726 	ctx = (struct mcx_eq_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6727 	memcpy(eq_ctx, ctx, sizeof(*eq_ctx));
6728 free:
6729 	mcx_dmamem_free(sc, &mxm);
6730 	return (error);
6731 }
6732 
6733 #endif /* NKSTAT > 0 */
6734 
6735 
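/*
 * Refill up to nslots receive ring entries with fresh mbuf clusters
 * and update the RQ doorbell record with the new producer counter;
 * returns how many of the requested slots were left unfilled.
 */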
6736 static inline unsigned int
6737 mcx_rx_fill_slots(struct mcx_softc *sc, struct mcx_rx *rx, uint nslots)
6738 {
6739 	struct mcx_rq_entry *ring, *rqe;
6740 	struct mcx_slot *ms;
6741 	struct mbuf *m;
6742 	uint slot, p, fills;
6743 
6744 	ring = MCX_DMA_KVA(&rx->rx_rq_mem);
6745 	p = rx->rx_prod;
6746 
6747 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
6748 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
6749 
6750 	slot = (p % (1 << MCX_LOG_RQ_SIZE));
6751 	rqe = ring;
6752 	for (fills = 0; fills < nslots; fills++) {
6753 		slot = p % (1 << MCX_LOG_RQ_SIZE);
6754 
6755 		ms = &rx->rx_slots[slot];
6756 		rqe = &ring[slot];
6757 
6758 		m = NULL;
6759 		MGETHDR(m, M_DONTWAIT, MT_DATA);
6760 		if (m == NULL)
6761 			break;
6762 
6763 		MCLGET(m, M_DONTWAIT);
6764 		if ((m->m_flags & M_EXT) == 0) {
6765 			m_freem(m);
6766 			break;
6767 		}
6768 
6769 		m->m_len = m->m_pkthdr.len = sc->sc_hardmtu;
6770 		m_adj(m, m->m_ext.ext_size - sc->sc_rxbufsz);
6771 		m_adj(m, ETHER_ALIGN);
6772 
6773 		if (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
6774 		    BUS_DMA_NOWAIT) != 0) {
6775 			m_freem(m);
6776 			break;
6777 		}
6778 		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
6779 		ms->ms_m = m;
6780 
6781 		be32enc(&rqe->rqe_byte_count, ms->ms_map->dm_segs[0].ds_len);
6782 		be64enc(&rqe->rqe_addr, ms->ms_map->dm_segs[0].ds_addr);
6783 		be32enc(&rqe->rqe_lkey, sc->sc_lkey);
6784 
6785 		p++;
6786 	}
6787 
6788 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
6789 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_PREWRITE);
6790 
6791 	rx->rx_prod = p;
6792 
6793 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
6794 	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
6795 	be32enc(MCX_DMA_OFF(&sc->sc_doorbell_mem, rx->rx_doorbell),
6796 	    p & MCX_WQ_DOORBELL_MASK);
6797 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
6798 	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
6799 
6800 	return (nslots - fills);
6801 }
6802 
6803 static int
6804 mcx_rx_fill(struct mcx_softc *sc, struct mcx_rx *rx)
6805 {
6806 	u_int slots;
6807 
6808 	slots = mcx_rxr_get(&rx->rx_rxr, (1 << MCX_LOG_RQ_SIZE));
6809 	if (slots == 0)
6810 		return (1);
6811 
6812 	slots = mcx_rx_fill_slots(sc, rx, slots);
6813 	mcx_rxr_put(&rx->rx_rxr, slots);
6814 	return (0);
6815 }
6816 
6817 void
6818 mcx_refill(void *xrx)
6819 {
6820 	struct mcx_rx *rx = xrx;
6821 	struct mcx_softc *sc = rx->rx_softc;
6822 
6823 	mcx_rx_fill(sc, rx);
6824 
6825 	if (mcx_rxr_inuse(&rx->rx_rxr) == 0)
6826 		callout_schedule(&rx->rx_refill, 1);
6827 }
6828 
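/*
 * Reclaim the tx slot named by a completion entry: unload and free
 * its mbuf and report how many SQ slots the descriptor occupied.
 */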
6829 static int
6830 mcx_process_txeof(struct mcx_softc *sc, struct mcx_tx *tx,
6831     struct mcx_cq_entry *cqe)
6832 {
6833 	struct mcx_slot *ms;
6834 	bus_dmamap_t map;
6835 	int slot, slots;
6836 
6837 	slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_SQ_SIZE);
6838 
6839 	ms = &tx->tx_slots[slot];
6840 	map = ms->ms_map;
6841 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
6842 	    BUS_DMASYNC_POSTWRITE);
6843 
6844 	slots = 1;
6845 	if (map->dm_nsegs > 1)
6846 		slots += (map->dm_nsegs+2) / MCX_SQ_SEGS_PER_SLOT;
6847 
6848 	bus_dmamap_unload(sc->sc_dmat, map);
6849 	m_freem(ms->ms_m);
6850 	ms->ms_m = NULL;
6851 
6852 	return (slots);
6853 }
6854 
6855 static uint64_t
6856 mcx_uptime(void)
6857 {
6858 	struct timespec ts;
6859 
6860 	nanouptime(&ts);
6861 
6862 	return ((uint64_t)ts.tv_sec * 1000000000 + (uint64_t)ts.tv_nsec);
6863 }
6864 
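/*
 * Take the initial uptime/device-timer sample pair; c_ratio stays
 * zero until a second sample lets mcx_calibrate() compute it.
 */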
6865 static void
6866 mcx_calibrate_first(struct mcx_softc *sc)
6867 {
6868 	struct mcx_calibration *c = &sc->sc_calibration[0];
6869 	int s;
6870 
6871 	sc->sc_calibration_gen = 0;
6872 
6873 	s = splhigh(); /* crit_enter? */
6874 	c->c_ubase = mcx_uptime();
6875 	c->c_tbase = mcx_timer(sc);
6876 	splx(s);
6877 	c->c_ratio = 0;
6878 
6879 #if notyet
6880 	callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_FIRST * hz);
6881 #endif
6882 }
6883 
6884 #define MCX_TIMESTAMP_SHIFT 24
6885 
6886 static void
6887 mcx_calibrate(void *arg)
6888 {
6889 	struct mcx_softc *sc = arg;
6890 	struct mcx_calibration *nc, *pc;
6891 	uint64_t udiff, tdiff;
6892 	unsigned int gen;
6893 	int s;
6894 
6895 	if (!ISSET(sc->sc_ec.ec_if.if_flags, IFF_RUNNING))
6896 		return;
6897 
6898 	callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_NORMAL * hz);
6899 
6900 	gen = sc->sc_calibration_gen;
6901 	pc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
6902 	gen++;
6903 	nc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
6904 
6905 	nc->c_uptime = pc->c_ubase;
6906 	nc->c_timestamp = pc->c_tbase;
6907 
6908 	s = splhigh(); /* crit_enter? */
6909 	nc->c_ubase = mcx_uptime();
6910 	nc->c_tbase = mcx_timer(sc);
6911 	splx(s);
6912 
6913 	udiff = nc->c_ubase - nc->c_uptime;
6914 	tdiff = nc->c_tbase - nc->c_timestamp;
6915 
6916 	/*
6917 	 * udiff is the wall clock time between calibration ticks,
6918 	 * which should be 32 seconds or 32 billion nanoseconds. if
6919 	 * we squint, 1 billion nanoseconds is kind of like a 32 bit
6920 	 * number, so 32 billion should still have a lot of high bits
6921 	 * spare. we use this space by shifting the nanoseconds up
6922 	 * 24 bits so we have a nice big number to divide by the
6923 	 * number of mcx timer ticks.
6924 	 */
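	/*
	 * illustrative numbers (hypothetical 100MHz device timer):
	 * udiff = 32e9 ns and tdiff = 3.2e9 ticks, so c_ratio =
	 * (32e9 << 24) / 3.2e9 = 10 << 24, i.e. 10ns per tick in
	 * 24-bit fixed point.
	 */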
6925 	nc->c_ratio = (udiff << MCX_TIMESTAMP_SHIFT) / tdiff;
6926 
6927 	membar_producer();
6928 	sc->sc_calibration_gen = gen;
6929 }
6930 
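/*
 * Turn a receive completion into an mbuf: recover the slot's mbuf,
 * set its length, translate hardware checksum/VLAN flags, and queue
 * it on mq for the caller (mcx_process_cq) to deliver.
 */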
6931 static int
6932 mcx_process_rx(struct mcx_softc *sc, struct mcx_rx *rx,
6933     struct mcx_cq_entry *cqe, struct mcx_mbufq *mq,
6934     const struct mcx_calibration *c)
6935 {
6936 	struct ifnet *ifp = &sc->sc_ec.ec_if;
6937 	struct mcx_slot *ms;
6938 	struct mbuf *m;
6939 	uint32_t flags, len;
6940 	int slot;
6941 
6942 	len = be32dec(&cqe->cq_byte_cnt);
6943 	slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_RQ_SIZE);
6944 
6945 	ms = &rx->rx_slots[slot];
6946 	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, len, BUS_DMASYNC_POSTREAD);
6947 	bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
6948 
6949 	m = ms->ms_m;
6950 	ms->ms_m = NULL;
6951 
6952 	m_set_rcvif(m, &sc->sc_ec.ec_if);
6953 	m->m_pkthdr.len = m->m_len = len;
6954 
6955 #if 0
6956 	if (cqe->cq_rx_hash_type) {
6957 		m->m_pkthdr.ph_flowid = be32toh(cqe->cq_rx_hash);
6958 		m->m_pkthdr.csum_flags |= M_FLOWID;
6959 	}
6960 #endif
6961 
6962 	flags = be32dec(&cqe->cq_flags);
6963 	if (flags & MCX_CQ_ENTRY_FLAGS_L3_OK) {
6964 		if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
6965 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
6966 	}
6967 	if (flags & MCX_CQ_ENTRY_FLAGS_L4_OK) {
6968 		if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx)
6969 			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
6970 		if (ifp->if_capenable & IFCAP_CSUM_TCPv6_Rx)
6971 			m->m_pkthdr.csum_flags |= M_CSUM_TCPv6;
6972 		if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx)
6973 			m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
6974 		if (ifp->if_capenable & IFCAP_CSUM_UDPv6_Rx)
6975 			m->m_pkthdr.csum_flags |= M_CSUM_UDPv6;
6976 	}
6977 	if (flags & MCX_CQ_ENTRY_FLAGS_CV) {
6978 		vlan_set_tag(m, flags & MCX_CQ_ENTRY_FLAGS_VLAN_MASK);
6979 	}
6980 
6981 #if notyet
6982 	if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_LINK0) && c->c_ratio) {
6983 		uint64_t t = be64dec(&cqe->cq_timestamp);
6984 		t -= c->c_timestamp;
6985 		t *= c->c_ratio;
6986 		t >>= MCX_TIMESTAMP_SHIFT;
6987 		t += c->c_uptime;
6988 
6989 		m->m_pkthdr.ph_timestamp = t;
6990 		SET(m->m_pkthdr.csum_flags, M_TIMESTAMP);
6991 	}
6992 #endif
6993 
6994 	MBUFQ_ENQUEUE(mq, m);
6995 
6996 	return (1);
6997 }
6998 
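/*
 * The owner bit of each CQE flips on every pass around the ring, so
 * an entry is valid when its owner bit matches the parity of the
 * consumer counter's wrap count.
 */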
6999 static struct mcx_cq_entry *
7000 mcx_next_cq_entry(struct mcx_softc *sc, struct mcx_cq *cq)
7001 {
7002 	struct mcx_cq_entry *cqe;
7003 	int next;
7004 
7005 	cqe = (struct mcx_cq_entry *)MCX_DMA_KVA(&cq->cq_mem);
7006 	next = cq->cq_cons % (1 << MCX_LOG_CQ_SIZE);
7007 
7008 	if ((cqe[next].cq_opcode_owner & MCX_CQ_ENTRY_FLAG_OWNER) ==
7009 	    ((cq->cq_cons >> MCX_LOG_CQ_SIZE) & 1)) {
7010 		return (&cqe[next]);
7011 	}
7012 
7013 	return (NULL);
7014 }
7015 
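/*
 * Acknowledge consumed CQEs and re-arm the CQ: update the doorbell
 * record in host memory, then write the UAR doorbell register so the
 * device will raise another event for this CQ.
 */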
7016 static void
7017 mcx_arm_cq(struct mcx_softc *sc, struct mcx_cq *cq, int uar)
7018 {
7019 	struct mcx_cq_doorbell *db;
7020 	bus_size_t offset;
7021 	uint32_t val;
7022 	uint64_t uval;
7023 
7024 	val = ((cq->cq_count) & 3) << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT;
7025 	val |= (cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
7026 
7027 	db = MCX_DMA_OFF(&sc->sc_doorbell_mem, cq->cq_doorbell);
7028 
7029 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
7030 	    cq->cq_doorbell, sizeof(*db), BUS_DMASYNC_POSTWRITE);
7031 
7032 	be32enc(&db->db_update_ci, cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
7033 	be32enc(&db->db_arm_ci, val);
7034 
7035 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
7036 	    cq->cq_doorbell, sizeof(*db), BUS_DMASYNC_PREWRITE);
7037 
7038 	offset = (MCX_PAGE_SIZE * uar) + MCX_UAR_CQ_DOORBELL;
7039 
7040 	uval = (uint64_t)val << 32;
7041 	uval |= cq->cq_n;
7042 
7043 	bus_space_write_8(sc->sc_memt, sc->sc_memh, offset, htobe64(uval));
7044 	mcx_bar(sc, offset, sizeof(uval), BUS_SPACE_BARRIER_WRITE);
7045 }
7046 
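/*
 * Drain a completion queue: reclaim tx slots and gather rx mbufs,
 * then refill the rx ring, re-arm the CQ and kick the transmit path.
 */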
7047 void
7048 mcx_process_cq(struct mcx_softc *sc, struct mcx_queues *q, struct mcx_cq *cq)
7049 {
7050 	struct mcx_rx *rx = &q->q_rx;
7051 	struct mcx_tx *tx = &q->q_tx;
7052 	struct ifnet *ifp = &sc->sc_ec.ec_if;
7053 	const struct mcx_calibration *c;
7054 	unsigned int gen;
7055 	struct mcx_cq_entry *cqe;
7056 	struct mcx_mbufq mq;
7057 	struct mbuf *m;
7058 	int rxfree, txfree;
7059 
7060 	MBUFQ_INIT(&mq);
7061 
7062 	gen = sc->sc_calibration_gen;
7063 	membar_consumer();
7064 	c = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
7065 
7066 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
7067 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
7068 
7069 	rxfree = 0;
7070 	txfree = 0;
7071 	while ((cqe = mcx_next_cq_entry(sc, cq))) {
7072 		uint8_t opcode;
7073 		opcode = (cqe->cq_opcode_owner >> MCX_CQ_ENTRY_OPCODE_SHIFT);
7074 		switch (opcode) {
7075 		case MCX_CQ_ENTRY_OPCODE_REQ:
7076 			txfree += mcx_process_txeof(sc, tx, cqe);
7077 			break;
7078 		case MCX_CQ_ENTRY_OPCODE_SEND:
7079 			rxfree += mcx_process_rx(sc, rx, cqe, &mq, c);
7080 			break;
7081 		case MCX_CQ_ENTRY_OPCODE_REQ_ERR:
7082 		case MCX_CQ_ENTRY_OPCODE_SEND_ERR:
7083 			/* uint8_t *cqp = (uint8_t *)cqe; */
7084 			/* printf("%s: cq completion error: %x\n",
7085 			    DEVNAME(sc), cqp[0x37]); */
7086 			break;
7087 
7088 		default:
7089 			/* printf("%s: cq completion opcode %x??\n",
7090 			    DEVNAME(sc), opcode); */
7091 			break;
7092 		}
7093 
7094 		cq->cq_cons++;
7095 	}
7096 
7097 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
7098 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_PREREAD);
7099 
7100 	if (rxfree > 0) {
7101 		mcx_rxr_put(&rx->rx_rxr, rxfree);
7102 		while (MBUFQ_FIRST(&mq) != NULL) {
7103 			MBUFQ_DEQUEUE(&mq, m);
7104 			if_percpuq_enqueue(ifp->if_percpuq, m);
7105 		}
7106 
7107 		mcx_rx_fill(sc, rx);
7108 		if (mcx_rxr_inuse(&rx->rx_rxr) == 0)
7109 			callout_schedule(&rx->rx_refill, 1);
7110 	}
7111 
7112 	cq->cq_count++;
7113 	mcx_arm_cq(sc, cq, q->q_uar);
7114 
7115 	if (txfree > 0) {
7116 		tx->tx_cons += txfree;
7117 		if_schedule_deferred_start(ifp);
7118 	}
7119 }
7120 
7121 
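/*
 * the EQ doorbell is a single 32-bit UAR write: the EQ number in the
 * top byte, with the low 24 bits of the consumer index below it.
 */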
7122 static void
7123 mcx_arm_eq(struct mcx_softc *sc, struct mcx_eq *eq, int uar)
7124 {
7125 	bus_size_t offset;
7126 	uint32_t val;
7127 
7128 	offset = (MCX_PAGE_SIZE * uar) + MCX_UAR_EQ_DOORBELL_ARM;
7129 	val = (eq->eq_n << 24) | (eq->eq_cons & 0xffffff);
7130 
7131 	mcx_wr(sc, offset, val);
7132 	mcx_bar(sc, offset, sizeof(val), BUS_SPACE_BARRIER_WRITE);
7133 }
7134 
7135 static struct mcx_eq_entry *
7136 mcx_next_eq_entry(struct mcx_softc *sc, struct mcx_eq *eq)
7137 {
7138 	struct mcx_eq_entry *eqe;
7139 	int next;
7140 
7141 	eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&eq->eq_mem);
7142 	next = eq->eq_cons % (1 << MCX_LOG_EQ_SIZE);
7143 	if ((eqe[next].eq_owner & 1) ==
7144 	    ((eq->eq_cons >> MCX_LOG_EQ_SIZE) & 1)) {
7145 		eq->eq_cons++;
7146 		return (&eqe[next]);
7147 	}
7148 	return (NULL);
7149 }
7150 
7151 int
7152 mcx_admin_intr(void *xsc)
7153 {
7154 	struct mcx_softc *sc = (struct mcx_softc *)xsc;
7155 	struct mcx_eq *eq = &sc->sc_admin_eq;
7156 	struct mcx_eq_entry *eqe;
7157 
7158 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7159 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
7160 
7161 	while ((eqe = mcx_next_eq_entry(sc, eq)) != NULL) {
7162 		switch (eqe->eq_event_type) {
7163 		case MCX_EVENT_TYPE_LAST_WQE:
7164 			/* printf("%s: last wqe reached?\n", DEVNAME(sc)); */
7165 			break;
7166 
7167 		case MCX_EVENT_TYPE_CQ_ERROR:
7168 			/* printf("%s: cq error\n", DEVNAME(sc)); */
7169 			break;
7170 
7171 		case MCX_EVENT_TYPE_CMD_COMPLETION:
7172 			/* wakeup probably */
7173 			break;
7174 
7175 		case MCX_EVENT_TYPE_PORT_CHANGE:
7176 			workqueue_enqueue(sc->sc_workq, &sc->sc_port_change, NULL);
7177 			break;
7178 
7179 		default:
7180 			/* printf("%s: something happened\n", DEVNAME(sc)); */
7181 			break;
7182 		}
7183 	}
7184 
7185 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7186 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
7187 
7188 	mcx_arm_eq(sc, eq, sc->sc_uar);
7189 
7190 	return (1);
7191 }
7192 
7193 int
7194 mcx_cq_intr(void *xq)
7195 {
7196 	struct mcx_queues *q = (struct mcx_queues *)xq;
7197 	struct mcx_softc *sc = q->q_sc;
7198 	struct mcx_eq *eq = &q->q_eq;
7199 	struct mcx_eq_entry *eqe;
7200 	int cqn;
7201 
7202 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7203 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
7204 
7205 	while ((eqe = mcx_next_eq_entry(sc, eq)) != NULL) {
7206 		switch (eqe->eq_event_type) {
7207 		case MCX_EVENT_TYPE_COMPLETION:
7208 			cqn = be32toh(eqe->eq_event_data[6]);
7209 			if (cqn == q->q_cq.cq_n)
7210 				mcx_process_cq(sc, q, &q->q_cq);
7211 			break;
7212 		}
7213 	}
7214 
7215 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7216 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
7217 
7218 	mcx_arm_eq(sc, eq, q->q_uar);
7219 
7220 	return (1);
7221 }
7222 
7223 static void
7224 mcx_free_slots(struct mcx_softc *sc, struct mcx_slot *slots, int allocated,
7225     int total)
7226 {
7227 	struct mcx_slot *ms;
7228 
7229 	int i = allocated;
7230 	while (i-- > 0) {
7231 		ms = &slots[i];
7232 		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
7233 		m_freem(ms->ms_m);
7234 	}
7235 	kmem_free(slots, total * sizeof(*ms));
7236 }
7237 
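/*
 * bring up one rx/tx queue pair: allocate per-slot DMA maps for the rx
 * and tx rings, then create the completion, send and receive queues
 * that back them.  failures unwind in reverse through the goto labels;
 * note that the tx error paths reset "i" to the full rx ring size
 * before falling into destroy_rx_slots, so all rx slots are freed, not
 * just the count left over from the tx loop.
 */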
7238 static int
7239 mcx_queue_up(struct mcx_softc *sc, struct mcx_queues *q)
7240 {
7241 	struct mcx_rx *rx;
7242 	struct mcx_tx *tx;
7243 	struct mcx_slot *ms;
7244 	int i;
7245 
7246 	rx = &q->q_rx;
7247 	rx->rx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_RQ_SIZE),
7248 	    KM_SLEEP);
7249 
7250 	for (i = 0; i < (1 << MCX_LOG_RQ_SIZE); i++) {
7251 		ms = &rx->rx_slots[i];
7252 		if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu, 1,
7253 		    sc->sc_hardmtu, 0,
7254 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
7255 		    &ms->ms_map) != 0) {
7256 			printf("%s: failed to allocate rx dma maps\n",
7257 			    DEVNAME(sc));
7258 			goto destroy_rx_slots;
7259 		}
7260 	}
7261 
7262 	tx = &q->q_tx;
7263 	tx->tx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_SQ_SIZE),
7264 	     KM_SLEEP);
7265 
7266 	for (i = 0; i < (1 << MCX_LOG_SQ_SIZE); i++) {
7267 		ms = &tx->tx_slots[i];
7268 		if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu,
7269 		    MCX_SQ_MAX_SEGMENTS, sc->sc_hardmtu, 0,
7270 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
7271 		    &ms->ms_map) != 0) {
7272 			printf("%s: failed to allocate tx dma maps\n",
7273 			    DEVNAME(sc));
7274 			goto destroy_tx_slots;
7275 		}
7276 	}
7277 
7278 	if (mcx_create_cq(sc, &q->q_cq, q->q_uar, q->q_index,
7279 	    q->q_eq.eq_n) != 0)
7280 		goto destroy_tx_slots;
7281 
7282 	if (mcx_create_sq(sc, tx, q->q_uar, q->q_index, q->q_cq.cq_n)
7283 	    != 0)
7284 		goto destroy_cq;
7285 
7286 	if (mcx_create_rq(sc, rx, q->q_index, q->q_cq.cq_n) != 0)
7287 		goto destroy_sq;
7288 
7289 	return 0;
7290 
7291 destroy_sq:
7292 	mcx_destroy_sq(sc, tx);
7293 destroy_cq:
7294 	mcx_destroy_cq(sc, &q->q_cq);
7295 destroy_tx_slots:
7296 	mcx_free_slots(sc, tx->tx_slots, i, (1 << MCX_LOG_SQ_SIZE));
7297 	tx->tx_slots = NULL;
7298 
7299 	i = (1 << MCX_LOG_RQ_SIZE);
7300 destroy_rx_slots:
7301 	mcx_free_slots(sc, rx->rx_slots, i, (1 << MCX_LOG_RQ_SIZE));
7302 	rx->rx_slots = NULL;
7303 	return ENOMEM;
7304 }
7305 
7306 static int
7307 mcx_rss_group_entry_count(struct mcx_softc *sc, int group)
7308 {
7309 	int i;
7310 	int count;
7311 
7312 	count = 0;
7313 	for (i = 0; i < __arraycount(mcx_rss_config); i++) {
7314 		if (mcx_rss_config[i].flow_group == group)
7315 			count++;
7316 	}
7317 
7318 	return count;
7319 }
7320 
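/*
 * mcx_init() builds a two level flow table hierarchy: a root table
 * keyed on destination mac address (promisc, allmulti and per-address
 * groups) whose entries forward to an RSS table, which steers matching
 * packets to TIRs; indirect TIRs hash over the RQT for the L3/L4
 * groups, while a direct TIR to queue 0 catches everything else.
 */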
7321 static int
7322 mcx_init(struct ifnet *ifp)
7323 {
7324 	struct mcx_softc *sc = ifp->if_softc;
7325 	struct mcx_rx *rx;
7326 	struct mcx_tx *tx;
7327 	int i, start, count, flow_group, flow_index;
7328 	struct mcx_flow_match match_crit;
7329 	struct mcx_rss_rule *rss;
7330 	uint32_t dest;
7331 	int rqns[MCX_MAX_QUEUES] = { 0 };
7332 
7333 	if (ISSET(ifp->if_flags, IFF_RUNNING))
7334 		mcx_stop(ifp, 0);
7335 
7336 	if (mcx_create_tis(sc, &sc->sc_tis) != 0)
7337 		goto down;
7338 
7339 	for (i = 0; i < sc->sc_nqueues; i++) {
7340 		if (mcx_queue_up(sc, &sc->sc_queues[i]) != 0) {
7341 			goto down;
7342 		}
7343 	}
7344 
7345 	/* RSS flow table and flow groups */
7346 	if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE, 1,
7347 	    &sc->sc_rss_flow_table_id) != 0)
7348 		goto down;
7349 
7350 	dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
7351 	    sc->sc_rss_flow_table_id;
7352 
7353 	/* L4 RSS flow group (v4/v6 tcp/udp, no fragments) */
7354 	memset(&match_crit, 0, sizeof(match_crit));
7355 	match_crit.mc_ethertype = 0xffff;
7356 	match_crit.mc_ip_proto = 0xff;
7357 	match_crit.mc_vlan_flags = MCX_FLOW_MATCH_IP_FRAG;
7358 	start = 0;
7359 	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_L4);
7360 	if (count != 0) {
7361 		if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
7362 		    MCX_FLOW_GROUP_RSS_L4, start, count,
7363 		    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7364 			goto down;
7365 		start += count;
7366 	}
7367 
7368 	/* L3 RSS flow group (v4/v6, including fragments) */
7369 	memset(&match_crit, 0, sizeof(match_crit));
7370 	match_crit.mc_ethertype = 0xffff;
7371 	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_L3);
7372 	if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
7373 	    MCX_FLOW_GROUP_RSS_L3, start, count,
7374 	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7375 		goto down;
7376 	start += count;
7377 
7378 	/* non-RSS flow group */
7379 	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_NONE);
7380 	memset(&match_crit, 0, sizeof(match_crit));
7381 	if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
7382 	    MCX_FLOW_GROUP_RSS_NONE, start, count, 0, &match_crit) != 0)
7383 		goto down;
7384 
7385 	/* Root flow table, matching packets based on mac address */
7386 	if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE, 0,
7387 	    &sc->sc_mac_flow_table_id) != 0)
7388 		goto down;
7389 
7390 	/* promisc flow group */
7391 	start = 0;
7392 	memset(&match_crit, 0, sizeof(match_crit));
7393 	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
7394 	    MCX_FLOW_GROUP_PROMISC, start, 1, 0, &match_crit) != 0)
7395 		goto down;
7396 	sc->sc_promisc_flow_enabled = 0;
7397 	start++;
7398 
7399 	/* all multicast flow group */
7400 	match_crit.mc_dest_mac[0] = 0x01;
7401 	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
7402 	    MCX_FLOW_GROUP_ALLMULTI, start, 1,
7403 	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7404 		goto down;
7405 	sc->sc_allmulti_flow_enabled = 0;
7406 	start++;
7407 
7408 	/* mac address matching flow group */
7409 	memset(&match_crit.mc_dest_mac, 0xff, sizeof(match_crit.mc_dest_mac));
7410 	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
7411 	    MCX_FLOW_GROUP_MAC, start, (1 << MCX_LOG_FLOW_TABLE_SIZE) - start,
7412 	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7413 		goto down;
7414 
7415 	/* flow table entries for unicast and broadcast */
7416 	start = 0;
7417 	if (mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC, start,
7418 	    LLADDR(satosdl(ifp->if_dl->ifa_addr)), dest) != 0)
7419 		goto down;
7420 	start++;
7421 
7422 	if (mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC, start,
7423 	    etherbroadcastaddr, dest) != 0)
7424 		goto down;
7425 	start++;
7426 
7427 	/* multicast entries go after that */
7428 	sc->sc_mcast_flow_base = start;
7429 
7430 	/* re-add any existing multicast flows */
7431 	for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7432 		if (sc->sc_mcast_flows[i][0] != 0) {
7433 			mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC,
7434 			    sc->sc_mcast_flow_base + i,
7435 			    sc->sc_mcast_flows[i], dest);
7436 		}
7437 	}
7438 
7439 	if (mcx_set_flow_table_root(sc, sc->sc_mac_flow_table_id) != 0)
7440 		goto down;
7441 
7442 	/*
7443 	 * the RQT can be any size as long as it's a power of two.
7444 	 * since we also restrict the number of queues to a power of two,
7445 	 * we can just put each rx queue in once.
7446 	 */
7447 	for (i = 0; i < sc->sc_nqueues; i++)
7448 		rqns[i] = sc->sc_queues[i].q_rx.rx_rqn;
7449 
7450 	if (mcx_create_rqt(sc, sc->sc_nqueues, rqns, &sc->sc_rqt) != 0)
7451 		goto down;
7452 
7453 	start = 0;
7454 	flow_index = 0;
7455 	flow_group = -1;
7456 	for (i = 0; i < __arraycount(mcx_rss_config); i++) {
7457 		rss = &mcx_rss_config[i];
7458 		if (rss->flow_group != flow_group) {
7459 			flow_group = rss->flow_group;
7460 			flow_index = 0;
7461 		}
7462 
7463 		if (rss->hash_sel == 0) {
7464 			if (mcx_create_tir_direct(sc, &sc->sc_queues[0].q_rx,
7465 			    &sc->sc_tir[i]) != 0)
7466 				goto down;
7467 		} else {
7468 			if (mcx_create_tir_indirect(sc, sc->sc_rqt,
7469 			    rss->hash_sel, &sc->sc_tir[i]) != 0)
7470 				goto down;
7471 		}
7472 
7473 		if (mcx_set_flow_table_entry_proto(sc, flow_group,
7474 		    flow_index, rss->ethertype, rss->ip_proto,
7475 		    MCX_FLOW_CONTEXT_DEST_TYPE_TIR | sc->sc_tir[i]) != 0)
7476 			goto down;
7477 		flow_index++;
7478 	}
7479 
7480 	for (i = 0; i < sc->sc_nqueues; i++) {
7481 		struct mcx_queues *q = &sc->sc_queues[i];
7482 		rx = &q->q_rx;
7483 		tx = &q->q_tx;
7484 
7485 		/* start the queues */
7486 		if (mcx_ready_sq(sc, tx) != 0)
7487 			goto down;
7488 
7489 		if (mcx_ready_rq(sc, rx) != 0)
7490 			goto down;
7491 
7492 		mcx_rxr_init(&rx->rx_rxr, 1, (1 << MCX_LOG_RQ_SIZE));
7493 		rx->rx_prod = 0;
7494 		mcx_rx_fill(sc, rx);
7495 
7496 		tx->tx_cons = 0;
7497 		tx->tx_prod = 0;
7498 	}
7499 
7500 	mcx_calibrate_first(sc);
7501 
7502 	SET(ifp->if_flags, IFF_RUNNING);
7503 	CLR(ifp->if_flags, IFF_OACTIVE);
7504 	if_schedule_deferred_start(ifp);
7505 
7506 	return 0;
7507 down:
7508 	mcx_stop(ifp, 0);
7509 	return EIO;
7510 }
7511 
7512 static void
7513 mcx_stop(struct ifnet *ifp, int disable)
7514 {
7515 	struct mcx_softc *sc = ifp->if_softc;
7516 	struct mcx_rss_rule *rss;
7517 	int group, i, flow_group, flow_index;
7518 
7519 	CLR(ifp->if_flags, IFF_RUNNING);
7520 
7521 	/*
7522 	 * delete flow table entries first, so no packets can arrive
7523 	 * after the barriers
7524 	 */
7525 	if (sc->sc_promisc_flow_enabled)
7526 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
7527 	if (sc->sc_allmulti_flow_enabled)
7528 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
7529 	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 0);
7530 	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 1);
7531 	for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7532 		if (sc->sc_mcast_flows[i][0] != 0) {
7533 			mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
7534 			    sc->sc_mcast_flow_base + i);
7535 		}
7536 	}
7537 
7538 	flow_group = -1;
7539 	flow_index = 0;
7540 	for (i = 0; i < __arraycount(mcx_rss_config); i++) {
7541 		rss = &mcx_rss_config[i];
7542 		if (rss->flow_group != flow_group) {
7543 			flow_group = rss->flow_group;
7544 			flow_index = 0;
7545 		}
7546 
7547 		mcx_delete_flow_table_entry(sc, flow_group, flow_index);
7548 
7549 		mcx_destroy_tir(sc, sc->sc_tir[i]);
7550 		sc->sc_tir[i] = 0;
7551 
7552 		flow_index++;
7553 	}
7554 
7555 	for (i = 0; i < sc->sc_nqueues; i++) {
7556 		callout_halt(&sc->sc_queues[i].q_rx.rx_refill, NULL);
7557 	}
7558 
7559 	callout_halt(&sc->sc_calibrate, NULL);
7560 
7561 	for (group = 0; group < MCX_NUM_FLOW_GROUPS; group++) {
7562 		if (sc->sc_flow_group[group].g_id != -1)
7563 			mcx_destroy_flow_group(sc, group);
7564 	}
7565 
7566 	if (sc->sc_mac_flow_table_id != -1) {
7567 		mcx_destroy_flow_table(sc, sc->sc_mac_flow_table_id);
7568 		sc->sc_mac_flow_table_id = -1;
7569 	}
7570 	if (sc->sc_rss_flow_table_id != -1) {
7571 		mcx_destroy_flow_table(sc, sc->sc_rss_flow_table_id);
7572 		sc->sc_rss_flow_table_id = -1;
7573 	}
7574 	if (sc->sc_rqt != -1) {
7575 		mcx_destroy_rqt(sc, sc->sc_rqt);
7576 		sc->sc_rqt = -1;
7577 	}
7578 
7579 	for (i = 0; i < sc->sc_nqueues; i++) {
7580 		struct mcx_queues *q = &sc->sc_queues[i];
7581 		struct mcx_rx *rx = &q->q_rx;
7582 		struct mcx_tx *tx = &q->q_tx;
7583 		struct mcx_cq *cq = &q->q_cq;
7584 
7585 		if (rx->rx_rqn != 0)
7586 			mcx_destroy_rq(sc, rx);
7587 
7588 		if (tx->tx_sqn != 0)
7589 			mcx_destroy_sq(sc, tx);
7590 
7591 		if (tx->tx_slots != NULL) {
7592 			mcx_free_slots(sc, tx->tx_slots,
7593 			    (1 << MCX_LOG_SQ_SIZE), (1 << MCX_LOG_SQ_SIZE));
7594 			tx->tx_slots = NULL;
7595 		}
7596 		if (rx->rx_slots != NULL) {
7597 			mcx_free_slots(sc, rx->rx_slots,
7598 			    (1 << MCX_LOG_RQ_SIZE), (1 << MCX_LOG_RQ_SIZE));
7599 			rx->rx_slots = NULL;
7600 		}
7601 
7602 		if (cq->cq_n != 0)
7603 			mcx_destroy_cq(sc, cq);
7604 	}
7605 	if (sc->sc_tis != 0) {
7606 		mcx_destroy_tis(sc, sc->sc_tis);
7607 		sc->sc_tis = 0;
7608 	}
7609 }
7610 
7611 static int
7612 mcx_ioctl(struct ifnet *ifp, u_long cmd, void *data)
7613 {
7614 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
7615 	struct ifreq *ifr = (struct ifreq *)data;
7616 	struct ethercom *ec = &sc->sc_ec;
7617 	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
7618 	struct ether_multi *enm;
7619 	struct ether_multistep step;
7620 	int s, i, flags, error = 0;
7621 	uint32_t dest;
7622 
7623 	s = splnet();
7624 	switch (cmd) {
7625 
7626 	case SIOCADDMULTI:
7627 		if (ether_addmulti(ifreq_getaddr(cmd, ifr), &sc->sc_ec) == ENETRESET) {
7628 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
7629 			if (error != 0) {
7630 				splx(s);
7631 				return (error);
7632 			}
7633 
7634 			dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
7635 			    sc->sc_rss_flow_table_id;
7636 
7637 			for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7638 				if (sc->sc_mcast_flows[i][0] == 0) {
7639 					memcpy(sc->sc_mcast_flows[i], addrlo,
7640 					    ETHER_ADDR_LEN);
7641 					if (ISSET(ifp->if_flags, IFF_RUNNING)) {
7642 						mcx_set_flow_table_entry_mac(sc,
7643 						    MCX_FLOW_GROUP_MAC,
7644 						    sc->sc_mcast_flow_base + i,
7645 						    sc->sc_mcast_flows[i], dest);
7646 					}
7647 					break;
7648 				}
7649 			}
7650 
7651 			if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
7652 				if (i == MCX_NUM_MCAST_FLOWS) {
7653 					SET(ifp->if_flags, IFF_ALLMULTI);
7654 					sc->sc_extra_mcast++;
7655 					error = ENETRESET;
7656 				}
7657 
7658 				if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN)) {
7659 					SET(ifp->if_flags, IFF_ALLMULTI);
7660 					error = ENETRESET;
7661 				}
7662 			}
7663 		}
7664 		break;
7665 
7666 	case SIOCDELMULTI:
7667 		if (ether_delmulti(ifreq_getaddr(cmd, ifr), &sc->sc_ec) == ENETRESET) {
7668 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
7669 			if (error != 0) {
7670 				splx(s);
7671 				return (error);
7672 			}
7673 
7674 			for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7675 				if (memcmp(sc->sc_mcast_flows[i], addrlo,
7676 				    ETHER_ADDR_LEN) == 0) {
7677 					if (ISSET(ifp->if_flags, IFF_RUNNING)) {
7678 						mcx_delete_flow_table_entry(sc,
7679 						    MCX_FLOW_GROUP_MAC,
7680 						    sc->sc_mcast_flow_base + i);
7681 					}
7682 					sc->sc_mcast_flows[i][0] = 0;
7683 					break;
7684 				}
7685 			}
7686 
7687 			if (i == MCX_NUM_MCAST_FLOWS)
7688 				sc->sc_extra_mcast--;
7689 
7690 			if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
7691 			    sc->sc_extra_mcast == 0) {
7692 				flags = 0;
7693 				ETHER_LOCK(ec);
7694 				ETHER_FIRST_MULTI(step, ec, enm);
7695 				while (enm != NULL) {
7696 					if (memcmp(enm->enm_addrlo,
7697 					    enm->enm_addrhi, ETHER_ADDR_LEN)) {
7698 						SET(flags, IFF_ALLMULTI);
7699 						break;
7700 					}
7701 					ETHER_NEXT_MULTI(step, enm);
7702 				}
7703 				ETHER_UNLOCK(ec);
7704 				if (!ISSET(flags, IFF_ALLMULTI)) {
7705 					CLR(ifp->if_flags, IFF_ALLMULTI);
7706 					error = ENETRESET;
7707 				}
7708 			}
7709 		}
7710 		break;
7711 
7712 	default:
7713 		error = ether_ioctl(ifp, cmd, data);
7714 	}
7715 
7716 	if (error == ENETRESET) {
7717 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
7718 		    (IFF_UP | IFF_RUNNING))
7719 			mcx_iff(sc);
7720 		error = 0;
7721 	}
7722 	splx(s);
7723 
7724 	return (error);
7725 }
7726 
7727 #if 0
7728 static int
7729 mcx_get_sffpage(struct ifnet *ifp, struct if_sffpage *sff)
7730 {
7731 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
7732 	struct mcx_reg_mcia mcia;
7733 	struct mcx_reg_pmlp pmlp;
7734 	int offset, error;
7735 
7736 	/* get module number */
7737 	memset(&pmlp, 0, sizeof(pmlp));
7738 	pmlp.rp_local_port = 1;
7739 	error = mcx_access_hca_reg(sc, MCX_REG_PMLP, MCX_REG_OP_READ, &pmlp,
7740 	    sizeof(pmlp));
7741 	if (error != 0) {
7742 		printf("%s: unable to get eeprom module number\n",
7743 		    DEVNAME(sc));
7744 		return error;
7745 	}
7746 
7747 	for (offset = 0; offset < 256; offset += MCX_MCIA_EEPROM_BYTES) {
7748 		memset(&mcia, 0, sizeof(mcia));
7749 		mcia.rm_l = 0;
7750 		mcia.rm_module = be32toh(pmlp.rp_lane0_mapping) &
7751 		    MCX_PMLP_MODULE_NUM_MASK;
7752 		mcia.rm_i2c_addr = sff->sff_addr / 2;	/* apparently */
7753 		mcia.rm_page_num = sff->sff_page;
7754 		mcia.rm_dev_addr = htobe16(offset);
7755 		mcia.rm_size = htobe16(MCX_MCIA_EEPROM_BYTES);
7756 
7757 		error = mcx_access_hca_reg(sc, MCX_REG_MCIA, MCX_REG_OP_READ,
7758 		    &mcia, sizeof(mcia));
7759 		if (error != 0) {
7760 			printf("%s: unable to read eeprom at %x\n",
7761 			    DEVNAME(sc), offset);
7762 			return error;
7763 		}
7764 
7765 		memcpy(sff->sff_data + offset, mcia.rm_data,
7766 		    MCX_MCIA_EEPROM_BYTES);
7767 	}
7768 
7769 	return 0;
7770 }
7771 #endif
7772 
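/*
 * load an mbuf chain for transmit.  if the chain has more segments
 * than the DMA map allows (EFBIG), it is compacted with m_defrag()
 * and the load retried once before the packet is dropped.
 */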
7773 static int
7774 mcx_load_mbuf(struct mcx_softc *sc, struct mcx_slot *ms, struct mbuf *m)
7775 {
7776 	switch (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
7777 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
7778 	case 0:
7779 		break;
7780 
7781 	case EFBIG:
7782 		if (m_defrag(m, M_DONTWAIT) != NULL &&
7783 		    bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
7784 		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
7785 			break;
7786 
7787 		/* FALLTHROUGH */
7788 	default:
7789 		return (1);
7790 	}
7791 
7792 	ms->ms_m = m;
7793 	return (0);
7794 }
7795 
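/*
 * enqueue packets on a send queue.  the first MCX_SQ_INLINE_SIZE bytes
 * of each frame (or a vlan-expanded copy of the ethernet header) are
 * placed inline in the SQE and the remainder is described by data
 * segments, possibly spilling into following slots.  after posting,
 * the doorbell record is updated with the new producer index and the
 * first 8 bytes of the last SQE are written to one of two alternating
 * "blue flame" buffers in the UAR, which acts as the doorbell telling
 * the device to fetch the new work queue entries.
 */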
7796 static void
7797 mcx_send_common_locked(struct ifnet *ifp, struct mcx_tx *tx, bool is_transmit)
7798 {
7799 	struct mcx_softc *sc = ifp->if_softc;
7800 	struct mcx_sq_entry *sq, *sqe;
7801 	struct mcx_sq_entry_seg *sqs;
7802 	struct mcx_slot *ms;
7803 	bus_dmamap_t map;
7804 	struct mbuf *m;
7805 	u_int idx, free, used;
7806 	uint64_t *bf;
7807 	uint32_t csum;
7808 	size_t bf_base;
7809 	int i, seg, nseg;
7810 
7811 	KASSERT(mutex_owned(&tx->tx_lock));
7812 
7813 	if ((ifp->if_flags & IFF_RUNNING) == 0)
7814 		return;
7815 
7816 	bf_base = (tx->tx_uar * MCX_PAGE_SIZE) + MCX_UAR_BF;
7817 
7818 	idx = tx->tx_prod % (1 << MCX_LOG_SQ_SIZE);
7819 	free = (tx->tx_cons + (1 << MCX_LOG_SQ_SIZE)) - tx->tx_prod;
7820 
7821 	used = 0;
7822 	bf = NULL;
7823 
7824 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
7825 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
7826 
7827 	sq = (struct mcx_sq_entry *)MCX_DMA_KVA(&tx->tx_sq_mem);
7828 
7829 	for (;;) {
7830 		if (used + MCX_SQ_ENTRY_MAX_SLOTS >= free) {
7831 			SET(ifp->if_flags, IFF_OACTIVE);
7832 			break;
7833 		}
7834 
7835 		if (is_transmit) {
7836 			m = pcq_get(tx->tx_pcq);
7837 		} else {
7838 			IFQ_DEQUEUE(&ifp->if_snd, m);
7839 		}
7840 		if (m == NULL) {
7841 			break;
7842 		}
7843 
7844 		sqe = sq + idx;
7845 		ms = &tx->tx_slots[idx];
7846 		memset(sqe, 0, sizeof(*sqe));
7847 
7848 		/* ctrl segment */
7849 		sqe->sqe_opcode_index = htobe32(MCX_SQE_WQE_OPCODE_SEND |
7850 		    ((tx->tx_prod & 0xffff) << MCX_SQE_WQE_INDEX_SHIFT));
7851 		/* always generate a completion event */
7852 		sqe->sqe_signature = htobe32(MCX_SQE_CE_CQE_ALWAYS);
7853 
7854 		/* eth segment */
7855 		csum = 0;
7856 		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
7857 			csum |= MCX_SQE_L3_CSUM;
7858 		if (m->m_pkthdr.csum_flags &
7859 		    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6))
7860 			csum |= MCX_SQE_L4_CSUM;
7861 		sqe->sqe_mss_csum = htobe32(csum);
7862 		sqe->sqe_inline_header_size = htobe16(MCX_SQ_INLINE_SIZE);
7863 		if (vlan_has_tag(m)) {
7864 			struct ether_vlan_header *evh;
7865 			evh = (struct ether_vlan_header *)
7866 			    &sqe->sqe_inline_headers;
7867 
7868 			m_copydata(m, 0, ETHER_HDR_LEN, evh);
7869 			evh->evl_proto = evh->evl_encap_proto;
7870 			evh->evl_encap_proto = htons(ETHERTYPE_VLAN);
7871 			evh->evl_tag = htons(vlan_get_tag(m));
7872 			m_adj(m, ETHER_HDR_LEN);
7873 		} else {
7874 			m_copydata(m, 0, MCX_SQ_INLINE_SIZE,
7875 			    sqe->sqe_inline_headers);
7876 			m_adj(m, MCX_SQ_INLINE_SIZE);
7877 		}
7878 
7879 		if (mcx_load_mbuf(sc, ms, m) != 0) {
7880 			m_freem(m);
7881 			if_statinc(ifp, if_oerrors);
7882 			continue;
7883 		}
7884 		bf = (uint64_t *)sqe;
7885 
7886 		if (ifp->if_bpf != NULL)
7887 			bpf_mtap2(ifp->if_bpf, sqe->sqe_inline_headers,
7888 			    MCX_SQ_INLINE_SIZE, m, BPF_D_OUT);
7889 
7890 		map = ms->ms_map;
7891 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
7892 		    BUS_DMASYNC_PREWRITE);
7893 
7894 		sqe->sqe_ds_sq_num =
7895 		    htobe32((tx->tx_sqn << MCX_SQE_SQ_NUM_SHIFT) |
7896 		    (map->dm_nsegs + 3));
7897 
7898 		/* data segment - first wqe has one segment */
7899 		sqs = sqe->sqe_segs;
7900 		seg = 0;
7901 		nseg = 1;
7902 		for (i = 0; i < map->dm_nsegs; i++) {
7903 			if (seg == nseg) {
7904 				/* next slot */
7905 				idx++;
7906 				if (idx == (1 << MCX_LOG_SQ_SIZE))
7907 					idx = 0;
7908 				tx->tx_prod++;
7909 				used++;
7910 
7911 				sqs = (struct mcx_sq_entry_seg *)(sq + idx);
7912 				seg = 0;
7913 				nseg = MCX_SQ_SEGS_PER_SLOT;
7914 			}
7915 			sqs[seg].sqs_byte_count =
7916 			    htobe32(map->dm_segs[i].ds_len);
7917 			sqs[seg].sqs_lkey = htobe32(sc->sc_lkey);
7918 			sqs[seg].sqs_addr = htobe64(map->dm_segs[i].ds_addr);
7919 			seg++;
7920 		}
7921 
7922 		idx++;
7923 		if (idx == (1 << MCX_LOG_SQ_SIZE))
7924 			idx = 0;
7925 		tx->tx_prod++;
7926 		used++;
7927 	}
7928 
7929 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
7930 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_PREWRITE);
7931 
7932 	if (used) {
7933 		bus_size_t blueflame;
7934 
7935 		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
7936 		    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
7937 		be32enc(MCX_DMA_OFF(&sc->sc_doorbell_mem, tx->tx_doorbell),
7938 		    tx->tx_prod & MCX_WQ_DOORBELL_MASK);
7939 		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
7940 		    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
7941 
7942 		/*
7943 		 * write the first 64 bits of the last sqe we produced
7944 		 * to the blue flame buffer
7945 		 */
7946 
7947 		blueflame = bf_base + tx->tx_bf_offset;
7948 		bus_space_write_8(sc->sc_memt, sc->sc_memh,
7949 		    blueflame, *bf);
7950 		mcx_bar(sc, blueflame, sizeof(*bf), BUS_SPACE_BARRIER_WRITE);
7951 
7952 		/* next write goes to the other buffer */
7953 		tx->tx_bf_offset ^= sc->sc_bf_size;
7954 	}
7955 }
7956 
7957 static void
7958 mcx_start(struct ifnet *ifp)
7959 {
7960 	struct mcx_softc *sc = ifp->if_softc;
7961 	/* mcx_start() always uses TX ring[0] */
7962 	struct mcx_tx *tx = &sc->sc_queues[0].q_tx;
7963 
7964 	mutex_enter(&tx->tx_lock);
7965 	if (!ISSET(ifp->if_flags, IFF_OACTIVE)) {
7966 		mcx_send_common_locked(ifp, tx, false);
7967 	}
7968 	mutex_exit(&tx->tx_lock);
7969 }
7970 
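/*
 * if_transmit entry point: packets are spread across the tx rings by
 * the submitting cpu's index.  if the ring lock is contended, the
 * packet stays on the pcq and a softint drains it later via
 * mcx_deferred_transmit().
 */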
7971 static int
7972 mcx_transmit(struct ifnet *ifp, struct mbuf *m)
7973 {
7974 	struct mcx_softc *sc = ifp->if_softc;
7975 	struct mcx_tx *tx;
7976 
7977 	tx = &sc->sc_queues[cpu_index(curcpu()) % sc->sc_nqueues].q_tx;
7978 	if (__predict_false(!pcq_put(tx->tx_pcq, m))) {
7979 		m_freem(m);
7980 		return ENOBUFS;
7981 	}
7982 
7983 	if (mutex_tryenter(&tx->tx_lock)) {
7984 		mcx_send_common_locked(ifp, tx, true);
7985 		mutex_exit(&tx->tx_lock);
7986 	} else {
7987 		softint_schedule(tx->tx_softint);
7988 	}
7989 
7990 	return 0;
7991 }
7992 
7993 static void
7994 mcx_deferred_transmit(void *arg)
7995 {
7996 	struct mcx_tx *tx = arg;
7997 	struct mcx_softc *sc = tx->tx_softc;
7998 	struct ifnet *ifp = &sc->sc_ec.ec_if;
7999 
8000 	mutex_enter(&tx->tx_lock);
8001 	if (pcq_peek(tx->tx_pcq) != NULL) {
8002 		mcx_send_common_locked(ifp, tx, true);
8003 	}
8004 	mutex_exit(&tx->tx_lock);
8005 }
8006 
8007 
8008 static void
8009 mcx_media_add_types(struct mcx_softc *sc)
8010 {
8011 	struct mcx_reg_ptys ptys;
8012 	int i;
8013 	uint32_t proto_cap;
8014 
8015 	memset(&ptys, 0, sizeof(ptys));
8016 	ptys.rp_local_port = 1;
8017 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
8018 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
8019 	    sizeof(ptys)) != 0) {
8020 		printf("%s: unable to read port type/speed\n", DEVNAME(sc));
8021 		return;
8022 	}
8023 
8024 	proto_cap = be32toh(ptys.rp_eth_proto_cap);
8025 	for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
8026 		const struct mcx_eth_proto_capability *cap;
8027 		if (!ISSET(proto_cap, 1U << i))
8028 			continue;
8029 
8030 		cap = &mcx_eth_cap_map[i];
8031 		if (cap->cap_media == 0)
8032 			continue;
8033 
8034 		ifmedia_add(&sc->sc_media, IFM_ETHER | cap->cap_media, 0, NULL);
8035 	}
8036 }
8037 
8038 static void
8039 mcx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
8040 {
8041 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
8042 	struct mcx_reg_ptys ptys;
8043 	int i;
8044 	uint32_t proto_oper;
8045 	uint64_t media_oper;
8046 
8047 	memset(&ptys, 0, sizeof(ptys));
8048 	ptys.rp_local_port = 1;
8049 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
8050 
8051 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
8052 	    sizeof(ptys)) != 0) {
8053 		printf("%s: unable to read port type/speed\n", DEVNAME(sc));
8054 		return;
8055 	}
8056 
8057 	proto_oper = be32toh(ptys.rp_eth_proto_oper);
8058 
8059 	media_oper = 0;
8060 
8061 	for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
8062 		const struct mcx_eth_proto_capability *cap;
8063 		if (!ISSET(proto_oper, 1U << i))
8064 			continue;
8065 
8066 		cap = &mcx_eth_cap_map[i];
8067 
8068 		if (cap->cap_media != 0)
8069 			media_oper = cap->cap_media;
8070 	}
8071 
8072 	ifmr->ifm_status = IFM_AVALID;
8073 	if (proto_oper != 0) {
8074 		ifmr->ifm_status |= IFM_ACTIVE;
8075 		ifmr->ifm_active = IFM_ETHER | IFM_FDX | IFM_AUTO | media_oper;
8076 		/* txpause, rxpause, duplex? */
8077 	}
8078 }
8079 
8080 static int
8081 mcx_media_change(struct ifnet *ifp)
8082 {
8083 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
8084 	struct mcx_reg_ptys ptys;
8085 	struct mcx_reg_paos paos;
8086 	uint32_t media;
8087 	int i, error;
8088 
8089 	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
8090 		return EINVAL;
8091 
8092 	error = 0;
8093 
8094 	if (IFM_SUBTYPE(sc->sc_media.ifm_media) == IFM_AUTO) {
8095 		/* read ptys to get supported media */
8096 		memset(&ptys, 0, sizeof(ptys));
8097 		ptys.rp_local_port = 1;
8098 		ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
8099 		if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ,
8100 		    &ptys, sizeof(ptys)) != 0) {
8101 			printf("%s: unable to read port type/speed\n",
8102 			    DEVNAME(sc));
8103 			return EIO;
8104 		}
8105 
8106 		media = be32toh(ptys.rp_eth_proto_cap);
8107 	} else {
8108 		/* map media type */
8109 		media = 0;
8110 		for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
8111 			const struct mcx_eth_proto_capability *cap;
8112 
8113 			cap = &mcx_eth_cap_map[i];
8114 			if (cap->cap_media ==
8115 			    IFM_SUBTYPE(sc->sc_media.ifm_media)) {
8116 				media = (1U << i);
8117 				break;
8118 			}
8119 		}
8120 	}
8121 
8122 	/* disable the port */
8123 	memset(&paos, 0, sizeof(paos));
8124 	paos.rp_local_port = 1;
8125 	paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_DOWN;
8126 	paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
8127 	if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
8128 	    sizeof(paos)) != 0) {
8129 		printf("%s: unable to set port state to down\n", DEVNAME(sc));
8130 		return EIO;
8131 	}
8132 
8133 	memset(&ptys, 0, sizeof(ptys));
8134 	ptys.rp_local_port = 1;
8135 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
8136 	ptys.rp_eth_proto_admin = htobe32(media);
8137 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_WRITE, &ptys,
8138 	    sizeof(ptys)) != 0) {
8139 		printf("%s: unable to set port media type/speed\n",
8140 		    DEVNAME(sc));
8141 		error = EIO;
8142 	}
8143 
8144 	/* re-enable the port to start negotiation */
8145 	memset(&paos, 0, sizeof(paos));
8146 	paos.rp_local_port = 1;
8147 	paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_UP;
8148 	paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
8149 	if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
8150 	    sizeof(paos)) != 0) {
8151 		printf("%s: unable to set port state to up\n", DEVNAME(sc));
8152 		error = EIO;
8153 	}
8154 
8155 	return error;
8156 }
8157 
8158 static void
8159 mcx_port_change(struct work *wk, void *xsc)
8160 {
8161 	struct mcx_softc *sc = xsc;
8162 	struct ifnet *ifp = &sc->sc_ec.ec_if;
8163 	struct mcx_reg_ptys ptys = {
8164 		.rp_local_port = 1,
8165 		.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH,
8166 	};
8167 	int link_state = LINK_STATE_DOWN;
8168 
8169 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
8170 	    sizeof(ptys)) == 0) {
8171 		uint32_t proto_oper = be32toh(ptys.rp_eth_proto_oper);
8172 		uint64_t baudrate = 0;
8173 		unsigned int i;
8174 
8175 		if (proto_oper != 0)
8176 			link_state = LINK_STATE_UP;
8177 
8178 		for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
8179 			const struct mcx_eth_proto_capability *cap;
8180 			if (!ISSET(proto_oper, 1U << i))
8181 				continue;
8182 
8183 			cap = &mcx_eth_cap_map[i];
8184 			if (cap->cap_baudrate == 0)
8185 				continue;
8186 
8187 			baudrate = cap->cap_baudrate;
8188 			break;
8189 		}
8190 
8191 		ifp->if_baudrate = baudrate;
8192 	}
8193 
8194 	if (link_state != ifp->if_link_state) {
8195 		if_link_state_change(ifp, link_state);
8196 	}
8197 }
8198 
8199 
8200 static inline uint32_t
8201 mcx_rd(struct mcx_softc *sc, bus_size_t r)
8202 {
8203 	uint32_t word;
8204 
8205 	word = bus_space_read_4(sc->sc_memt, sc->sc_memh, r);
8206 
8207 	return (be32toh(word));
8208 }
8209 
8210 static inline void
8211 mcx_wr(struct mcx_softc *sc, bus_size_t r, uint32_t v)
8212 {
8213 	bus_space_write_4(sc->sc_memt, sc->sc_memh, r, htobe32(v));
8214 }
8215 
8216 static inline void
8217 mcx_bar(struct mcx_softc *sc, bus_size_t r, bus_size_t l, int f)
8218 {
8219 #ifndef __NetBSD__
8220 	bus_space_barrier(sc->sc_memt, sc->sc_memh, r, l, f);
8221 #endif
8222 }
8223 
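/*
 * read the free-running 64-bit internal timer, which is split across
 * two 32-bit registers.  the high half is re-read until it is stable
 * so a carry out of the low half cannot be missed: if lo wraps from
 * 0xffffffff to 0 between the two reads of hi, hi will differ and the
 * loop retries with the new value.
 */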
8224 static uint64_t
8225 mcx_timer(struct mcx_softc *sc)
8226 {
8227 	uint32_t hi, lo, ni;
8228 
8229 	hi = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
8230 	for (;;) {
8231 		lo = mcx_rd(sc, MCX_INTERNAL_TIMER_L);
8232 		mcx_bar(sc, MCX_INTERNAL_TIMER_L, 8, BUS_SPACE_BARRIER_READ);
8233 		ni = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
8234 
8235 		if (ni == hi)
8236 			break;
8237 
8238 		hi = ni;
8239 	}
8240 
8241 	return (((uint64_t)hi << 32) | (uint64_t)lo);
8242 }
8243 
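/*
 * single-segment DMA memory via the usual four step bus_dma dance:
 * create a map, allocate physical memory, map it into kva, then load
 * the map.  each failure point unwinds only the steps taken before it.
 */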
8244 static int
8245 mcx_dmamem_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
8246     bus_size_t size, u_int align)
8247 {
8248 	mxm->mxm_size = size;
8249 
8250 	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
8251 	    mxm->mxm_size, 0,
8252 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
8253 	    &mxm->mxm_map) != 0)
8254 		return (1);
8255 	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
8256 	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
8257 	    BUS_DMA_WAITOK) != 0)
8258 		goto destroy;
8259 	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
8260 	    mxm->mxm_size, &mxm->mxm_kva,
8261 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
8262 		goto free;
8263 	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
8264 	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
8265 		goto unmap;
8266 
8267 	mcx_dmamem_zero(mxm);
8268 
8269 	return (0);
8270 unmap:
8271 	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
8272 free:
8273 	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
8274 destroy:
8275 	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
8276 	return (1);
8277 }
8278 
8279 static void
8280 mcx_dmamem_zero(struct mcx_dmamem *mxm)
8281 {
8282 	memset(MCX_DMA_KVA(mxm), 0, MCX_DMA_LEN(mxm));
8283 }
8284 
8285 static void
8286 mcx_dmamem_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
8287 {
8288 	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
8289 	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
8290 	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
8291 	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
8292 }
8293 
8294 static int
8295 mcx_hwmem_alloc(struct mcx_softc *sc, struct mcx_hwmem *mhm, unsigned int pages)
8296 {
8297 	bus_dma_segment_t *segs;
8298 	bus_size_t len = pages * MCX_PAGE_SIZE;
8299 	size_t seglen;
8300 
8301 	segs = kmem_alloc(sizeof(*segs) * pages, KM_SLEEP);
8302 	seglen = sizeof(*segs) * pages;
8303 
8304 	if (bus_dmamem_alloc(sc->sc_dmat, len, MCX_PAGE_SIZE, 0,
8305 	    segs, pages, &mhm->mhm_seg_count, BUS_DMA_NOWAIT) != 0)
8306 		goto free_segs;
8307 
8308 	if (mhm->mhm_seg_count < pages) {
8309 		size_t nseglen;
8310 
8311 		mhm->mhm_segs = kmem_alloc(
8312 		    sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count, KM_SLEEP);
8313 
8314 		nseglen = sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count;
8315 
8316 		memcpy(mhm->mhm_segs, segs, nseglen);
8317 
8318 		kmem_free(segs, seglen);
8319 
8320 		segs = mhm->mhm_segs;
8321 		seglen = nseglen;
8322 	} else
8323 		mhm->mhm_segs = segs;
8324 
8325 	if (bus_dmamap_create(sc->sc_dmat, len, pages, MCX_PAGE_SIZE,
8326 	    MCX_PAGE_SIZE, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW /*|BUS_DMA_64BIT*/,
8327 	    &mhm->mhm_map) != 0)
8328 		goto free_dmamem;
8329 
8330 	if (bus_dmamap_load_raw(sc->sc_dmat, mhm->mhm_map,
8331 	    mhm->mhm_segs, mhm->mhm_seg_count, len, BUS_DMA_NOWAIT) != 0)
8332 		goto destroy;
8333 
8334 	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
8335 	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_PRERW);
8336 
8337 	mhm->mhm_npages = pages;
8338 
8339 	return (0);
8340 
8341 destroy:
8342 	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
8343 free_dmamem:
8344 	bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
8345 free_segs:
8346 	kmem_free(segs, seglen);
8347 	mhm->mhm_segs = NULL;
8348 
8349 	return (-1);
8350 }
8351 
8352 static void
8353 mcx_hwmem_free(struct mcx_softc *sc, struct mcx_hwmem *mhm)
8354 {
8355 	if (mhm->mhm_npages == 0)
8356 		return;
8357 
8358 	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
8359 	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_POSTRW);
8360 
8361 	bus_dmamap_unload(sc->sc_dmat, mhm->mhm_map);
8362 	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
8363 	bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
8364 	kmem_free(mhm->mhm_segs, sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count);
8365 
8366 	mhm->mhm_npages = 0;
8367 }
8368 
8369 #if NKSTAT > 0
8370 struct mcx_ppcnt {
8371 	char			 name[KSTAT_KV_NAMELEN];
8372 	enum kstat_kv_unit	 unit;
8373 };
8374 
8375 static const struct mcx_ppcnt mcx_ppcnt_ieee8023_tpl[] = {
8376 	{ "Good Tx",		KSTAT_KV_U_PACKETS, },
8377 	{ "Good Rx",		KSTAT_KV_U_PACKETS, },
8378 	{ "FCS errs",		KSTAT_KV_U_PACKETS, },
8379 	{ "Alignment Errs",	KSTAT_KV_U_PACKETS, },
8380 	{ "Good Tx",		KSTAT_KV_U_BYTES, },
8381 	{ "Good Rx",		KSTAT_KV_U_BYTES, },
8382 	{ "Multicast Tx",	KSTAT_KV_U_PACKETS, },
8383 	{ "Broadcast Tx",	KSTAT_KV_U_PACKETS, },
8384 	{ "Multicast Rx",	KSTAT_KV_U_PACKETS, },
8385 	{ "Broadcast Rx",	KSTAT_KV_U_PACKETS, },
8386 	{ "In Range Len",	KSTAT_KV_U_PACKETS, },
8387 	{ "Out Of Range Len",	KSTAT_KV_U_PACKETS, },
8388 	{ "Frame Too Long",	KSTAT_KV_U_PACKETS, },
8389 	{ "Symbol Errs",	KSTAT_KV_U_PACKETS, },
8390 	{ "MAC Ctrl Tx",	KSTAT_KV_U_PACKETS, },
8391 	{ "MAC Ctrl Rx",	KSTAT_KV_U_PACKETS, },
8392 	{ "MAC Ctrl Unsup",	KSTAT_KV_U_PACKETS, },
8393 	{ "Pause Rx",		KSTAT_KV_U_PACKETS, },
8394 	{ "Pause Tx",		KSTAT_KV_U_PACKETS, },
8395 };
8396 CTASSERT(__arraycount(mcx_ppcnt_ieee8023_tpl) == mcx_ppcnt_ieee8023_count);
8397 
8398 static const struct mcx_ppcnt mcx_ppcnt_rfc2863_tpl[] = {
8399 	{ "Rx Bytes",		KSTAT_KV_U_BYTES, },
8400 	{ "Rx Unicast",		KSTAT_KV_U_PACKETS, },
8401 	{ "Rx Discards",	KSTAT_KV_U_PACKETS, },
8402 	{ "Rx Errors",		KSTAT_KV_U_PACKETS, },
8403 	{ "Rx Unknown Proto",	KSTAT_KV_U_PACKETS, },
8404 	{ "Tx Bytes",		KSTAT_KV_U_BYTES, },
8405 	{ "Tx Unicast",		KSTAT_KV_U_PACKETS, },
8406 	{ "Tx Discards",	KSTAT_KV_U_PACKETS, },
8407 	{ "Tx Errors",		KSTAT_KV_U_PACKETS, },
8408 	{ "Rx Multicast",	KSTAT_KV_U_PACKETS, },
8409 	{ "Rx Broadcast",	KSTAT_KV_U_PACKETS, },
8410 	{ "Tx Multicast",	KSTAT_KV_U_PACKETS, },
8411 	{ "Tx Broadcast",	KSTAT_KV_U_PACKETS, },
8412 };
8413 CTASSERT(__arraycount(mcx_ppcnt_rfc2863_tpl) == mcx_ppcnt_rfc2863_count);
8414 
8415 static const struct mcx_ppcnt mcx_ppcnt_rfc2819_tpl[] = {
8416 	{ "Drop Events",	KSTAT_KV_U_PACKETS, },
8417 	{ "Octets",		KSTAT_KV_U_BYTES, },
8418 	{ "Packets",		KSTAT_KV_U_PACKETS, },
8419 	{ "Broadcasts",		KSTAT_KV_U_PACKETS, },
8420 	{ "Multicasts",		KSTAT_KV_U_PACKETS, },
8421 	{ "CRC Align Errs",	KSTAT_KV_U_PACKETS, },
8422 	{ "Undersize",		KSTAT_KV_U_PACKETS, },
8423 	{ "Oversize",		KSTAT_KV_U_PACKETS, },
8424 	{ "Fragments",		KSTAT_KV_U_PACKETS, },
8425 	{ "Jabbers",		KSTAT_KV_U_PACKETS, },
8426 	{ "Collisions",		KSTAT_KV_U_NONE, },
8427 	{ "64B",		KSTAT_KV_U_PACKETS, },
8428 	{ "65-127B",		KSTAT_KV_U_PACKETS, },
8429 	{ "128-255B",		KSTAT_KV_U_PACKETS, },
8430 	{ "256-511B",		KSTAT_KV_U_PACKETS, },
8431 	{ "512-1023B",		KSTAT_KV_U_PACKETS, },
8432 	{ "1024-1518B",		KSTAT_KV_U_PACKETS, },
8433 	{ "1519-2047B",		KSTAT_KV_U_PACKETS, },
8434 	{ "2048-4095B",		KSTAT_KV_U_PACKETS, },
8435 	{ "4096-8191B",		KSTAT_KV_U_PACKETS, },
8436 	{ "8192-10239B",	KSTAT_KV_U_PACKETS, },
8437 };
8438 CTASSERT(__arraycount(mcx_ppcnt_rfc2819_tpl) == mcx_ppcnt_rfc2819_count);
8439 
8440 static const struct mcx_ppcnt mcx_ppcnt_rfc3635_tpl[] = {
8441 	{ "Alignment Errs",	KSTAT_KV_U_PACKETS, },
8442 	{ "FCS Errs",		KSTAT_KV_U_PACKETS, },
8443 	{ "Single Colls",	KSTAT_KV_U_PACKETS, },
8444 	{ "Multiple Colls",	KSTAT_KV_U_PACKETS, },
8445 	{ "SQE Test Errs",	KSTAT_KV_U_NONE, },
8446 	{ "Deferred Tx",	KSTAT_KV_U_PACKETS, },
8447 	{ "Late Colls",		KSTAT_KV_U_NONE, },
8448 	{ "Excess Colls",	KSTAT_KV_U_NONE, },
8449 	{ "Int MAC Tx Errs",	KSTAT_KV_U_PACKETS, },
8450 	{ "CSM Sense Errs",	KSTAT_KV_U_NONE, },
8451 	{ "Too Long",		KSTAT_KV_U_PACKETS, },
8452 	{ "Int MAC Rx Errs",	KSTAT_KV_U_PACKETS, },
8453 	{ "Symbol Errs",	KSTAT_KV_U_NONE, },
8454 	{ "Unknown Control",	KSTAT_KV_U_PACKETS, },
8455 	{ "Pause Rx",		KSTAT_KV_U_PACKETS, },
8456 	{ "Pause Tx",		KSTAT_KV_U_PACKETS, },
8457 };
8458 CTASSERT(__arraycount(mcx_ppcnt_rfc3635_tpl) == mcx_ppcnt_rfc3635_count);
8459 
8460 struct mcx_kstat_ppcnt {
8461 	const char		*ksp_name;
8462 	const struct mcx_ppcnt	*ksp_tpl;
8463 	unsigned int		 ksp_n;
8464 	uint8_t			 ksp_grp;
8465 };
8466 
8467 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_ieee8023 = {
8468 	.ksp_name =		"ieee802.3",
8469 	.ksp_tpl =		mcx_ppcnt_ieee8023_tpl,
8470 	.ksp_n =		__arraycount(mcx_ppcnt_ieee8023_tpl),
8471 	.ksp_grp =		MCX_REG_PPCNT_GRP_IEEE8023,
8472 };
8473 
8474 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc2863 = {
8475 	.ksp_name =		"rfc2863",
8476 	.ksp_tpl =		mcx_ppcnt_rfc2863_tpl,
8477 	.ksp_n =		__arraycount(mcx_ppcnt_rfc2863_tpl),
8478 	.ksp_grp =		MCX_REG_PPCNT_GRP_RFC2863,
8479 };
8480 
8481 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc2819 = {
8482 	.ksp_name =		"rfc2819",
8483 	.ksp_tpl =		mcx_ppcnt_rfc2819_tpl,
8484 	.ksp_n =		__arraycount(mcx_ppcnt_rfc2819_tpl),
8485 	.ksp_grp =		MCX_REG_PPCNT_GRP_RFC2819,
8486 };
8487 
8488 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc3635 = {
8489 	.ksp_name =		"rfc3635",
8490 	.ksp_tpl =		mcx_ppcnt_rfc3635_tpl,
8491 	.ksp_n =		__arraycount(mcx_ppcnt_rfc3635_tpl),
8492 	.ksp_grp =		MCX_REG_PPCNT_GRP_RFC3635,
8493 };
8494 
8495 static int	mcx_kstat_ppcnt_read(struct kstat *);
8496 
8497 static void	mcx_kstat_attach_tmps(struct mcx_softc *sc);
8498 static void	mcx_kstat_attach_queues(struct mcx_softc *sc);
8499 
8500 static struct kstat *
8501 mcx_kstat_attach_ppcnt(struct mcx_softc *sc,
8502     const struct mcx_kstat_ppcnt *ksp)
8503 {
8504 	struct kstat *ks;
8505 	struct kstat_kv *kvs;
8506 	unsigned int i;
8507 
8508 	ks = kstat_create(DEVNAME(sc), 0, ksp->ksp_name, 0, KSTAT_T_KV, 0);
8509 	if (ks == NULL)
8510 		return (NULL);
8511 
8512 	kvs = mallocarray(ksp->ksp_n, sizeof(*kvs),
8513 	    M_DEVBUF, M_WAITOK);
8514 
8515 	for (i = 0; i < ksp->ksp_n; i++) {
8516 		const struct mcx_ppcnt *tpl = &ksp->ksp_tpl[i];
8517 
8518 		kstat_kv_unit_init(&kvs[i], tpl->name,
8519 		    KSTAT_KV_T_COUNTER64, tpl->unit);
8520 	}
8521 
8522 	ks->ks_softc = sc;
8523 	ks->ks_ptr = (void *)ksp;
8524 	ks->ks_data = kvs;
8525 	ks->ks_datalen = ksp->ksp_n * sizeof(*kvs);
8526 	ks->ks_read = mcx_kstat_ppcnt_read;
8527 
8528 	kstat_install(ks);
8529 
8530 	return (ks);
8531 }
8532 
8533 static void
8534 mcx_kstat_attach(struct mcx_softc *sc)
8535 {
8536 	sc->sc_kstat_ieee8023 = mcx_kstat_attach_ppcnt(sc,
8537 	    &mcx_kstat_ppcnt_ieee8023);
8538 	sc->sc_kstat_rfc2863 = mcx_kstat_attach_ppcnt(sc,
8539 	    &mcx_kstat_ppcnt_rfc2863);
8540 	sc->sc_kstat_rfc2819 = mcx_kstat_attach_ppcnt(sc,
8541 	    &mcx_kstat_ppcnt_rfc2819);
8542 	sc->sc_kstat_rfc3635 = mcx_kstat_attach_ppcnt(sc,
8543 	    &mcx_kstat_ppcnt_rfc3635);
8544 
8545 	mcx_kstat_attach_tmps(sc);
8546 	mcx_kstat_attach_queues(sc);
8547 }
8548 
8549 static int
8550 mcx_kstat_ppcnt_read(struct kstat *ks)
8551 {
8552 	struct mcx_softc *sc = ks->ks_softc;
8553 	struct mcx_kstat_ppcnt *ksp = ks->ks_ptr;
8554 	struct mcx_reg_ppcnt ppcnt = {
8555 		.ppcnt_grp = ksp->ksp_grp,
8556 		.ppcnt_local_port = 1,
8557 	};
8558 	struct kstat_kv *kvs = ks->ks_data;
8559 	uint64_t *vs = (uint64_t *)&ppcnt.ppcnt_counter_set;
8560 	unsigned int i;
8561 	int rv;
8562 
8563 	KERNEL_LOCK(); /* XXX */
8564 	rv = mcx_access_hca_reg(sc, MCX_REG_PPCNT, MCX_REG_OP_READ,
8565 	    &ppcnt, sizeof(ppcnt));
8566 	KERNEL_UNLOCK();
8567 	if (rv != 0)
8568 		return (EIO);
8569 
8570 	nanouptime(&ks->ks_updated);
8571 
8572 	for (i = 0; i < ksp->ksp_n; i++)
8573 		kstat_kv_u64(&kvs[i]) = bemtoh64(&vs[i]);
8574 
8575 	return (0);
8576 }
8577 
8578 struct mcx_kstat_mtmp {
8579 	struct kstat_kv		ktmp_name;
8580 	struct kstat_kv		ktmp_temperature;
8581 	struct kstat_kv		ktmp_threshold_lo;
8582 	struct kstat_kv		ktmp_threshold_hi;
8583 };
8584 
8585 static const struct mcx_kstat_mtmp mcx_kstat_mtmp_tpl = {
8586 	KSTAT_KV_INITIALIZER("name",		KSTAT_KV_T_ISTR),
8587 	KSTAT_KV_INITIALIZER("temperature",	KSTAT_KV_T_TEMP),
8588 	KSTAT_KV_INITIALIZER("lo threshold",	KSTAT_KV_T_TEMP),
8589 	KSTAT_KV_INITIALIZER("hi threshold",	KSTAT_KV_T_TEMP),
8590 };
8591 
8592 static const struct timeval mcx_kstat_mtmp_rate = { 1, 0 };
8593 
8594 static int mcx_kstat_mtmp_read(struct kstat *);
8595 
8596 static void
8597 mcx_kstat_attach_tmps(struct mcx_softc *sc)
8598 {
8599 	struct kstat *ks;
8600 	struct mcx_reg_mcam mcam;
8601 	struct mcx_reg_mtcap mtcap;
8602 	struct mcx_kstat_mtmp *ktmp;
8603 	uint64_t map;
8604 	unsigned int i, n;
8605 
8606 	memset(&mtcap, 0, sizeof(mtcap));
8607 	memset(&mcam, 0, sizeof(mcam));
8608 
8609 	if (sc->sc_mcam_reg == 0) {
8610 		/* no management capabilities */
8611 		return;
8612 	}
8613 
8614 	if (mcx_access_hca_reg(sc, MCX_REG_MCAM, MCX_REG_OP_READ,
8615 	    &mcam, sizeof(mcam)) != 0) {
8616 		/* unable to check management capabilities? */
8617 		return;
8618 	}
8619 
8620 	if (MCX_BITFIELD_BIT(mcam.mcam_feature_cap_mask,
8621 	    MCX_MCAM_FEATURE_CAP_SENSOR_MAP) == 0) {
8622 		/* no sensor map */
8623 		return;
8624 	}
8625 
8626 	if (mcx_access_hca_reg(sc, MCX_REG_MTCAP, MCX_REG_OP_READ,
8627 	    &mtcap, sizeof(mtcap)) != 0) {
8628 		/* unable to find temperature sensors */
8629 		return;
8630 	}
8631 
8632 	sc->sc_kstat_mtmp_count = mtcap.mtcap_sensor_count;
8633 	sc->sc_kstat_mtmp = mallocarray(sc->sc_kstat_mtmp_count,
8634 	    sizeof(*sc->sc_kstat_mtmp), M_DEVBUF, M_WAITOK);
8635 
8636 	n = 0;
8637 	map = bemtoh64(&mtcap.mtcap_sensor_map);
8638 	for (i = 0; i < sizeof(map) * NBBY; i++) {
8639 		if (!ISSET(map, (1ULL << i)))
8640 			continue;
8641 
8642 		ks = kstat_create(DEVNAME(sc), 0, "temperature", i,
8643 		    KSTAT_T_KV, 0);
8644 		if (ks == NULL) {
8645 			/* unable to attach temperature sensor %u, i */
8646 			continue;
8647 		}
8648 
8649 		ktmp = malloc(sizeof(*ktmp), M_DEVBUF, M_WAITOK|M_ZERO);
8650 		*ktmp = mcx_kstat_mtmp_tpl;
8651 
8652 		ks->ks_data = ktmp;
8653 		ks->ks_datalen = sizeof(*ktmp);
8654 		TIMEVAL_TO_TIMESPEC(&mcx_kstat_mtmp_rate, &ks->ks_interval);
8655 		ks->ks_read = mcx_kstat_mtmp_read;
8656 
8657 		ks->ks_softc = sc;
8658 		kstat_install(ks);
8659 
8660 		sc->sc_kstat_mtmp[n++] = ks;
8661 		if (n >= sc->sc_kstat_mtmp_count)
8662 			break;
8663 	}
8664 }
8665 
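/*
 * MTMP temperatures are signed 16-bit values in units of 0.125
 * degrees C; convert to micro-kelvin for kstat.  for example, a raw
 * reading of 400 (0x0190) is 400 * 125000 = 50000000 uC, plus
 * 273150000 gives 323150000 uK (50.0 C).
 */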
8666 static uint64_t
8667 mcx_tmp_to_uK(uint16_t *t)
8668 {
8669 	int64_t mt = (int16_t)bemtoh16(t); /* 0.125 C units */
8670 	mt *= 1000000 / 8; /* convert to uC */
8671 	mt += 273150000; /* convert to uK */
8672 
8673 	return (mt);
8674 }
8675 
8676 static int
8677 mcx_kstat_mtmp_read(struct kstat *ks)
8678 {
8679 	struct mcx_softc *sc = ks->ks_softc;
8680 	struct mcx_kstat_mtmp *ktmp = ks->ks_data;
8681 	struct mcx_reg_mtmp mtmp;
8682 	int rv;
8683 	struct timeval updated;
8684 
8685 	TIMESPEC_TO_TIMEVAL(&updated, &ks->ks_updated);
8686 
8687 	if (!ratecheck(&updated, &mcx_kstat_mtmp_rate))
8688 		return (0);
8689 
8690 	memset(&mtmp, 0, sizeof(mtmp));
8691 	htobem16(&mtmp.mtmp_sensor_index, ks->ks_unit);
8692 
8693 	KERNEL_LOCK(); /* XXX */
8694 	rv = mcx_access_hca_reg(sc, MCX_REG_MTMP, MCX_REG_OP_READ,
8695 	    &mtmp, sizeof(mtmp));
8696 	KERNEL_UNLOCK();
8697 	if (rv != 0)
8698 		return (EIO);
8699 
8700 	memset(kstat_kv_istr(&ktmp->ktmp_name), 0,
8701 	    sizeof(kstat_kv_istr(&ktmp->ktmp_name)));
8702 	memcpy(kstat_kv_istr(&ktmp->ktmp_name),
8703 	    mtmp.mtmp_sensor_name, sizeof(mtmp.mtmp_sensor_name));
8704 	kstat_kv_temp(&ktmp->ktmp_temperature) =
8705 	    mcx_tmp_to_uK(&mtmp.mtmp_temperature);
8706 	kstat_kv_temp(&ktmp->ktmp_threshold_lo) =
8707 	    mcx_tmp_to_uK(&mtmp.mtmp_temperature_threshold_lo);
8708 	kstat_kv_temp(&ktmp->ktmp_threshold_hi) =
8709 	    mcx_tmp_to_uK(&mtmp.mtmp_temperature_threshold_hi);
8710 
8711 	TIMEVAL_TO_TIMESPEC(&updated, &ks->ks_updated);
8712 
8713 	return (0);
8714 }
8715 
8716 struct mcx_queuestat {
8717 	char			 name[KSTAT_KV_NAMELEN];
8718 	enum kstat_kv_type	 type;
8719 };
8720 
8721 static const struct mcx_queuestat mcx_queue_kstat_tpl[] = {
8722 	{ "RQ SW prod",		KSTAT_KV_T_COUNTER64 },
8723 	{ "RQ HW prod",		KSTAT_KV_T_COUNTER64 },
8724 	{ "RQ HW cons",		KSTAT_KV_T_COUNTER64 },
8725 	{ "RQ HW state",	KSTAT_KV_T_ISTR },
8726 
8727 	{ "SQ SW prod",		KSTAT_KV_T_COUNTER64 },
8728 	{ "SQ SW cons",		KSTAT_KV_T_COUNTER64 },
8729 	{ "SQ HW prod",		KSTAT_KV_T_COUNTER64 },
8730 	{ "SQ HW cons",		KSTAT_KV_T_COUNTER64 },
8731 	{ "SQ HW state",	KSTAT_KV_T_ISTR },
8732 
8733 	{ "CQ SW cons",		KSTAT_KV_T_COUNTER64 },
8734 	{ "CQ HW prod",		KSTAT_KV_T_COUNTER64 },
8735 	{ "CQ HW cons",		KSTAT_KV_T_COUNTER64 },
8736 	{ "CQ HW notify",	KSTAT_KV_T_COUNTER64 },
8737 	{ "CQ HW solicit",	KSTAT_KV_T_COUNTER64 },
8738 	{ "CQ HW status",	KSTAT_KV_T_ISTR },
8739 	{ "CQ HW state",	KSTAT_KV_T_ISTR },
8740 
8741 	{ "EQ SW cons",		KSTAT_KV_T_COUNTER64 },
8742 	{ "EQ HW prod",		KSTAT_KV_T_COUNTER64 },
8743 	{ "EQ HW cons",		KSTAT_KV_T_COUNTER64 },
8744 	{ "EQ HW status",	KSTAT_KV_T_ISTR },
8745 	{ "EQ HW state",	KSTAT_KV_T_ISTR },
8746 };
8747 
8748 static int	mcx_kstat_queue_read(struct kstat *);
8749 
8750 static void
8751 mcx_kstat_attach_queues(struct mcx_softc *sc)
8752 {
8753 	struct kstat *ks;
8754 	struct kstat_kv *kvs;
8755 	int q, i;
8756 
8757 	for (q = 0; q < sc->sc_nqueues; q++) {
8758 		ks = kstat_create(DEVNAME(sc), 0, "mcx-queues", q,
8759 		    KSTAT_T_KV, 0);
8760 		if (ks == NULL) {
8761 			/* unable to attach queue stats %u, q */
8762 			continue;
8763 		}
8764 
8765 		kvs = mallocarray(nitems(mcx_queue_kstat_tpl),
8766 		    sizeof(*kvs), M_DEVBUF, M_WAITOK);
8767 
8768 		for (i = 0; i < nitems(mcx_queue_kstat_tpl); i++) {
8769 			const struct mcx_queuestat *tpl =
8770 			    &mcx_queue_kstat_tpl[i];
8771 
8772 			kstat_kv_init(&kvs[i], tpl->name, tpl->type);
8773 		}
8774 
8775 		ks->ks_softc = &sc->sc_queues[q];
8776 		ks->ks_data = kvs;
8777 		ks->ks_datalen = nitems(mcx_queue_kstat_tpl) * sizeof(*kvs);
8778 		ks->ks_read = mcx_kstat_queue_read;
8779 
8780 		sc->sc_queues[q].q_kstat = ks;
8781 		kstat_install(ks);
8782 	}
8783 }
8784 
8785 static int
8786 mcx_kstat_queue_read(struct kstat *ks)
8787 {
8788 	struct mcx_queues *q = ks->ks_softc;
8789 	struct mcx_softc *sc = q->q_sc;
8790 	struct kstat_kv *kvs = ks->ks_data;
8791 	union {
8792 		struct mcx_rq_ctx rq;
8793 		struct mcx_sq_ctx sq;
8794 		struct mcx_cq_ctx cq;
8795 		struct mcx_eq_ctx eq;
8796 	} u;
8797 	const char *text;
8798 	int error = 0;
8799 
8800 	KERNEL_LOCK();
8801 
8802 	if (mcx_query_rq(sc, &q->q_rx, &u.rq) != 0) {
8803 		error = EIO;
8804 		goto out;
8805 	}
8806 
8807 	kstat_kv_u64(kvs++) = q->q_rx.rx_prod;
8808 	kstat_kv_u64(kvs++) = bemtoh32(&u.rq.rq_wq.wq_sw_counter);
8809 	kstat_kv_u64(kvs++) = bemtoh32(&u.rq.rq_wq.wq_hw_counter);
8810 	switch ((bemtoh32(&u.rq.rq_flags) & MCX_RQ_CTX_STATE_MASK) >>
8811 	    MCX_RQ_CTX_STATE_SHIFT) {
8812 	case MCX_RQ_CTX_STATE_RST:
8813 		text = "RST";
8814 		break;
8815 	case MCX_RQ_CTX_STATE_RDY:
8816 		text = "RDY";
8817 		break;
8818 	case MCX_RQ_CTX_STATE_ERR:
8819 		text = "ERR";
8820 		break;
8821 	default:
8822 		text = "unknown";
8823 		break;
8824 	}
8825 	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8826 	kvs++;
8827 
8828 	if (mcx_query_sq(sc, &q->q_tx, &u.sq) != 0) {
8829 		error = EIO;
8830 		goto out;
8831 	}
8832 
8833 	kstat_kv_u64(kvs++) = q->q_tx.tx_prod;
8834 	kstat_kv_u64(kvs++) = q->q_tx.tx_cons;
8835 	kstat_kv_u64(kvs++) = bemtoh32(&u.sq.sq_wq.wq_sw_counter);
8836 	kstat_kv_u64(kvs++) = bemtoh32(&u.sq.sq_wq.wq_hw_counter);
8837 	switch ((bemtoh32(&u.sq.sq_flags) & MCX_SQ_CTX_STATE_MASK) >>
8838 	    MCX_SQ_CTX_STATE_SHIFT) {
8839 	case MCX_SQ_CTX_STATE_RST:
8840 		text = "RST";
8841 		break;
8842 	case MCX_SQ_CTX_STATE_RDY:
8843 		text = "RDY";
8844 		break;
8845 	case MCX_SQ_CTX_STATE_ERR:
8846 		text = "ERR";
8847 		break;
8848 	default:
8849 		text = "unknown";
8850 		break;
8851 	}
8852 	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8853 	kvs++;
8854 
8855 	if (mcx_query_cq(sc, &q->q_cq, &u.cq) != 0) {
8856 		error = EIO;
8857 		goto out;
8858 	}
8859 
8860 	kstat_kv_u64(kvs++) = q->q_cq.cq_cons;
8861 	kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_producer_counter);
8862 	kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_consumer_counter);
8863 	kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_last_notified);
8864 	kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_last_solicit);
8865 
8866 	switch ((bemtoh32(&u.cq.cq_status) & MCX_CQ_CTX_STATUS_MASK) >>
8867 	    MCX_CQ_CTX_STATUS_SHIFT) {
8868 	case MCX_CQ_CTX_STATUS_OK:
8869 		text = "OK";
8870 		break;
8871 	case MCX_CQ_CTX_STATUS_OVERFLOW:
8872 		text = "overflow";
8873 		break;
8874 	case MCX_CQ_CTX_STATUS_WRITE_FAIL:
8875 		text = "write fail";
8876 		break;
8877 	default:
8878 		text = "unknown";
8879 		break;
8880 	}
8881 	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8882 	kvs++;
8883 
8884 	switch ((bemtoh32(&u.cq.cq_status) & MCX_CQ_CTX_STATE_MASK) >>
8885 	    MCX_CQ_CTX_STATE_SHIFT) {
8886 	case MCX_CQ_CTX_STATE_SOLICITED:
8887 		text = "solicited";
8888 		break;
8889 	case MCX_CQ_CTX_STATE_ARMED:
8890 		text = "armed";
8891 		break;
8892 	case MCX_CQ_CTX_STATE_FIRED:
8893 		text = "fired";
8894 		break;
8895 	default:
8896 		text = "unknown";
8897 		break;
8898 	}
8899 	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8900 	kvs++;
8901 
8902 	if (mcx_query_eq(sc, &q->q_eq, &u.eq) != 0) {
8903 		error = EIO;
8904 		goto out;
8905 	}
8906 
8907 	kstat_kv_u64(kvs++) = q->q_eq.eq_cons;
8908 	kstat_kv_u64(kvs++) = bemtoh32(&u.eq.eq_producer_counter);
8909 	kstat_kv_u64(kvs++) = bemtoh32(&u.eq.eq_consumer_counter);
8910 
8911 	switch ((bemtoh32(&u.eq.eq_status) & MCX_EQ_CTX_STATUS_MASK) >>
8912 	    MCX_EQ_CTX_STATUS_SHIFT) {
8913 	case MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE:
8914 		text = "write fail";
8915 		break;
8916 	case MCX_EQ_CTX_STATUS_OK:
8917 		text = "OK";
8918 		break;
8919 	default:
8920 		text = "unknown";
8921 		break;
8922 	}
8923 	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8924 	kvs++;
8925 
8926 	switch ((bemtoh32(&u.eq.eq_status) & MCX_EQ_CTX_STATE_MASK) >>
8927 	    MCX_EQ_CTX_STATE_SHIFT) {
8928 	case MCX_EQ_CTX_STATE_ARMED:
8929 		text = "armed";
8930 		break;
8931 	case MCX_EQ_CTX_STATE_FIRED:
8932 		text = "fired";
8933 		break;
8934 	default:
8935 		text = "unknown";
8936 		break;
8937 	}
8938 	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8939 	kvs++;
8940 
8941 	nanouptime(&ks->ks_updated);
8942 out:
8943 	KERNEL_UNLOCK();
8944 	return (error);
8945 }
8946 
8947 #endif /* NKSTAT > 0 */
8948 
8949 static unsigned int
8950 mcx_timecounter_read(struct timecounter *tc)
8951 {
8952 	struct mcx_softc *sc = tc->tc_priv;
8953 
8954 	return (mcx_rd(sc, MCX_INTERNAL_TIMER_L));
8955 }
8956 
8957 static void
8958 mcx_timecounter_attach(struct mcx_softc *sc)
8959 {
8960 	struct timecounter *tc = &sc->sc_timecounter;
8961 
8962 	tc->tc_get_timecount = mcx_timecounter_read;
8963 	tc->tc_counter_mask = ~0U;
8964 	tc->tc_frequency = sc->sc_khz * 1000;
8965 	tc->tc_name = device_xname(sc->sc_dev);
8966 	tc->tc_quality = -100;
8967 	tc->tc_priv = sc;
8968 
8969 	tc_init(tc);
8970 }
8971