1 /* $NetBSD: if_mcx.c,v 1.25 2022/08/28 07:54:03 skrll Exp $ */ 2 /* $OpenBSD: if_mcx.c,v 1.101 2021/06/02 19:16:11 patrick Exp $ */ 3 4 /* 5 * Copyright (c) 2017 David Gwynne <dlg@openbsd.org> 6 * Copyright (c) 2019 Jonathan Matthew <jmatthew@openbsd.org> 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
19 */ 20 21 #ifdef _KERNEL_OPT 22 #include "opt_net_mpsafe.h" 23 #endif 24 25 #include <sys/cdefs.h> 26 __KERNEL_RCSID(0, "$NetBSD: if_mcx.c,v 1.25 2022/08/28 07:54:03 skrll Exp $"); 27 28 #include <sys/param.h> 29 #include <sys/systm.h> 30 #include <sys/sockio.h> 31 #include <sys/mbuf.h> 32 #include <sys/kernel.h> 33 #include <sys/socket.h> 34 #include <sys/device.h> 35 #include <sys/pool.h> 36 #include <sys/queue.h> 37 #include <sys/callout.h> 38 #include <sys/workqueue.h> 39 #include <sys/atomic.h> 40 #include <sys/timetc.h> 41 #include <sys/kmem.h> 42 #include <sys/bus.h> 43 #include <sys/interrupt.h> 44 #include <sys/pcq.h> 45 #include <sys/cpu.h> 46 #include <sys/bitops.h> 47 48 #include <machine/intr.h> 49 50 #include <net/if.h> 51 #include <net/if_dl.h> 52 #include <net/if_ether.h> 53 #include <net/if_media.h> 54 #include <net/if_vlanvar.h> 55 #include <net/toeplitz.h> 56 57 #include <net/bpf.h> 58 59 #include <netinet/in.h> 60 61 #include <dev/pci/pcireg.h> 62 #include <dev/pci/pcivar.h> 63 #include <dev/pci/pcidevs.h> 64 65 /* TODO: Port kstat key/value stuff to evcnt/sysmon */ 66 #define NKSTAT 0 67 68 /* XXX This driver is not yet MP-safe; don't claim to be! 
*/ 69 /* #ifdef NET_MPSAFE */ 70 /* #define MCX_MPSAFE 1 */ 71 /* #define CALLOUT_FLAGS CALLOUT_MPSAFE */ 72 /* #else */ 73 #define CALLOUT_FLAGS 0 74 /* #endif */ 75 76 #define MCX_TXQ_NUM 2048 77 78 #define BUS_DMASYNC_PRERW (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE) 79 #define BUS_DMASYNC_POSTRW (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE) 80 81 #define MCX_HCA_BAR PCI_MAPREG_START /* BAR 0 */ 82 83 #define MCX_FW_VER 0x0000 84 #define MCX_FW_VER_MAJOR(_v) ((_v) & 0xffff) 85 #define MCX_FW_VER_MINOR(_v) ((_v) >> 16) 86 #define MCX_CMDIF_FW_SUBVER 0x0004 87 #define MCX_FW_VER_SUBMINOR(_v) ((_v) & 0xffff) 88 #define MCX_CMDIF(_v) ((_v) >> 16) 89 90 #define MCX_ISSI 1 /* as per the PRM */ 91 #define MCX_CMD_IF_SUPPORTED 5 92 93 #define MCX_HARDMTU 9500 94 95 #define MCX_PAGE_SHIFT 12 96 #define MCX_PAGE_SIZE (1 << MCX_PAGE_SHIFT) 97 98 /* queue sizes */ 99 #define MCX_LOG_EQ_SIZE 7 100 #define MCX_LOG_CQ_SIZE 12 101 #define MCX_LOG_RQ_SIZE 10 102 #define MCX_LOG_SQ_SIZE 11 103 104 #define MCX_MAX_QUEUES 16 105 106 /* completion event moderation - about 10khz, or 90% of the cq */ 107 #define MCX_CQ_MOD_PERIOD 50 108 #define MCX_CQ_MOD_COUNTER \ 109 (((1 << (MCX_LOG_CQ_SIZE - 1)) * 9) / 10) 110 111 #define MCX_LOG_SQ_ENTRY_SIZE 6 112 #define MCX_SQ_ENTRY_MAX_SLOTS 4 113 #define MCX_SQ_SEGS_PER_SLOT \ 114 (sizeof(struct mcx_sq_entry) / sizeof(struct mcx_sq_entry_seg)) 115 #define MCX_SQ_MAX_SEGMENTS \ 116 1 + ((MCX_SQ_ENTRY_MAX_SLOTS-1) * MCX_SQ_SEGS_PER_SLOT) 117 118 #define MCX_LOG_FLOW_TABLE_SIZE 5 119 #define MCX_NUM_STATIC_FLOWS 4 /* promisc, allmulti, ucast, bcast */ 120 #define MCX_NUM_MCAST_FLOWS \ 121 ((1 << MCX_LOG_FLOW_TABLE_SIZE) - MCX_NUM_STATIC_FLOWS) 122 123 #define MCX_SQ_INLINE_SIZE 18 124 CTASSERT(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN == MCX_SQ_INLINE_SIZE); 125 126 /* doorbell offsets */ 127 #define MCX_DOORBELL_AREA_SIZE MCX_PAGE_SIZE 128 129 #define MCX_CQ_DOORBELL_BASE 0 130 #define MCX_CQ_DOORBELL_STRIDE 64 131 132 #define MCX_WQ_DOORBELL_BASE 
MCX_PAGE_SIZE/2 133 #define MCX_WQ_DOORBELL_STRIDE 64 134 /* make sure the doorbells fit */ 135 CTASSERT(MCX_MAX_QUEUES * MCX_CQ_DOORBELL_STRIDE < MCX_WQ_DOORBELL_BASE); 136 CTASSERT(MCX_MAX_QUEUES * MCX_WQ_DOORBELL_STRIDE < 137 MCX_DOORBELL_AREA_SIZE - MCX_WQ_DOORBELL_BASE); 138 139 #define MCX_WQ_DOORBELL_MASK 0xffff 140 141 /* uar registers */ 142 #define MCX_UAR_CQ_DOORBELL 0x20 143 #define MCX_UAR_EQ_DOORBELL_ARM 0x40 144 #define MCX_UAR_EQ_DOORBELL 0x48 145 #define MCX_UAR_BF 0x800 146 147 #define MCX_CMDQ_ADDR_HI 0x0010 148 #define MCX_CMDQ_ADDR_LO 0x0014 149 #define MCX_CMDQ_ADDR_NMASK 0xfff 150 #define MCX_CMDQ_LOG_SIZE(_v) ((_v) >> 4 & 0xf) 151 #define MCX_CMDQ_LOG_STRIDE(_v) ((_v) >> 0 & 0xf) 152 #define MCX_CMDQ_INTERFACE_MASK (0x3 << 8) 153 #define MCX_CMDQ_INTERFACE_FULL_DRIVER (0x0 << 8) 154 #define MCX_CMDQ_INTERFACE_DISABLED (0x1 << 8) 155 156 #define MCX_CMDQ_DOORBELL 0x0018 157 158 #define MCX_STATE 0x01fc 159 #define MCX_STATE_MASK (1U << 31) 160 #define MCX_STATE_INITIALIZING (1 << 31) 161 #define MCX_STATE_READY (0 << 31) 162 #define MCX_STATE_INTERFACE_MASK (0x3 << 24) 163 #define MCX_STATE_INTERFACE_FULL_DRIVER (0x0 << 24) 164 #define MCX_STATE_INTERFACE_DISABLED (0x1 << 24) 165 166 #define MCX_INTERNAL_TIMER 0x1000 167 #define MCX_INTERNAL_TIMER_H 0x1000 168 #define MCX_INTERNAL_TIMER_L 0x1004 169 170 #define MCX_CLEAR_INT 0x100c 171 172 #define MCX_REG_OP_WRITE 0 173 #define MCX_REG_OP_READ 1 174 175 #define MCX_REG_PMLP 0x5002 176 #define MCX_REG_PMTU 0x5003 177 #define MCX_REG_PTYS 0x5004 178 #define MCX_REG_PAOS 0x5006 179 #define MCX_REG_PFCC 0x5007 180 #define MCX_REG_PPCNT 0x5008 181 #define MCX_REG_MTCAP 0x9009 /* mgmt temp capabilities */ 182 #define MCX_REG_MTMP 0x900a /* mgmt temp */ 183 #define MCX_REG_MCIA 0x9014 184 #define MCX_REG_MCAM 0x907f 185 186 #define MCX_ETHER_CAP_SGMII 0 187 #define MCX_ETHER_CAP_1000_KX 1 188 #define MCX_ETHER_CAP_10G_CX4 2 189 #define MCX_ETHER_CAP_10G_KX4 3 190 #define MCX_ETHER_CAP_10G_KR 4 191 
#define MCX_ETHER_CAP_20G_KR2	5
#define MCX_ETHER_CAP_40G_CR4	6
#define MCX_ETHER_CAP_40G_KR4	7
#define MCX_ETHER_CAP_56G_R4	8
#define MCX_ETHER_CAP_10G_CR	12
#define MCX_ETHER_CAP_10G_SR	13
#define MCX_ETHER_CAP_10G_LR	14
#define MCX_ETHER_CAP_40G_SR4	15
#define MCX_ETHER_CAP_40G_LR4	16
#define MCX_ETHER_CAP_50G_SR2	18
#define MCX_ETHER_CAP_100G_CR4	20
#define MCX_ETHER_CAP_100G_SR4	21
#define MCX_ETHER_CAP_100G_KR4	22
#define MCX_ETHER_CAP_100G_LR4	23
#define MCX_ETHER_CAP_100_TX	24
#define MCX_ETHER_CAP_1000_T	25
#define MCX_ETHER_CAP_10G_T	26
#define MCX_ETHER_CAP_25G_CR	27
#define MCX_ETHER_CAP_25G_KR	28
#define MCX_ETHER_CAP_25G_SR	29
#define MCX_ETHER_CAP_50G_CR2	30
#define MCX_ETHER_CAP_50G_KR2	31

#define MCX_MAX_CQE		32

/* command queue opcodes */
#define MCX_CMD_QUERY_HCA_CAP		0x100
#define MCX_CMD_QUERY_ADAPTER		0x101
#define MCX_CMD_INIT_HCA		0x102
#define MCX_CMD_TEARDOWN_HCA		0x103
#define MCX_CMD_ENABLE_HCA		0x104
#define MCX_CMD_DISABLE_HCA		0x105
#define MCX_CMD_QUERY_PAGES		0x107
#define MCX_CMD_MANAGE_PAGES		0x108
#define MCX_CMD_SET_HCA_CAP		0x109
#define MCX_CMD_QUERY_ISSI		0x10a
#define MCX_CMD_SET_ISSI		0x10b
#define MCX_CMD_SET_DRIVER_VERSION	0x10d
#define MCX_CMD_QUERY_SPECIAL_CONTEXTS	0x203
#define MCX_CMD_CREATE_EQ		0x301
#define MCX_CMD_DESTROY_EQ		0x302
#define MCX_CMD_QUERY_EQ		0x303
#define MCX_CMD_CREATE_CQ		0x400
#define MCX_CMD_DESTROY_CQ		0x401
#define MCX_CMD_QUERY_CQ		0x402
#define MCX_CMD_QUERY_NIC_VPORT_CONTEXT	0x754
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT \
					0x755
#define MCX_CMD_QUERY_VPORT_COUNTERS	0x770
#define MCX_CMD_ALLOC_PD		0x800
#define MCX_CMD_ALLOC_UAR		0x802
#define MCX_CMD_ACCESS_REG		0x805
#define MCX_CMD_ALLOC_TRANSPORT_DOMAIN	0x816
#define MCX_CMD_CREATE_TIR		0x900
#define MCX_CMD_DESTROY_TIR		0x902
#define MCX_CMD_CREATE_SQ		0x904
#define MCX_CMD_MODIFY_SQ		0x905
#define MCX_CMD_DESTROY_SQ		0x906
#define MCX_CMD_QUERY_SQ		0x907
#define MCX_CMD_CREATE_RQ		0x908
#define MCX_CMD_MODIFY_RQ		0x909
#define MCX_CMD_DESTROY_RQ		0x90a
#define MCX_CMD_QUERY_RQ		0x90b
#define MCX_CMD_CREATE_TIS		0x912
#define MCX_CMD_DESTROY_TIS		0x914
#define MCX_CMD_CREATE_RQT		0x916
#define MCX_CMD_DESTROY_RQT		0x918
#define MCX_CMD_SET_FLOW_TABLE_ROOT	0x92f
#define MCX_CMD_CREATE_FLOW_TABLE	0x930
#define MCX_CMD_DESTROY_FLOW_TABLE	0x931
#define MCX_CMD_QUERY_FLOW_TABLE	0x932
#define MCX_CMD_CREATE_FLOW_GROUP	0x933
#define MCX_CMD_DESTROY_FLOW_GROUP	0x934
#define MCX_CMD_QUERY_FLOW_GROUP	0x935
#define MCX_CMD_SET_FLOW_TABLE_ENTRY	0x936
#define MCX_CMD_QUERY_FLOW_TABLE_ENTRY	0x937
#define MCX_CMD_DELETE_FLOW_TABLE_ENTRY	0x938
#define MCX_CMD_ALLOC_FLOW_COUNTER	0x939
#define MCX_CMD_QUERY_FLOW_COUNTER	0x93b

/* queue (RQ/SQ) states */
#define MCX_QUEUE_STATE_RST	0
#define MCX_QUEUE_STATE_RDY	1
#define MCX_QUEUE_STATE_ERR	3

#define MCX_FLOW_TABLE_TYPE_RX	0
#define MCX_FLOW_TABLE_TYPE_TX	1

#define MCX_CMDQ_INLINE_DATASIZE 16

/*
 * Command queue entry.  Commands whose input/output fits in 16 bytes
 * use the inline data fields; larger transfers go through the
 * mailboxes pointed to by cq_input_ptr/cq_output_ptr.
 */
struct mcx_cmdq_entry {
	uint8_t			cq_type;
#define MCX_CMDQ_TYPE_PCIE		0x7
	uint8_t			cq_reserved0[3];

	uint32_t		cq_input_length;
	uint64_t		cq_input_ptr;
	uint8_t			cq_input_data[MCX_CMDQ_INLINE_DATASIZE];

	uint8_t			cq_output_data[MCX_CMDQ_INLINE_DATASIZE];
	uint64_t		cq_output_ptr;
	uint32_t		cq_output_length;

	uint8_t			cq_token;
	uint8_t			cq_signature;
	uint8_t			cq_reserved1[1];
	/* delivery status in bits 7:1, ownership bit in bit 0 */
	uint8_t			cq_status;
#define MCX_CQ_STATUS_SHIFT		1
#define MCX_CQ_STATUS_MASK		(0x7f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OK		(0x00 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_INT_ERR		(0x01 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OPCODE	(0x02 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_PARAM		(0x03 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SYS_STATE	(0x04 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE	(0x05 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_RESOURCE_BUSY	(0x06 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_EXCEED_LIM	(0x08 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RES_STATE	(0x09 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INDEX		(0x0a << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_NO_RESOURCES	(0x0f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INPUT_LEN	(0x50 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OUTPUT_LEN	(0x51 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE_STATE \
					(0x10 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SIZE		(0x40 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OWN_MASK		0x1
#define MCX_CQ_STATUS_OWN_SW		0x0
#define MCX_CQ_STATUS_OWN_HW		0x1
} __packed __aligned(8);

#define MCX_CMDQ_MAILBOX_DATASIZE	512

/* mailbox block: 512 bytes of payload plus a chain link and token */
struct mcx_cmdq_mailbox {
	uint8_t			mb_data[MCX_CMDQ_MAILBOX_DATASIZE];
	uint8_t			mb_reserved0[48];
	uint64_t		mb_next_ptr;
	uint32_t		mb_block_number;
	uint8_t			mb_reserved1[1];
	uint8_t			mb_token;
	uint8_t			mb_ctrl_signature;
	uint8_t			mb_signature;
} __packed __aligned(8);

#define MCX_CMDQ_MAILBOX_ALIGN	(1 << 10)
#define MCX_CMDQ_MAILBOX_SIZE	roundup(sizeof(struct mcx_cmdq_mailbox), \
				    MCX_CMDQ_MAILBOX_ALIGN)
/*
 * command mailbox structures
 */

struct mcx_cmd_enable_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_function_id;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_enable_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_init_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_init_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
#define MCX_CMD_TEARDOWN_HCA_GRACEFUL	0x0
#define MCX_CMD_TEARDOWN_HCA_PANIC	0x1
	uint16_t		cmd_profile;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_access_reg_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_register_id;
	uint32_t		cmd_argument;
} __packed __aligned(4);

struct mcx_cmd_access_reg_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* port MTU register (PMTU) payload */
struct mcx_reg_pmtu {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2[2];
	uint16_t		rp_max_mtu;
	uint8_t			rp_reserved3[2];
	uint16_t		rp_admin_mtu;
	uint8_t			rp_reserved4[2];
	uint16_t		rp_oper_mtu;
	uint8_t			rp_reserved5[2];
} __packed __aligned(4);

/* port type and speed register (PTYS) payload */
struct mcx_reg_ptys {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2;
	uint8_t			rp_proto_mask;
#define MCX_REG_PTYS_PROTO_MASK_ETH		(1 << 2)
	uint8_t			rp_reserved3[8];
	uint32_t		rp_eth_proto_cap;
	uint8_t			rp_reserved4[8];
	uint32_t		rp_eth_proto_admin;
	uint8_t			rp_reserved5[8];
	uint32_t		rp_eth_proto_oper;
	uint8_t			rp_reserved6[24];
} __packed __aligned(4);

/* port administrative and operational status register (PAOS) payload */
struct mcx_reg_paos {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_admin_status;
#define MCX_REG_PAOS_ADMIN_STATUS_UP		1
#define MCX_REG_PAOS_ADMIN_STATUS_DOWN		2
#define MCX_REG_PAOS_ADMIN_STATUS_UP_ONCE	3
#define MCX_REG_PAOS_ADMIN_STATUS_DISABLED	4
	uint8_t			rp_oper_status;
#define MCX_REG_PAOS_OPER_STATUS_UP		1
#define MCX_REG_PAOS_OPER_STATUS_DOWN		2
#define MCX_REG_PAOS_OPER_STATUS_FAILED		4
	/* the EN bit must be set for rp_admin_status to take effect */
	uint8_t			rp_admin_state_update;
#define MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN	(1 << 7)
	uint8_t			rp_reserved2[11];
} __packed __aligned(4);

/* port flow control configuration register (PFCC) payload */
struct mcx_reg_pfcc {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2[3];
	uint8_t			rp_prio_mask_tx;
	uint8_t			rp_reserved3;
	uint8_t			rp_prio_mask_rx;
	uint8_t			rp_pptx_aptx;
	uint8_t			rp_pfctx;
	uint8_t			rp_fctx_dis;
	uint8_t			rp_reserved4;
	uint8_t			rp_pprx_aprx;
	uint8_t			rp_pfcrx;
	uint8_t			rp_reserved5[2];
	uint16_t		rp_dev_stall_min;
	uint16_t		rp_dev_stall_crit;
	uint8_t			rp_reserved6[12];
} __packed __aligned(4);

#define MCX_PMLP_MODULE_NUM_MASK	0xff
/* physical to module lane mapping register (PMLP) payload */
struct mcx_reg_pmlp {
	uint8_t			rp_rxtx;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved0;
	uint8_t			rp_width;
	uint32_t		rp_lane0_mapping;
	uint32_t		rp_lane1_mapping;
	uint32_t		rp_lane2_mapping;
	uint32_t		rp_lane3_mapping;
	uint8_t			rp_reserved1[44];
} __packed __aligned(4);

/* ports performance counters register (PPCNT) payload */
struct mcx_reg_ppcnt {
	uint8_t			ppcnt_swid;
	uint8_t			ppcnt_local_port;
	uint8_t			ppcnt_pnat;
	/* which counter group ppcnt_counter_set holds */
	uint8_t			ppcnt_grp;
#define MCX_REG_PPCNT_GRP_IEEE8023		0x00
#define MCX_REG_PPCNT_GRP_RFC2863		0x01
#define MCX_REG_PPCNT_GRP_RFC2819		0x02
#define MCX_REG_PPCNT_GRP_RFC3635		0x03
#define MCX_REG_PPCNT_GRP_PER_PRIO		0x10
#define MCX_REG_PPCNT_GRP_PER_TC		0x11
#define MCX_REG_PPCNT_GRP_PER_RX_BUFFER		0x11

	uint8_t			ppcnt_clr;
	uint8_t			ppcnt_reserved1[2];
	uint8_t			ppcnt_prio_tc;
#define MCX_REG_PPCNT_CLR			(1 << 7)

	/* uint64_t counters; which index means what depends on
	 * ppcnt_grp, see the mcx_ppcnt_* enums below */
	uint8_t			ppcnt_counter_set[248];
} __packed __aligned(8);
CTASSERT(sizeof(struct mcx_reg_ppcnt) == 256);
CTASSERT((offsetof(struct mcx_reg_ppcnt, ppcnt_counter_set) %
    sizeof(uint64_t)) == 0);

/* counter indices for MCX_REG_PPCNT_GRP_IEEE8023 */
enum mcx_ppcnt_ieee8023 {
	frames_transmitted_ok,
	frames_received_ok,
	frame_check_sequence_errors,
	alignment_errors,
	octets_transmitted_ok,
	octets_received_ok,
	multicast_frames_xmitted_ok,
	broadcast_frames_xmitted_ok,
	multicast_frames_received_ok,
	broadcast_frames_received_ok,
	in_range_length_errors,
	out_of_range_length_field,
	frame_too_long_errors,
	symbol_error_during_carrier,
	mac_control_frames_transmitted,
	mac_control_frames_received,
	unsupported_opcodes_received,
	pause_mac_ctrl_frames_received,
	pause_mac_ctrl_frames_transmitted,

	mcx_ppcnt_ieee8023_count
};
CTASSERT(mcx_ppcnt_ieee8023_count * sizeof(uint64_t) == 0x98);

/* counter indices for MCX_REG_PPCNT_GRP_RFC2863 */
enum mcx_ppcnt_rfc2863 {
	in_octets,
	in_ucast_pkts,
	in_discards,
	in_errors,
	in_unknown_protos,
	out_octets,
	out_ucast_pkts,
	out_discards,
	out_errors,
	in_multicast_pkts,
	in_broadcast_pkts,
	out_multicast_pkts,
	out_broadcast_pkts,

	mcx_ppcnt_rfc2863_count
};
CTASSERT(mcx_ppcnt_rfc2863_count * sizeof(uint64_t) == 0x68);

/* counter indices for MCX_REG_PPCNT_GRP_RFC2819 */
enum mcx_ppcnt_rfc2819 {
	drop_events,
	octets,
	pkts,
	broadcast_pkts,
	multicast_pkts,
	crc_align_errors,
	undersize_pkts,
	oversize_pkts,
	fragments,
	jabbers,
	collisions,
	pkts64octets,
	pkts65to127octets,
	pkts128to255octets,
	pkts256to511octets,
	pkts512to1023octets,
	pkts1024to1518octets,
	pkts1519to2047octets,
	pkts2048to4095octets,
	pkts4096to8191octets,
	pkts8192to10239octets,

	mcx_ppcnt_rfc2819_count
};
CTASSERT((mcx_ppcnt_rfc2819_count * sizeof(uint64_t)) == 0xa8);

/* counter indices for MCX_REG_PPCNT_GRP_RFC3635 */
enum mcx_ppcnt_rfc3635 {
	dot3stats_alignment_errors,
	dot3stats_fcs_errors,
	dot3stats_single_collision_frames,
	dot3stats_multiple_collision_frames,
dot3stats_sqe_test_errors, 580 dot3stats_deferred_transmissions, 581 dot3stats_late_collisions, 582 dot3stats_excessive_collisions, 583 dot3stats_internal_mac_transmit_errors, 584 dot3stats_carrier_sense_errors, 585 dot3stats_frame_too_longs, 586 dot3stats_internal_mac_receive_errors, 587 dot3stats_symbol_errors, 588 dot3control_in_unknown_opcodes, 589 dot3in_pause_frames, 590 dot3out_pause_frames, 591 592 mcx_ppcnt_rfc3635_count 593 }; 594 CTASSERT((mcx_ppcnt_rfc3635_count * sizeof(uint64_t)) == 0x80); 595 596 struct mcx_reg_mcam { 597 uint8_t _reserved1[1]; 598 uint8_t mcam_feature_group; 599 uint8_t _reserved2[1]; 600 uint8_t mcam_access_reg_group; 601 uint8_t _reserved3[4]; 602 uint8_t mcam_access_reg_cap_mask[16]; 603 uint8_t _reserved4[16]; 604 uint8_t mcam_feature_cap_mask[16]; 605 uint8_t _reserved5[16]; 606 } __packed __aligned(4); 607 608 #define MCX_BITFIELD_BIT(bf, b) (bf[(sizeof bf - 1) - (b / 8)] & (b % 8)) 609 610 #define MCX_MCAM_FEATURE_CAP_SENSOR_MAP 6 611 612 struct mcx_reg_mtcap { 613 uint8_t _reserved1[3]; 614 uint8_t mtcap_sensor_count; 615 uint8_t _reserved2[4]; 616 617 uint64_t mtcap_sensor_map; 618 }; 619 620 struct mcx_reg_mtmp { 621 uint8_t _reserved1[2]; 622 uint16_t mtmp_sensor_index; 623 624 uint8_t _reserved2[2]; 625 uint16_t mtmp_temperature; 626 627 uint16_t mtmp_mte_mtr; 628 #define MCX_REG_MTMP_MTE (1 << 15) 629 #define MCX_REG_MTMP_MTR (1 << 14) 630 uint16_t mtmp_max_temperature; 631 632 uint16_t mtmp_tee; 633 #define MCX_REG_MTMP_TEE_NOPE (0 << 14) 634 #define MCX_REG_MTMP_TEE_GENERATE (1 << 14) 635 #define MCX_REG_MTMP_TEE_GENERATE_ONE (2 << 14) 636 uint16_t mtmp_temperature_threshold_hi; 637 638 uint8_t _reserved3[2]; 639 uint16_t mtmp_temperature_threshold_lo; 640 641 uint8_t _reserved4[4]; 642 643 uint8_t mtmp_sensor_name[8]; 644 }; 645 CTASSERT(sizeof(struct mcx_reg_mtmp) == 0x20); 646 CTASSERT(offsetof(struct mcx_reg_mtmp, mtmp_sensor_name) == 0x18); 647 648 #define MCX_MCIA_EEPROM_BYTES 32 649 struct mcx_reg_mcia { 650 
uint8_t rm_l; 651 uint8_t rm_module; 652 uint8_t rm_reserved0; 653 uint8_t rm_status; 654 uint8_t rm_i2c_addr; 655 uint8_t rm_page_num; 656 uint16_t rm_dev_addr; 657 uint16_t rm_reserved1; 658 uint16_t rm_size; 659 uint32_t rm_reserved2; 660 uint8_t rm_data[48]; 661 } __packed __aligned(4); 662 663 struct mcx_cmd_query_issi_in { 664 uint16_t cmd_opcode; 665 uint8_t cmd_reserved0[4]; 666 uint16_t cmd_op_mod; 667 uint8_t cmd_reserved1[8]; 668 } __packed __aligned(4); 669 670 struct mcx_cmd_query_issi_il_out { 671 uint8_t cmd_status; 672 uint8_t cmd_reserved0[3]; 673 uint32_t cmd_syndrome; 674 uint8_t cmd_reserved1[2]; 675 uint16_t cmd_current_issi; 676 uint8_t cmd_reserved2[4]; 677 } __packed __aligned(4); 678 679 CTASSERT(sizeof(struct mcx_cmd_query_issi_il_out) == MCX_CMDQ_INLINE_DATASIZE); 680 681 struct mcx_cmd_query_issi_mb_out { 682 uint8_t cmd_reserved2[16]; 683 uint8_t cmd_supported_issi[80]; /* very big endian */ 684 } __packed __aligned(4); 685 686 CTASSERT(sizeof(struct mcx_cmd_query_issi_mb_out) <= MCX_CMDQ_MAILBOX_DATASIZE); 687 688 struct mcx_cmd_set_issi_in { 689 uint16_t cmd_opcode; 690 uint8_t cmd_reserved0[4]; 691 uint16_t cmd_op_mod; 692 uint8_t cmd_reserved1[2]; 693 uint16_t cmd_current_issi; 694 uint8_t cmd_reserved2[4]; 695 } __packed __aligned(4); 696 697 CTASSERT(sizeof(struct mcx_cmd_set_issi_in) <= MCX_CMDQ_INLINE_DATASIZE); 698 699 struct mcx_cmd_set_issi_out { 700 uint8_t cmd_status; 701 uint8_t cmd_reserved0[3]; 702 uint32_t cmd_syndrome; 703 uint8_t cmd_reserved1[8]; 704 } __packed __aligned(4); 705 706 CTASSERT(sizeof(struct mcx_cmd_set_issi_out) <= MCX_CMDQ_INLINE_DATASIZE); 707 708 struct mcx_cmd_query_pages_in { 709 uint16_t cmd_opcode; 710 uint8_t cmd_reserved0[4]; 711 uint16_t cmd_op_mod; 712 #define MCX_CMD_QUERY_PAGES_BOOT 0x01 713 #define MCX_CMD_QUERY_PAGES_INIT 0x02 714 #define MCX_CMD_QUERY_PAGES_REGULAR 0x03 715 uint8_t cmd_reserved1[8]; 716 } __packed __aligned(4); 717 718 struct mcx_cmd_query_pages_out { 719 uint8_t 
cmd_status; 720 uint8_t cmd_reserved0[3]; 721 uint32_t cmd_syndrome; 722 uint8_t cmd_reserved1[2]; 723 uint16_t cmd_func_id; 724 int32_t cmd_num_pages; 725 } __packed __aligned(4); 726 727 struct mcx_cmd_manage_pages_in { 728 uint16_t cmd_opcode; 729 uint8_t cmd_reserved0[4]; 730 uint16_t cmd_op_mod; 731 #define MCX_CMD_MANAGE_PAGES_ALLOC_FAIL \ 732 0x00 733 #define MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS \ 734 0x01 735 #define MCX_CMD_MANAGE_PAGES_HCA_RETURN_PAGES \ 736 0x02 737 uint8_t cmd_reserved1[2]; 738 uint16_t cmd_func_id; 739 uint32_t cmd_input_num_entries; 740 } __packed __aligned(4); 741 742 CTASSERT(sizeof(struct mcx_cmd_manage_pages_in) == MCX_CMDQ_INLINE_DATASIZE); 743 744 struct mcx_cmd_manage_pages_out { 745 uint8_t cmd_status; 746 uint8_t cmd_reserved0[3]; 747 uint32_t cmd_syndrome; 748 uint32_t cmd_output_num_entries; 749 uint8_t cmd_reserved1[4]; 750 } __packed __aligned(4); 751 752 CTASSERT(sizeof(struct mcx_cmd_manage_pages_out) == MCX_CMDQ_INLINE_DATASIZE); 753 754 struct mcx_cmd_query_hca_cap_in { 755 uint16_t cmd_opcode; 756 uint8_t cmd_reserved0[4]; 757 uint16_t cmd_op_mod; 758 #define MCX_CMD_QUERY_HCA_CAP_MAX (0x0 << 0) 759 #define MCX_CMD_QUERY_HCA_CAP_CURRENT (0x1 << 0) 760 #define MCX_CMD_QUERY_HCA_CAP_DEVICE (0x0 << 1) 761 #define MCX_CMD_QUERY_HCA_CAP_OFFLOAD (0x1 << 1) 762 #define MCX_CMD_QUERY_HCA_CAP_FLOW (0x7 << 1) 763 uint8_t cmd_reserved1[8]; 764 } __packed __aligned(4); 765 766 struct mcx_cmd_query_hca_cap_out { 767 uint8_t cmd_status; 768 uint8_t cmd_reserved0[3]; 769 uint32_t cmd_syndrome; 770 uint8_t cmd_reserved1[8]; 771 } __packed __aligned(4); 772 773 #define MCX_HCA_CAP_LEN 0x1000 774 #define MCX_HCA_CAP_NMAILBOXES \ 775 (MCX_HCA_CAP_LEN / MCX_CMDQ_MAILBOX_DATASIZE) 776 777 #if __GNUC_PREREQ__(4, 3) 778 #define __counter__ __COUNTER__ 779 #else 780 #define __counter__ __LINE__ 781 #endif 782 783 #define __token(_tok, _num) _tok##_num 784 #define _token(_tok, _num) __token(_tok, _num) 785 #define __reserved__ 
_token(__reserved, __counter__) 786 787 struct mcx_cap_device { 788 uint8_t reserved0[16]; 789 790 uint8_t log_max_srq_sz; 791 uint8_t log_max_qp_sz; 792 uint8_t __reserved__[1]; 793 uint8_t log_max_qp; /* 5 bits */ 794 #define MCX_CAP_DEVICE_LOG_MAX_QP 0x1f 795 796 uint8_t __reserved__[1]; 797 uint8_t log_max_srq; /* 5 bits */ 798 #define MCX_CAP_DEVICE_LOG_MAX_SRQ 0x1f 799 uint8_t __reserved__[2]; 800 801 uint8_t __reserved__[1]; 802 uint8_t log_max_cq_sz; 803 uint8_t __reserved__[1]; 804 uint8_t log_max_cq; /* 5 bits */ 805 #define MCX_CAP_DEVICE_LOG_MAX_CQ 0x1f 806 807 uint8_t log_max_eq_sz; 808 uint8_t log_max_mkey; /* 6 bits */ 809 #define MCX_CAP_DEVICE_LOG_MAX_MKEY 0x3f 810 uint8_t __reserved__[1]; 811 uint8_t log_max_eq; /* 4 bits */ 812 #define MCX_CAP_DEVICE_LOG_MAX_EQ 0x0f 813 814 uint8_t max_indirection; 815 uint8_t log_max_mrw_sz; /* 7 bits */ 816 #define MCX_CAP_DEVICE_LOG_MAX_MRW_SZ 0x7f 817 uint8_t teardown_log_max_msf_list_size; 818 #define MCX_CAP_DEVICE_FORCE_TEARDOWN 0x80 819 #define MCX_CAP_DEVICE_LOG_MAX_MSF_LIST_SIZE \ 820 0x3f 821 uint8_t log_max_klm_list_size; /* 6 bits */ 822 #define MCX_CAP_DEVICE_LOG_MAX_KLM_LIST_SIZE \ 823 0x3f 824 825 uint8_t __reserved__[1]; 826 uint8_t log_max_ra_req_dc; /* 6 bits */ 827 #define MCX_CAP_DEVICE_LOG_MAX_REQ_DC 0x3f 828 uint8_t __reserved__[1]; 829 uint8_t log_max_ra_res_dc; /* 6 bits */ 830 #define MCX_CAP_DEVICE_LOG_MAX_RA_RES_DC \ 831 0x3f 832 833 uint8_t __reserved__[1]; 834 uint8_t log_max_ra_req_qp; /* 6 bits */ 835 #define MCX_CAP_DEVICE_LOG_MAX_RA_REQ_QP \ 836 0x3f 837 uint8_t __reserved__[1]; 838 uint8_t log_max_ra_res_qp; /* 6 bits */ 839 #define MCX_CAP_DEVICE_LOG_MAX_RA_RES_QP \ 840 0x3f 841 842 uint8_t flags1; 843 #define MCX_CAP_DEVICE_END_PAD 0x80 844 #define MCX_CAP_DEVICE_CC_QUERY_ALLOWED 0x40 845 #define MCX_CAP_DEVICE_CC_MODIFY_ALLOWED \ 846 0x20 847 #define MCX_CAP_DEVICE_START_PAD 0x10 848 #define MCX_CAP_DEVICE_128BYTE_CACHELINE \ 849 0x08 850 uint8_t __reserved__[1]; 851 uint16_t 
gid_table_size; 852 853 uint16_t flags2; 854 #define MCX_CAP_DEVICE_OUT_OF_SEQ_CNT 0x8000 855 #define MCX_CAP_DEVICE_VPORT_COUNTERS 0x4000 856 #define MCX_CAP_DEVICE_RETRANSMISSION_Q_COUNTERS \ 857 0x2000 858 #define MCX_CAP_DEVICE_DEBUG 0x1000 859 #define MCX_CAP_DEVICE_MODIFY_RQ_COUNTERS_SET_ID \ 860 0x8000 861 #define MCX_CAP_DEVICE_RQ_DELAY_DROP 0x4000 862 #define MCX_CAP_DEVICe_MAX_QP_CNT_MASK 0x03ff 863 uint16_t pkey_table_size; 864 865 uint8_t flags3; 866 #define MCX_CAP_DEVICE_VPORT_GROUP_MANAGER \ 867 0x80 868 #define MCX_CAP_DEVICE_VHCA_GROUP_MANAGER \ 869 0x40 870 #define MCX_CAP_DEVICE_IB_VIRTUAL 0x20 871 #define MCX_CAP_DEVICE_ETH_VIRTUAL 0x10 872 #define MCX_CAP_DEVICE_ETS 0x04 873 #define MCX_CAP_DEVICE_NIC_FLOW_TABLE 0x02 874 #define MCX_CAP_DEVICE_ESWITCH_FLOW_TABLE \ 875 0x01 876 uint8_t local_ca_ack_delay; /* 5 bits */ 877 #define MCX_CAP_DEVICE_LOCAL_CA_ACK_DELAY \ 878 0x1f 879 #define MCX_CAP_DEVICE_MCAM_REG 0x40 880 uint8_t port_type; 881 #define MCX_CAP_DEVICE_PORT_MODULE_EVENT \ 882 0x80 883 #define MCX_CAP_DEVICE_PORT_TYPE 0x03 884 #define MCX_CAP_DEVICE_PORT_TYPE_ETH 0x01 885 uint8_t num_ports; 886 887 uint8_t snapshot_log_max_msg; 888 #define MCX_CAP_DEVICE_SNAPSHOT 0x80 889 #define MCX_CAP_DEVICE_LOG_MAX_MSG 0x1f 890 uint8_t max_tc; /* 4 bits */ 891 #define MCX_CAP_DEVICE_MAX_TC 0x0f 892 uint8_t flags4; 893 #define MCX_CAP_DEVICE_TEMP_WARN_EVENT 0x80 894 #define MCX_CAP_DEVICE_DCBX 0x40 895 #define MCX_CAP_DEVICE_ROL_S 0x02 896 #define MCX_CAP_DEVICE_ROL_G 0x01 897 uint8_t wol; 898 #define MCX_CAP_DEVICE_WOL_S 0x40 899 #define MCX_CAP_DEVICE_WOL_G 0x20 900 #define MCX_CAP_DEVICE_WOL_A 0x10 901 #define MCX_CAP_DEVICE_WOL_B 0x08 902 #define MCX_CAP_DEVICE_WOL_M 0x04 903 #define MCX_CAP_DEVICE_WOL_U 0x02 904 #define MCX_CAP_DEVICE_WOL_P 0x01 905 906 uint16_t stat_rate_support; 907 uint8_t __reserved__[1]; 908 uint8_t cqe_version; /* 4 bits */ 909 #define MCX_CAP_DEVICE_CQE_VERSION 0x0f 910 911 uint32_t flags5; 912 #define 
MCX_CAP_DEVICE_COMPACT_ADDRESS_VECTOR \ 913 0x80000000 914 #define MCX_CAP_DEVICE_STRIDING_RQ 0x40000000 915 #define MCX_CAP_DEVICE_IPOIP_ENHANCED_OFFLOADS \ 916 0x10000000 917 #define MCX_CAP_DEVICE_IPOIP_IPOIP_OFFLOADS \ 918 0x08000000 919 #define MCX_CAP_DEVICE_DC_CONNECT_CP 0x00040000 920 #define MCX_CAP_DEVICE_DC_CNAK_DRACE 0x00020000 921 #define MCX_CAP_DEVICE_DRAIN_SIGERR 0x00010000 922 #define MCX_CAP_DEVICE_DRAIN_SIGERR 0x00010000 923 #define MCX_CAP_DEVICE_CMDIF_CHECKSUM 0x0000c000 924 #define MCX_CAP_DEVICE_SIGERR_QCE 0x00002000 925 #define MCX_CAP_DEVICE_WQ_SIGNATURE 0x00000800 926 #define MCX_CAP_DEVICE_SCTR_DATA_CQE 0x00000400 927 #define MCX_CAP_DEVICE_SHO 0x00000100 928 #define MCX_CAP_DEVICE_TPH 0x00000080 929 #define MCX_CAP_DEVICE_RF 0x00000040 930 #define MCX_CAP_DEVICE_DCT 0x00000020 931 #define MCX_CAP_DEVICE_QOS 0x00000010 932 #define MCX_CAP_DEVICe_ETH_NET_OFFLOADS 0x00000008 933 #define MCX_CAP_DEVICE_ROCE 0x00000004 934 #define MCX_CAP_DEVICE_ATOMIC 0x00000002 935 936 uint32_t flags6; 937 #define MCX_CAP_DEVICE_CQ_OI 0x80000000 938 #define MCX_CAP_DEVICE_CQ_RESIZE 0x40000000 939 #define MCX_CAP_DEVICE_CQ_MODERATION 0x20000000 940 #define MCX_CAP_DEVICE_CQ_PERIOD_MODE_MODIFY \ 941 0x10000000 942 #define MCX_CAP_DEVICE_CQ_INVALIDATE 0x08000000 943 #define MCX_CAP_DEVICE_RESERVED_AT_255 0x04000000 944 #define MCX_CAP_DEVICE_CQ_EQ_REMAP 0x02000000 945 #define MCX_CAP_DEVICE_PG 0x01000000 946 #define MCX_CAP_DEVICE_BLOCK_LB_MC 0x00800000 947 #define MCX_CAP_DEVICE_EXPONENTIAL_BACKOFF \ 948 0x00400000 949 #define MCX_CAP_DEVICE_SCQE_BREAK_MODERATION \ 950 0x00200000 951 #define MCX_CAP_DEVICE_CQ_PERIOD_START_FROM_CQE \ 952 0x00100000 953 #define MCX_CAP_DEVICE_CD 0x00080000 954 #define MCX_CAP_DEVICE_ATM 0x00040000 955 #define MCX_CAP_DEVICE_APM 0x00020000 956 #define MCX_CAP_DEVICE_IMAICL 0x00010000 957 #define MCX_CAP_DEVICE_QKV 0x00000200 958 #define MCX_CAP_DEVICE_PKV 0x00000100 959 #define MCX_CAP_DEVICE_SET_DETH_SQPN 0x00000080 960 #define 
MCX_CAP_DEVICE_XRC 0x00000008
#define MCX_CAP_DEVICE_UD 0x00000004
#define MCX_CAP_DEVICE_UC 0x00000002
#define MCX_CAP_DEVICE_RC 0x00000001

	/*
	 * Remainder of the HCA device-capabilities layout; the opening of
	 * struct mcx_cap_device is earlier in this file.  Fields annotated
	 * "n bits" occupy only the low n bits of the named byte; the
	 * MCX_CAP_DEVICE_* masks below each field extract those bits.
	 */
	uint8_t			uar_flags;
#define MCX_CAP_DEVICE_UAR_4K 0x80
	uint8_t			uar_sz;	/* 6 bits */
#define MCX_CAP_DEVICE_UAR_SZ 0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_pg_sz;

	uint8_t			flags7;
#define MCX_CAP_DEVICE_BF 0x80
#define MCX_CAP_DEVICE_DRIVER_VERSION 0x40
#define MCX_CAP_DEVICE_PAD_TX_ETH_PACKET \
			0x20
	uint8_t			log_bf_reg_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_BF_REG_SIZE 0x1f
	uint8_t			__reserved__[2];

	uint16_t		num_of_diagnostic_counters;
	uint16_t		max_wqe_sz_sq;

	uint8_t			__reserved__[2];
	uint16_t		max_wqe_sz_rq;

	uint8_t			__reserved__[2];
	uint16_t		max_wqe_sz_sq_dc;

	uint32_t		max_qp_mcg; /* 25 bits */
#define MCX_CAP_DEVICE_MAX_QP_MCG 0x1ffffff

	uint8_t			__reserved__[3];
	uint8_t			log_max_mcq;

	uint8_t			log_max_transport_domain; /* 5 bits */
	/* NOTE(review): "TRANSORT" (sic) — kept; other code may use it */
#define MCX_CAP_DEVICE_LOG_MAX_TRANSORT_DOMAIN \
			0x1f
	uint8_t			log_max_pd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_PD 0x1f
	uint8_t			__reserved__[1];
	uint8_t			log_max_xrcd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_XRCD 0x1f

	uint8_t			__reserved__[2];
	uint16_t		max_flow_counter;

	uint8_t			log_max_rq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQ 0x1f
	uint8_t			log_max_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SQ 0x1f
	uint8_t			log_max_tir; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIR 0x1f
	uint8_t			log_max_tis; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS 0x1f

	uint8_t			flags8;
#define MCX_CAP_DEVICE_BASIC_CYCLIC_RCV_WQE \
			0x80
#define MCX_CAP_DEVICE_LOG_MAX_RMP 0x1f
	uint8_t			log_max_rqt; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT 0x1f
	uint8_t			log_max_rqt_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT_SIZE 0x1f
	uint8_t			log_max_tis_per_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS_PER_SQ \
			0x1f

	uint8_t			flags9;
#define MXC_CAP_DEVICE_EXT_STRIDE_NUM_RANGES \
			0x80
#define MXC_CAP_DEVICE_LOG_MAX_STRIDE_SZ_RQ \
			0x1f
	uint8_t			log_min_stride_sz_rq; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MIN_STRIDE_SZ_RQ \
			0x1f
	uint8_t			log_max_stride_sz_sq; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_STRIDE_SZ_SQ \
			0x1f
	uint8_t			log_min_stride_sz_sq; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MIN_STRIDE_SZ_SQ \
			0x1f

	uint8_t			log_max_hairpin_queues;
#define MXC_CAP_DEVICE_HAIRPIN 0x80
#define MXC_CAP_DEVICE_LOG_MAX_HAIRPIN_QUEUES \
			0x1f
	uint8_t			log_min_hairpin_queues;
#define MXC_CAP_DEVICE_LOG_MIN_HAIRPIN_QUEUES \
			0x1f
	uint8_t			log_max_hairpin_num_packets;
#define MXC_CAP_DEVICE_LOG_MAX_HAIRPIN_NUM_PACKETS \
			0x1f
	uint8_t			log_max_mq_sz;
#define MXC_CAP_DEVICE_LOG_MAX_WQ_SZ \
			0x1f

	uint8_t			log_min_hairpin_wq_data_sz;
#define MXC_CAP_DEVICE_NIC_VPORT_CHANGE_EVENT \
			0x80
#define MXC_CAP_DEVICE_DISABLE_LOCAL_LB_UC \
			0x40
#define MXC_CAP_DEVICE_DISABLE_LOCAL_LB_MC \
			0x20
#define MCX_CAP_DEVICE_LOG_MIN_HAIRPIN_WQ_DATA_SZ \
			0x1f
	uint8_t			log_max_vlan_list;
#define MXC_CAP_DEVICE_SYSTEM_IMAGE_GUID_MODIFIABLE \
			0x80
#define MXC_CAP_DEVICE_LOG_MAX_VLAN_LIST \
			0x1f
	uint8_t			log_max_current_mc_list;
#define MXC_CAP_DEVICE_LOG_MAX_CURRENT_MC_LIST \
			0x1f
	uint8_t			log_max_current_uc_list;
#define MXC_CAP_DEVICE_LOG_MAX_CURRENT_UC_LIST \
			0x1f

	uint8_t			__reserved__[4];

	uint32_t		create_qp_start_hint; /* 24 bits */

	uint8_t			log_max_uctx; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_UCTX 0x1f
	uint8_t			log_max_umem; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_UMEM 0x1f
	uint16_t		max_num_eqs;

	uint8_t			log_max_l2_table; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_L2_TABLE 0x1f
	uint8_t			__reserved__[1];
	uint16_t		log_uar_page_sz;

	uint8_t			__reserved__[8];

	uint32_t		device_frequency_mhz;
	uint32_t		device_frequency_khz;
} __packed __aligned(8);

/*
 * Compile-time checks that the capability layout above matches the
 * byte offsets the device expects, and that the whole structure fits
 * in one command-queue mailbox.
 */
CTASSERT(offsetof(struct mcx_cap_device, max_indirection) == 0x20);
CTASSERT(offsetof(struct mcx_cap_device, flags1) == 0x2c);
CTASSERT(offsetof(struct mcx_cap_device, flags2) == 0x30);
CTASSERT(offsetof(struct mcx_cap_device, snapshot_log_max_msg) == 0x38);
CTASSERT(offsetof(struct mcx_cap_device, flags5) == 0x40);
CTASSERT(offsetof(struct mcx_cap_device, flags7) == 0x4c);
CTASSERT(offsetof(struct mcx_cap_device, device_frequency_mhz) == 0x98);
CTASSERT(offsetof(struct mcx_cap_device, device_frequency_khz) == 0x9c);
CTASSERT(sizeof(struct mcx_cap_device) <= MCX_CMDQ_MAILBOX_DATASIZE);

/*
 * The structures below are the in/out parameter blocks of individual
 * HCA commands.  The common pattern is: *_in carries the 16-bit opcode
 * and opcode modifier, *_out carries an 8-bit status and a 32-bit
 * syndrome; larger payloads travel in mailboxes (*_mb_in / *_mb_out).
 */
struct mcx_cmd_set_driver_version_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version {
	uint8_t			cmd_driver_version[64];
} __packed __aligned(8);

struct mcx_cmd_modify_nic_vport_context_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_field_select;
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_ADDR	0x04
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC	0x10
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU	0x40
} __packed __aligned(4);

struct mcx_cmd_modify_nic_vport_context_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[4];
	uint8_t			cmd_allowed_list_type;
	uint8_t			cmd_reserved2[3];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* NIC vport context: MTU, permanent MAC, promiscuity and allowed lists. */
struct mcx_nic_vport_ctx {
	uint32_t		vp_min_wqe_inline_mode;
	uint8_t			vp_reserved0[32];
	uint32_t		vp_mtu;
	uint8_t			vp_reserved1[200];
	uint16_t		vp_flags;
#define MCX_NIC_VPORT_CTX_LIST_UC_MAC			(0)
#define MCX_NIC_VPORT_CTX_LIST_MC_MAC			(1 << 24)
#define MCX_NIC_VPORT_CTX_LIST_VLAN			(2 << 24)
#define MCX_NIC_VPORT_CTX_PROMISC_ALL			(1 << 13)
#define MCX_NIC_VPORT_CTX_PROMISC_MCAST			(1 << 14)
#define MCX_NIC_VPORT_CTX_PROMISC_UCAST			(1 << 15)
	uint16_t		vp_allowed_list_size;
	uint64_t		vp_perm_addr;
	uint8_t			vp_reserved2[4];
	/* allowed list follows */
} __packed __aligned(4);

/* One packets/octets pair, as used by the vport counter set. */
struct mcx_counter {
	uint64_t		packets;
	uint64_t		octets;
} __packed __aligned(4);

struct mcx_nic_vport_counters {
	struct mcx_counter	rx_err;
	struct mcx_counter	tx_err;
	uint8_t			reserved0[64]; /* 0x30 */
	struct mcx_counter	rx_bcast;
	struct mcx_counter	tx_bcast;
	struct mcx_counter	rx_ucast;
	struct mcx_counter	tx_ucast;
	struct mcx_counter	rx_mcast;
	struct mcx_counter	tx_mcast;
	uint8_t			reserved1[0x210 - 0xd0];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_mb_in {
	uint8_t			cmd_reserved0[8];
	uint8_t			cmd_clear;	/* nonzero: clear after read */
	uint8_t			cmd_reserved1[7];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_mb_in {
	uint8_t			cmd_reserved0[8];
	uint8_t			cmd_clear;
	uint8_t			cmd_reserved1[5];
	uint16_t		cmd_flow_counter_id;
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_uar;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_resd_lkey;
} __packed __aligned(4);

/* Event-queue context, 64 bytes (checked below). */
struct mcx_eq_ctx {
	uint32_t		eq_status;
#define MCX_EQ_CTX_STATE_SHIFT		8
#define MCX_EQ_CTX_STATE_MASK		(0xf << MCX_EQ_CTX_STATE_SHIFT)
#define MCX_EQ_CTX_STATE_ARMED		0x9
#define MCX_EQ_CTX_STATE_FIRED		0xa
#define MCX_EQ_CTX_OI_SHIFT		17
#define MCX_EQ_CTX_OI			(1 << MCX_EQ_CTX_OI_SHIFT)
#define MCX_EQ_CTX_EC_SHIFT		18
#define MCX_EQ_CTX_EC			(1 << MCX_EQ_CTX_EC_SHIFT)
#define MCX_EQ_CTX_STATUS_SHIFT		28
#define MCX_EQ_CTX_STATUS_MASK		(0xf << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_OK		0x0
#define MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE 0xa
	uint32_t		eq_reserved1;
	uint32_t		eq_page_offset;
#define MCX_EQ_CTX_PAGE_OFFSET_SHIFT	5
	uint32_t		eq_uar_size;
#define MCX_EQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT	24
	uint32_t		eq_reserved2;
	uint8_t			eq_reserved3[3];
	uint8_t			eq_intr;
	uint32_t		eq_log_page_size;
#define MCX_EQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		eq_reserved4[3];
	uint32_t		eq_consumer_counter;
	uint32_t		eq_producer_counter;
#define MCX_EQ_CTX_COUNTER_MASK		0xffffff
	uint32_t		eq_reserved5[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_ctx) == 64);

struct mcx_cmd_create_eq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_eq_mb_in {
	struct mcx_eq_ctx	cmd_eq_ctx;
	uint8_t			cmd_reserved0[8];
	uint64_t		cmd_event_bitmask;	/* event types to deliver */
#define MCX_EVENT_TYPE_COMPLETION	0x00
#define MCX_EVENT_TYPE_CQ_ERROR		0x04
#define MCX_EVENT_TYPE_INTERNAL_ERROR	0x08
#define MCX_EVENT_TYPE_PORT_CHANGE	0x09
#define MCX_EVENT_TYPE_CMD_COMPLETION	0x0a
#define MCX_EVENT_TYPE_PAGE_REQUEST	0x0b
#define MCX_EVENT_TYPE_LAST_WQE		0x13
	uint8_t			cmd_reserved1[176];
} __packed __aligned(4);

struct mcx_cmd_create_eq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_eqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_eq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_eqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_eq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* One event-queue ring entry, 64 bytes (checked below). */
struct mcx_eq_entry {
	uint8_t			eq_reserved1;
	uint8_t			eq_event_type;
	uint8_t			eq_reserved2;
	uint8_t			eq_event_sub_type;

	uint8_t			eq_reserved3[28];
	uint32_t		eq_event_data[7];
	uint8_t			eq_reserved4[2];
	uint8_t			eq_signature;
	uint8_t			eq_owner;
#define MCX_EQ_ENTRY_OWNER_INIT			1
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_entry) == 64);

struct mcx_cmd_alloc_pd_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_pd_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_pd;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tdomain;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_create_tir_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tir_mb_in {
	uint8_t			cmd_reserved0[20];
	uint32_t		cmd_disp_type;
#define MCX_TIR_CTX_DISP_TYPE_DIRECT	0
#define MCX_TIR_CTX_DISP_TYPE_INDIRECT	1
#define MCX_TIR_CTX_DISP_TYPE_SHIFT	28
	uint8_t			cmd_reserved1[8];
	uint32_t		cmd_lro;
	uint8_t			cmd_reserved2[8];
	uint32_t		cmd_inline_rqn;
	uint32_t		cmd_indir_table;
	uint32_t		cmd_tdomain;
#define MCX_TIR_CTX_HASH_TOEPLITZ	2
#define MCX_TIR_CTX_HASH_SHIFT		28
	uint8_t			cmd_rx_hash_key[40];
	uint32_t		cmd_rx_hash_sel_outer;
#define MCX_TIR_CTX_HASH_SEL_SRC_IP	(1 << 0)
#define MCX_TIR_CTX_HASH_SEL_DST_IP	(1 << 1)
#define MCX_TIR_CTX_HASH_SEL_SPORT	(1 << 2)
#define MCX_TIR_CTX_HASH_SEL_DPORT	(1 << 3)
#define MCX_TIR_CTX_HASH_SEL_IPV4	(0 << 31)
#define MCX_TIR_CTX_HASH_SEL_IPV6	(1 << 31)
#define MCX_TIR_CTX_HASH_SEL_TCP	(0 << 30)
#define MCX_TIR_CTX_HASH_SEL_UDP	(1 << 30)
	uint32_t		cmd_rx_hash_sel_inner;
	uint8_t			cmd_reserved3[152];
} __packed __aligned(4);

struct mcx_cmd_create_tir_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tirn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_tirn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_mb_in {
	uint8_t			cmd_reserved[16];
	uint32_t		cmd_prio;
	uint8_t			cmd_reserved1[32];
	uint32_t		cmd_tdomain;
	uint8_t			cmd_reserved2[120];
} __packed __aligned(4);

struct mcx_cmd_create_tis_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tisn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_tisn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_rqt_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_rqt_ctx {
	uint8_t			cmd_reserved0[20];
	uint16_t		cmd_reserved1;
	uint16_t		cmd_rqt_max_size;
	uint16_t		cmd_reserved2;
	uint16_t		cmd_rqt_actual_size;
	uint8_t			cmd_reserved3[212];
} __packed __aligned(4);

struct mcx_cmd_create_rqt_mb_in {
	uint8_t			cmd_reserved0[16];
	struct mcx_rqt_ctx	cmd_rqt;
} __packed __aligned(4);

struct mcx_cmd_create_rqt_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_rqtn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rqt_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_rqtn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rqt_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* Completion-queue context, 64 bytes (checked below). */
struct mcx_cq_ctx {
	uint32_t		cq_status;
#define MCX_CQ_CTX_STATUS_SHIFT		28
#define MCX_CQ_CTX_STATUS_MASK		(0xf << MCX_CQ_CTX_STATUS_SHIFT)
#define MCX_CQ_CTX_STATUS_OK		0x0
#define MCX_CQ_CTX_STATUS_OVERFLOW	0x9
#define MCX_CQ_CTX_STATUS_WRITE_FAIL	0xa
#define MCX_CQ_CTX_STATE_SHIFT		8
#define MCX_CQ_CTX_STATE_MASK		(0xf << MCX_CQ_CTX_STATE_SHIFT)
#define MCX_CQ_CTX_STATE_SOLICITED	0x6
#define MCX_CQ_CTX_STATE_ARMED		0x9
#define MCX_CQ_CTX_STATE_FIRED		0xa
	uint32_t		cq_reserved1;
	uint32_t		cq_page_offset;
	uint32_t		cq_uar_size;
#define MCX_CQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT	24
	uint32_t		cq_period_max_count;
#define MCX_CQ_CTX_PERIOD_SHIFT		16
	uint32_t		cq_eqn;
	uint32_t		cq_log_page_size;
#define MCX_CQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		cq_reserved2;
	uint32_t		cq_last_notified;
	uint32_t		cq_last_solicit;
	uint32_t		cq_consumer_counter;
	uint32_t		cq_producer_counter;
	uint8_t			cq_reserved3[8];
	uint64_t		cq_doorbell;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_ctx) == 64);

struct mcx_cmd_create_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_cq_mb_in {
	struct mcx_cq_ctx	cmd_cq_ctx;
	uint8_t			cmd_reserved1[192];
} __packed __aligned(4);

struct mcx_cmd_create_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* One completion-queue ring entry, 64 bytes (checked below). */
struct mcx_cq_entry {
	uint32_t		__reserved__;
	uint32_t		cq_lro;
	uint32_t		cq_lro_ack_seq_num;
	uint32_t		cq_rx_hash;
	uint8_t			cq_rx_hash_type;
	uint8_t			cq_ml_path;
	uint16_t		__reserved__;
	uint32_t		cq_checksum;
	uint32_t		__reserved__;
	uint32_t		cq_flags;
#define MCX_CQ_ENTRY_FLAGS_L4_OK		(1 << 26)
#define MCX_CQ_ENTRY_FLAGS_L3_OK		(1 << 25)
#define MCX_CQ_ENTRY_FLAGS_L2_OK		(1 << 24)
#define MCX_CQ_ENTRY_FLAGS_CV			(1 << 16)
#define MCX_CQ_ENTRY_FLAGS_VLAN_MASK		(0xffff)

	uint32_t		cq_lro_srqn;
	uint32_t		__reserved__[2];
	uint32_t		cq_byte_cnt;
	uint64_t		cq_timestamp;
	uint8_t			cq_rx_drops;
	uint8_t			cq_flow_tag[3];
	uint16_t		cq_wqe_count;
	uint8_t			cq_signature;
	uint8_t			cq_opcode_owner;
#define MCX_CQ_ENTRY_FLAG_OWNER			(1 << 0)
#define MCX_CQ_ENTRY_FLAG_SE			(1 << 1)
#define MCX_CQ_ENTRY_FORMAT_SHIFT		2
#define MCX_CQ_ENTRY_OPCODE_SHIFT		4

#define MCX_CQ_ENTRY_FORMAT_NO_INLINE		0
#define MCX_CQ_ENTRY_FORMAT_INLINE_32		1
#define MCX_CQ_ENTRY_FORMAT_INLINE_64		2
#define MCX_CQ_ENTRY_FORMAT_COMPRESSED		3

#define MCX_CQ_ENTRY_OPCODE_REQ			0
#define MCX_CQ_ENTRY_OPCODE_SEND		2
#define MCX_CQ_ENTRY_OPCODE_REQ_ERR		13
#define MCX_CQ_ENTRY_OPCODE_SEND_ERR		14
#define MCX_CQ_ENTRY_OPCODE_INVALID		15

} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_entry) == 64);

struct mcx_cq_doorbell {
	uint32_t		 db_update_ci;
	uint32_t		 db_arm_ci;
#define MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT	28
#define MCX_CQ_DOORBELL_ARM_CMD			(1 << 24)
#define MCX_CQ_DOORBELL_ARM_CI_MASK		(0xffffff)
} __packed __aligned(8);

/* Work-queue context shared by the SQ and RQ contexts below. */
struct mcx_wq_ctx {
	uint8_t			 wq_type;
#define MCX_WQ_CTX_TYPE_CYCLIC			(1 << 4)
#define MCX_WQ_CTX_TYPE_SIGNATURE		(1 << 3)
	uint8_t			 wq_reserved0[5];
	uint16_t		 wq_lwm;
	uint32_t		 wq_pd;
	uint32_t		 wq_uar_page;
	uint64_t		 wq_doorbell;
	uint32_t		 wq_hw_counter;
	uint32_t		 wq_sw_counter;
	uint16_t		 wq_log_stride;
	uint8_t			 wq_log_page_sz;
	uint8_t			 wq_log_size;
	uint8_t			 wq_reserved1[156];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_wq_ctx) == 0xC0);

struct mcx_sq_ctx {
	uint32_t		sq_flags;
#define MCX_SQ_CTX_RLKEY			(1U << 31)
	/*
	 * NOTE(review): named _SHIFT but defined as a mask, unlike the
	 * other *_SHIFT macros here — verify at use sites.
	 */
#define MCX_SQ_CTX_FRE_SHIFT			(1 << 29)
#define MCX_SQ_CTX_FLUSH_IN_ERROR		(1 << 28)
#define MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT		24
#define MCX_SQ_CTX_STATE_SHIFT			20
#define MCX_SQ_CTX_STATE_MASK			(0xf << 20)
#define MCX_SQ_CTX_STATE_RST			0
#define MCX_SQ_CTX_STATE_RDY			1
#define MCX_SQ_CTX_STATE_ERR			3
	uint32_t		sq_user_index;
	uint32_t		sq_cqn;
	uint32_t		sq_reserved1[5];
	uint32_t		sq_tis_lst_sz;
#define MCX_SQ_CTX_TIS_LST_SZ_SHIFT		16
	uint32_t		sq_reserved2[2];
	uint32_t		sq_tis_num;
	struct mcx_wq_ctx	sq_wq;
} __packed __aligned(4);

/* Scatter/gather descriptor inside a send-queue entry. */
struct mcx_sq_entry_seg {
	uint32_t		sqs_byte_count;
	uint32_t		sqs_lkey;
	uint64_t		sqs_addr;
} __packed __aligned(4);

/* One send-queue work-queue entry, 64 bytes (checked below). */
struct mcx_sq_entry {
	/* control segment */
	uint32_t		sqe_opcode_index;
#define MCX_SQE_WQE_INDEX_SHIFT			8
#define MCX_SQE_WQE_OPCODE_NOP			0x00
#define MCX_SQE_WQE_OPCODE_SEND			0x0a
	uint32_t		sqe_ds_sq_num;
#define MCX_SQE_SQ_NUM_SHIFT			8
	uint32_t		sqe_signature;
#define MCX_SQE_SIGNATURE_SHIFT			24
#define MCX_SQE_SOLICITED_EVENT			0x02
#define MCX_SQE_CE_CQE_ON_ERR			0x00
#define MCX_SQE_CE_CQE_FIRST_ERR		0x04
#define MCX_SQE_CE_CQE_ALWAYS			0x08
#define MCX_SQE_CE_CQE_SOLICIT			0x0C
#define MCX_SQE_FM_NO_FENCE			0x00
#define MCX_SQE_FM_SMALL_FENCE			0x40
	uint32_t		sqe_mkey;

	/* ethernet segment */
	uint32_t		sqe_reserved1;
	uint32_t		sqe_mss_csum;
	/*
	 * NOTE(review): (1 << 31) left-shifts into the sign bit of int;
	 * MCX_SQ_CTX_RLKEY above uses (1U << 31) for the same bit position.
	 */
#define MCX_SQE_L4_CSUM				(1 << 31)
#define MCX_SQE_L3_CSUM				(1 << 30)
	uint32_t		sqe_reserved2;
	uint16_t		sqe_inline_header_size;
	uint16_t		sqe_inline_headers[9];

	/* data segment */
	struct mcx_sq_entry_seg sqe_segs[1];
} __packed __aligned(64);

CTASSERT(sizeof(struct mcx_sq_entry) == 64);

struct mcx_cmd_create_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_sqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_sq_state;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_mb_in {
	uint32_t		cmd_modify_hi;
	uint32_t		cmd_modify_lo;
	uint8_t			cmd_reserved0[8];
	struct mcx_sq_ctx	cmd_sq_ctx;
} __packed __aligned(4);

struct mcx_cmd_modify_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_sqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);


struct mcx_rq_ctx {
	uint32_t		rq_flags;
#define MCX_RQ_CTX_RLKEY			(1U << 31)
#define MCX_RQ_CTX_VLAN_STRIP_DIS		(1 << 28)
#define MCX_RQ_CTX_MEM_RQ_TYPE_SHIFT		24
#define MCX_RQ_CTX_STATE_SHIFT			20
#define MCX_RQ_CTX_STATE_MASK			(0xf << 20)
#define MCX_RQ_CTX_STATE_RST			0
#define MCX_RQ_CTX_STATE_RDY			1
#define MCX_RQ_CTX_STATE_ERR			3
#define MCX_RQ_CTX_FLUSH_IN_ERROR		(1 << 18)
	uint32_t		rq_user_index;
	uint32_t		rq_cqn;
	uint32_t		rq_reserved1;
	uint32_t		rq_rmpn;
	uint32_t		rq_reserved2[7];
	struct mcx_wq_ctx	rq_wq;
} __packed __aligned(4);

/* One receive-queue work-queue entry (single scatter descriptor). */
struct mcx_rq_entry {
	uint32_t		rqe_byte_count;
	uint32_t		rqe_lkey;
	uint64_t		rqe_addr;
} __packed __aligned(16);

struct mcx_cmd_create_rq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_rq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_rqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_rq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_rq_state;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_rq_mb_in {
	uint32_t		cmd_modify_hi;
	uint32_t		cmd_modify_lo;
	uint8_t			cmd_reserved0[8];
	struct mcx_rq_ctx	cmd_rq_ctx;
} __packed __aligned(4);

struct mcx_cmd_modify_rq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_rq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_rqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_flow_table_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_flow_table_ctx {
	uint8_t			ft_miss_action;
	uint8_t			ft_level;
	uint8_t			ft_reserved0;
	uint8_t			ft_log_size;
	uint32_t		ft_table_miss_id;
	uint8_t			ft_reserved1[28];
} __packed __aligned(4);

struct mcx_cmd_create_flow_table_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[7];
	struct mcx_flow_table_ctx cmd_ctx;
} __packed __aligned(4);

struct mcx_cmd_create_flow_table_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_table_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_table_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[40];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_table_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_root_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_root_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[56];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_root_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* Flow-match criteria/value block, 512 bytes (checked below). */
struct mcx_flow_match {
	/* outer headers */
	uint8_t			mc_src_mac[6];
	uint16_t		mc_ethertype;
	uint8_t			mc_dest_mac[6];
	uint16_t		mc_first_vlan;
	uint8_t			mc_ip_proto;
	uint8_t			mc_ip_dscp_ecn;
	uint8_t			mc_vlan_flags;
#define MCX_FLOW_MATCH_IP_FRAG	(1 << 5)
	uint8_t			mc_tcp_flags;
	uint16_t		mc_tcp_sport;
	uint16_t		mc_tcp_dport;
	uint32_t		mc_reserved0;
	uint16_t		mc_udp_sport;
	uint16_t		mc_udp_dport;
	uint8_t			mc_src_ip[16];
	uint8_t			mc_dest_ip[16];

	/* misc parameters */
	uint8_t			mc_reserved1[8];
	uint16_t		mc_second_vlan;
	uint8_t			mc_reserved2[2];
	uint8_t			mc_second_vlan_flags;
	uint8_t			mc_reserved3[15];
	uint32_t		mc_outer_ipv6_flow_label;
	uint8_t			mc_reserved4[32];

	uint8_t			mc_reserved[384];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_flow_match) == 512);

struct mcx_cmd_create_flow_group_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_flow_group_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_start_flow_index;
	uint8_t			cmd_reserved2[4];
	uint32_t		cmd_end_flow_index;
	uint8_t			cmd_reserved3[23];
	uint8_t			cmd_match_criteria_enable;
#define MCX_CREATE_FLOW_GROUP_CRIT_OUTER	(1 << 0)
#define MCX_CREATE_FLOW_GROUP_CRIT_MISC		(1 << 1)
#define MCX_CREATE_FLOW_GROUP_CRIT_INNER	(1 << 2)
	struct mcx_flow_match	cmd_match_criteria;
	uint8_t			cmd_reserved4[448];
} __packed __aligned(4);

struct mcx_cmd_create_flow_group_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_group_id;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_flow_ctx {
	uint8_t			fc_reserved0[4];
	uint32_t		fc_group_id;
	uint32_t		fc_flow_tag;
	uint32_t		fc_action;
#define MCX_FLOW_CONTEXT_ACTION_ALLOW		(1 << 0)
#define MCX_FLOW_CONTEXT_ACTION_DROP		(1 << 1)
#define MCX_FLOW_CONTEXT_ACTION_FORWARD		(1 << 2)
#define MCX_FLOW_CONTEXT_ACTION_COUNT		(1 << 3)
	uint32_t		fc_dest_list_size;
	uint32_t		fc_counter_list_size;
	uint8_t			fc_reserved1[40];
	struct mcx_flow_match	fc_match_value;
	uint8_t			fc_reserved2[192];
} __packed __aligned(4);

/* Destination-entry type tags for the list following a flow context. */
#define MCX_FLOW_CONTEXT_DEST_TYPE_TABLE	(1 << 24)
#define MCX_FLOW_CONTEXT_DEST_TYPE_TIR		(2 << 24)

struct mcx_cmd_destroy_flow_group_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_group_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint32_t		cmd_group_id;
	uint8_t			cmd_reserved1[36];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_group_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_entry_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_entry_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint32_t		cmd_modify_enable_mask;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_flow_index;
	uint8_t			cmd_reserved2[28];
	struct mcx_flow_ctx	cmd_flow_ctx;
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_entry_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_entry_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_entry_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[8];
	uint32_t		cmd_flow_index;
	uint8_t			cmd_reserved2[28];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_entry_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_entry_mb_out {
	uint8_t			cmd_reserved0[48];
	struct mcx_flow_ctx	cmd_flow_ctx;
} __packed __aligned(4);

struct mcx_cmd_delete_flow_table_entry_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_delete_flow_table_entry_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[8];
	uint32_t		cmd_flow_index;
	uint8_t			cmd_reserved2[28];
} __packed __aligned(4);

struct mcx_cmd_delete_flow_table_entry_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_group_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_group_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint32_t		cmd_group_id;
	uint8_t			cmd_reserved1[36];
} __packed __aligned(4);

struct mcx_cmd_query_flow_group_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_group_mb_out {
	uint8_t			cmd_reserved0[12];
	uint32_t		cmd_start_flow_index;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_end_flow_index;
	uint8_t			cmd_reserved2[20];
	uint32_t		cmd_match_criteria_enable;
	uint8_t			cmd_match_criteria[512];
	uint8_t			cmd_reserved4[448];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[40];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_mb_out {
	uint8_t			cmd_reserved0[4];
	struct mcx_flow_table_ctx cmd_ctx;
} __packed __aligned(4);

struct mcx_cmd_alloc_flow_counter_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_rq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_rqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_rq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* NOTE(review): unlike its peers this lacks __packed __aligned — verify. */
struct mcx_cmd_query_rq_mb_out {
	uint8_t			cmd_reserved0[16];
	struct mcx_rq_ctx       cmd_ctx;
};

struct mcx_cmd_query_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_sqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* NOTE(review): unlike its peers this lacks __packed __aligned — verify. */
struct mcx_cmd_query_sq_mb_out {
	uint8_t			cmd_reserved0[16];
	struct mcx_sq_ctx       cmd_ctx;
};

struct mcx_cmd_alloc_flow_counter_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_flow_counter_id;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_wq_doorbell {
	uint32_t		 db_recv_counter;
	uint32_t		 db_send_counter;
} __packed __aligned(8);

/*
 * Software-side bookkeeping structures (not device-visible layouts)
 * follow from here.
 */

/* A single bus_dma allocation: map, segment, and its kernel mapping. */
struct mcx_dmamem {
	bus_dmamap_t		 mxm_map;
	bus_dma_segment_t	 mxm_seg;
	int			 mxm_nsegs;
	size_t			 mxm_size;
	void			*mxm_kva;
};
#define MCX_DMA_MAP(_mxm)	((_mxm)->mxm_map)
#define MCX_DMA_DVA(_mxm)	((_mxm)->mxm_map->dm_segs[0].ds_addr)
#define MCX_DMA_KVA(_mxm)	((void *)(_mxm)->mxm_kva)
#define MCX_DMA_OFF(_mxm, _off)	((void *)((char *)(_mxm)->mxm_kva + (_off)))
#define MCX_DMA_LEN(_mxm)	((_mxm)->mxm_size)

/* Pages handed to the firmware (MANAGE_PAGES). */
struct mcx_hwmem {
	bus_dmamap_t		 mhm_map;
	bus_dma_segment_t	*mhm_segs;
	unsigned int		 mhm_seg_count;
	unsigned int		 mhm_npages;
};

/* Per-descriptor DMA map and the mbuf currently attached to it. */
struct mcx_slot {
	bus_dmamap_t		 ms_map;
	struct mbuf		*ms_m;
};

struct mcx_eq {
	int			 eq_n;
	uint32_t		 eq_cons;
	struct mcx_dmamem	 eq_mem;
};

struct mcx_cq {
	int			 cq_n;
	struct mcx_dmamem	 cq_mem;
	bus_addr_t		 cq_doorbell;
	uint32_t		 cq_cons;
	uint32_t		 cq_count;
};

/* State for converting device timestamps to kernel nanouptime. */
struct mcx_calibration {
	uint64_t		 c_timestamp;	/* previous mcx chip time */
	uint64_t		 c_uptime;	/* previous kernel nanouptime */
	uint64_t		 c_tbase;	/* mcx chip time */
	uint64_t		 c_ubase;	/* kernel nanouptime */
	uint64_t		 c_ratio;
};

#define MCX_CALIBRATE_FIRST    2
#define MCX_CALIBRATE_NORMAL   32

struct mcx_rxring {
	u_int			 rxr_total;
	u_int			 rxr_inuse;
};
MBUFQ_HEAD(mcx_mbufq);

/*
 * Per-queue receive state.  __aligned(64) keeps each instance on its
 * own cache line(s) inside struct mcx_queues.
 */
struct mcx_rx {
	struct mcx_softc	*rx_softc;

	int			 rx_rqn;
	struct mcx_dmamem	 rx_rq_mem;
	struct mcx_slot		*rx_slots;
	bus_addr_t		 rx_doorbell;

	uint32_t		 rx_prod;
	callout_t		 rx_refill;	/* retries mcx_refill */
	struct mcx_rxring	 rx_rxr;
} __aligned(64);

/* Per-queue transmit state; tx_pcq feeds mcx_deferred_transmit. */
struct mcx_tx {
	struct mcx_softc	*tx_softc;
	kmutex_t		 tx_lock;
	pcq_t			*tx_pcq;
	void			*tx_softint;

	int			 tx_uar;
	int			 tx_sqn;
	struct mcx_dmamem	 tx_sq_mem;
	struct mcx_slot		*tx_slots;
	bus_addr_t		 tx_doorbell;
	int			 tx_bf_offset;

	uint32_t		 tx_cons;
	uint32_t		 tx_prod;
} __aligned(64);

/* One rx/tx/cq/eq bundle per MSI-X vector beyond the admin vector. */
struct mcx_queues {
	void			*q_ihc;
	struct mcx_softc	*q_sc;
	int			 q_uar;
	int			 q_index;
	struct mcx_rx		 q_rx;
	struct mcx_tx		 q_tx;
	struct mcx_cq		 q_cq;
	struct mcx_eq		 q_eq;
#if NKSTAT > 0
	struct kstat		*q_kstat;
#endif
};

/* host-side record of a flow group created in the device (-1 = none) */
struct mcx_flow_group {
	int			 g_id;
	int			 g_table;
	int			 g_start;
	int			 g_size;
};

/* indices into sc_flow_group[] */
#define MCX_FLOW_GROUP_PROMISC		 0
#define MCX_FLOW_GROUP_ALLMULTI		 1
#define MCX_FLOW_GROUP_MAC		 2
#define MCX_FLOW_GROUP_RSS_L4		 3
#define MCX_FLOW_GROUP_RSS_L3		 4
#define MCX_FLOW_GROUP_RSS_NONE		 5
#define MCX_NUM_FLOW_GROUPS		 6

/*
 * TIR hash-field selections.  NOTE(review): these expansions are not
 * parenthesized; they are only used in initializer context below —
 * keep it that way or add parens if ever used in larger expressions.
 */
#define MCX_HASH_SEL_L3		MCX_TIR_CTX_HASH_SEL_SRC_IP | \
				MCX_TIR_CTX_HASH_SEL_DST_IP
#define MCX_HASH_SEL_L4		MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_SPORT | \
				MCX_TIR_CTX_HASH_SEL_DPORT

#define MCX_RSS_HASH_SEL_V4_TCP	MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_TCP |\
				MCX_TIR_CTX_HASH_SEL_IPV4
#define MCX_RSS_HASH_SEL_V6_TCP	MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_TCP | \
				MCX_TIR_CTX_HASH_SEL_IPV6
#define MCX_RSS_HASH_SEL_V4_UDP	MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_UDP | \
				MCX_TIR_CTX_HASH_SEL_IPV4
#define MCX_RSS_HASH_SEL_V6_UDP	MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_UDP | \
				MCX_TIR_CTX_HASH_SEL_IPV6

#define MCX_RSS_HASH_SEL_V4	MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_IPV4
#define MCX_RSS_HASH_SEL_V6	MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_IPV6

/*
 * There are a few different pieces involved in configuring RSS.
 * A Receive Queue Table (RQT) is the indirection table that maps packets to
 * different rx queues based on a hash value.  We only create one, because
 * we want to scatter any traffic we can apply RSS to across all our rx
 * queues.  Anything else will only be delivered to the first rx queue,
 * which doesn't require an RQT.
 *
 * A Transport Interface Receive (TIR) delivers packets to either a single rx
 * queue or an RQT, and in the latter case, specifies the set of fields
 * hashed, the hash function, and the hash key.  We need one of these for each
 * type of RSS traffic - v4 TCP, v6 TCP, v4 UDP, v6 UDP, other v4, other v6,
 * and one for non-RSS traffic.
 *
 * Flow tables hold flow table entries in sequence.  The first entry that
 * matches a packet is applied, sending the packet to either another flow
 * table or a TIR.  We use one flow table to select packets based on
 * destination MAC address, and a second to apply RSS.  The entries in the
 * first table send matching packets to the second, and the entries in the
 * RSS table send packets to RSS TIRs if possible, or the non-RSS TIR.
 *
 * The flow table entry that delivers packets to an RSS TIR must include match
 * criteria that ensure packets delivered to the TIR include all the fields
 * that the TIR hashes on - so for a v4 TCP TIR, the flow table entry must
 * only accept v4 TCP packets.  Accordingly, we need flow table entries for
 * each TIR.
 *
 * All of this is a lot more flexible than we need, and we can describe most
 * of the stuff we need with a simple array.
 *
 * An RSS config creates a TIR with hashing enabled on a set of fields,
 * pointing to either the first rx queue or the RQT containing all the rx
 * queues, and a flow table entry that matches on an ether type and
 * optionally an ip proto, that delivers packets to the TIR.
 */
static struct mcx_rss_rule {
	int			hash_sel;	/* MCX_RSS_HASH_SEL_* or 0 */
	int			flow_group;	/* MCX_FLOW_GROUP_RSS_* */
	int			ethertype;	/* 0 = match anything */
	int			ip_proto;	/* 0 = no proto match */
} mcx_rss_config[] = {
	/* udp and tcp for v4/v6 */
	{ MCX_RSS_HASH_SEL_V4_TCP, MCX_FLOW_GROUP_RSS_L4,
	  ETHERTYPE_IP, IPPROTO_TCP },
	{ MCX_RSS_HASH_SEL_V6_TCP, MCX_FLOW_GROUP_RSS_L4,
	  ETHERTYPE_IPV6, IPPROTO_TCP },
	{ MCX_RSS_HASH_SEL_V4_UDP, MCX_FLOW_GROUP_RSS_L4,
	  ETHERTYPE_IP, IPPROTO_UDP },
	{ MCX_RSS_HASH_SEL_V6_UDP, MCX_FLOW_GROUP_RSS_L4,
	  ETHERTYPE_IPV6, IPPROTO_UDP },

	/* other v4/v6 */
	{ MCX_RSS_HASH_SEL_V4, MCX_FLOW_GROUP_RSS_L3,
	  ETHERTYPE_IP, 0 },
	{ MCX_RSS_HASH_SEL_V6, MCX_FLOW_GROUP_RSS_L3,
	  ETHERTYPE_IPV6, 0 },

	/* non v4/v6 */
	{ 0, MCX_FLOW_GROUP_RSS_NONE, 0, 0 }
};

/* per-device softc */
struct mcx_softc {
	device_t		 sc_dev;
	struct ethercom		 sc_ec;
	struct ifmedia		 sc_media;
	uint64_t		 sc_media_status;
	uint64_t		 sc_media_active;
	kmutex_t		 sc_media_mutex;

	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	*sc_intrs;
	void			*sc_ihc;	/* admin queue interrupt */
	pcitag_t		 sc_tag;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;	/* 0 once unmapped */

	struct mcx_dmamem	 sc_cmdq_mem;
	unsigned int		 sc_cmdq_mask;	/* cq_size - 1 */
	unsigned int		 sc_cmdq_size;	/* entry stride in bytes */

	unsigned int		 sc_cmdq_token;	/* see mcx_cmdq_token() */

	struct mcx_hwmem	 sc_boot_pages;
	struct mcx_hwmem	 sc_init_pages;
	struct mcx_hwmem	 sc_regular_pages;

	int			 sc_uar;
	int			 sc_pd;
	int			 sc_tdomain;
	uint32_t		 sc_lkey;
	int			 sc_tis;
	int			 sc_tir[__arraycount(mcx_rss_config)];
	int			 sc_rqt;	/* -1 = not created */

	struct mcx_dmamem	 sc_doorbell_mem;

	struct mcx_eq		 sc_admin_eq;
	struct mcx_eq		 sc_queue_eq;

	int			 sc_hardmtu;
	int			 sc_rxbufsz;

	int			 sc_bf_size;
	int			 sc_max_rqt_size;

	struct workqueue	*sc_workq;
	struct work		 sc_port_change;

	int			 sc_mac_flow_table_id;	/* -1 = not created */
	int			 sc_rss_flow_table_id;	/* -1 = not created */
	struct mcx_flow_group	 sc_flow_group[MCX_NUM_FLOW_GROUPS];
	int			 sc_promisc_flow_enabled;
	int			 sc_allmulti_flow_enabled;
	int			 sc_mcast_flow_base;
	int			 sc_extra_mcast;
	uint8_t			 sc_mcast_flows[MCX_NUM_MCAST_FLOWS][ETHER_ADDR_LEN];

	struct mcx_calibration	 sc_calibration[2];	/* flip/flop gens */
	unsigned int		 sc_calibration_gen;
	callout_t		 sc_calibrate;
	uint32_t		 sc_mhz;
	uint32_t		 sc_khz;

	struct mcx_queues	*sc_queues;
	unsigned int		 sc_nqueues;	/* power of two */

	int			 sc_mcam_reg;

#if NKSTAT > 0
	struct kstat		*sc_kstat_ieee8023;
	struct kstat		*sc_kstat_rfc2863;
	struct kstat		*sc_kstat_rfc2819;
	struct kstat		*sc_kstat_rfc3635;
	unsigned int		 sc_kstat_mtmp_count;
	struct kstat		**sc_kstat_mtmp;
#endif

	struct timecounter	 sc_timecounter;
};
#define DEVNAME(_sc)	device_xname((_sc)->sc_dev)

static int	mcx_match(device_t, cfdata_t, void *);
static void	mcx_attach(device_t, device_t, void *);

static void *	mcx_establish_intr(struct mcx_softc *, int, kcpuset_t *,
		    int (*)(void *), void *, const char *);

static void	mcx_rxr_init(struct mcx_rxring *, u_int, u_int);
static u_int	mcx_rxr_get(struct mcx_rxring *, u_int);
static void	mcx_rxr_put(struct mcx_rxring *, u_int);
static u_int	mcx_rxr_inuse(struct mcx_rxring *);

#if NKSTAT > 0
static void	mcx_kstat_attach(struct mcx_softc *);
#endif

static void	mcx_timecounter_attach(struct mcx_softc *);

static int	mcx_version(struct mcx_softc *);
static int	mcx_init_wait(struct mcx_softc *);
static int	mcx_enable_hca(struct mcx_softc *);
static int	mcx_teardown_hca(struct mcx_softc *, uint16_t);
static int	mcx_access_hca_reg(struct mcx_softc *, uint16_t, int, void *,
		    int);
static int	mcx_issi(struct mcx_softc *);
static int	mcx_pages(struct mcx_softc *, struct mcx_hwmem *, uint16_t);
static int	mcx_hca_max_caps(struct mcx_softc *);
static int	mcx_hca_set_caps(struct mcx_softc *);
static int	mcx_init_hca(struct mcx_softc *);
static int	mcx_set_driver_version(struct mcx_softc *);
static int	mcx_iff(struct mcx_softc *);
static int	mcx_alloc_uar(struct mcx_softc *, int *);
static int	mcx_alloc_pd(struct mcx_softc *);
static int	mcx_alloc_tdomain(struct mcx_softc *);
static int	mcx_create_eq(struct mcx_softc *, struct mcx_eq *, int,
		    uint64_t, int);
static int	mcx_query_nic_vport_context(struct mcx_softc *, uint8_t *);
static int	mcx_query_special_contexts(struct mcx_softc *);
static int	mcx_set_port_mtu(struct mcx_softc *, int);
static int	mcx_create_cq(struct mcx_softc *, struct mcx_cq *, int, int,
		    int);
static int	mcx_destroy_cq(struct mcx_softc *, struct mcx_cq *);
static int	mcx_create_sq(struct mcx_softc *, struct mcx_tx *, int, int,
		    int);
static int	mcx_destroy_sq(struct mcx_softc *, struct mcx_tx *);
static int	mcx_ready_sq(struct mcx_softc *, struct mcx_tx *);
static int	mcx_create_rq(struct mcx_softc *, struct mcx_rx *, int, int);
static int	mcx_destroy_rq(struct mcx_softc *, struct mcx_rx *);
static int	mcx_ready_rq(struct mcx_softc *, struct mcx_rx *);
static int	mcx_create_tir_direct(struct mcx_softc *, struct mcx_rx *,
		    int *);
static int	mcx_create_tir_indirect(struct mcx_softc *, int, uint32_t,
		    int *);
static int	mcx_destroy_tir(struct mcx_softc *, int);
static int	mcx_create_tis(struct mcx_softc *, int *);
static int	mcx_destroy_tis(struct mcx_softc *, int);
static int	mcx_create_rqt(struct mcx_softc *, int, int *, int *);
static int	mcx_destroy_rqt(struct mcx_softc *, int);
static int	mcx_create_flow_table(struct mcx_softc *, int, int, int *);
static int	mcx_set_flow_table_root(struct mcx_softc *, int);
static int	mcx_destroy_flow_table(struct mcx_softc *, int);
static int	mcx_create_flow_group(struct mcx_softc *, int, int, int,
		    int, int, struct mcx_flow_match *);
static int	mcx_destroy_flow_group(struct mcx_softc *, int);
static int	mcx_set_flow_table_entry_mac(struct mcx_softc *, int, int,
		    const uint8_t *, uint32_t);
static int	mcx_set_flow_table_entry_proto(struct mcx_softc *, int, int,
		    int, int, uint32_t);
static int	mcx_delete_flow_table_entry(struct mcx_softc *, int, int);

#if NKSTAT > 0
static int	mcx_query_rq(struct mcx_softc *, struct mcx_rx *, struct mcx_rq_ctx *);
static int	mcx_query_sq(struct mcx_softc *, struct mcx_tx *, struct mcx_sq_ctx *);
static int	mcx_query_cq(struct mcx_softc *, struct mcx_cq *, struct mcx_cq_ctx *);
static int	mcx_query_eq(struct mcx_softc *, struct mcx_eq *, struct mcx_eq_ctx *);
#endif

#if 0
static int	mcx_dump_flow_table(struct mcx_softc *, int);
static int	mcx_dump_flow_table_entry(struct mcx_softc *, int, int);
static int	mcx_dump_flow_group(struct mcx_softc *, int);
#endif


/*
static void	mcx_cmdq_dump(const struct mcx_cmdq_entry *);
static void	mcx_cmdq_mbox_dump(struct mcx_dmamem *, int);
*/
static void	mcx_refill(void *);
static int	mcx_process_rx(struct mcx_softc *, struct mcx_rx *,
		    struct mcx_cq_entry *, struct mcx_mbufq *,
		    const struct mcx_calibration *);
static int	mcx_process_txeof(struct mcx_softc *, struct mcx_tx *,
		    struct mcx_cq_entry *);
static void	mcx_process_cq(struct mcx_softc *, struct mcx_queues *,
		    struct mcx_cq *);

static void	mcx_arm_cq(struct mcx_softc *, struct mcx_cq *, int);
static void	mcx_arm_eq(struct mcx_softc *, struct mcx_eq *, int);
static int	mcx_admin_intr(void *);
static int	mcx_cq_intr(void *);

static int	mcx_init(struct ifnet *);
static void	mcx_stop(struct ifnet *, int);
static int	mcx_ioctl(struct ifnet *, u_long, void *);
static void	mcx_start(struct ifnet *);
static int	mcx_transmit(struct ifnet *, struct mbuf *);
static void	mcx_deferred_transmit(void *);
static void	mcx_media_add_types(struct mcx_softc *);
static void	mcx_media_status(struct ifnet *, struct ifmediareq *);
static int	mcx_media_change(struct ifnet *);
#if 0
static int	mcx_get_sffpage(struct ifnet *, struct if_sffpage *);
#endif
static void	mcx_port_change(struct work *, void *);

static void	mcx_calibrate_first(struct mcx_softc *);
static void	mcx_calibrate(void *);

static inline uint32_t
		mcx_rd(struct mcx_softc *, bus_size_t);
static inline void
		mcx_wr(struct mcx_softc *, bus_size_t, uint32_t);
static inline void
		mcx_bar(struct mcx_softc *, bus_size_t, bus_size_t, int);

static uint64_t	mcx_timer(struct mcx_softc *);

static int	mcx_dmamem_alloc(struct mcx_softc *, struct mcx_dmamem *,
		    bus_size_t, u_int align);
static void	mcx_dmamem_zero(struct mcx_dmamem *);
static void	mcx_dmamem_free(struct mcx_softc *, struct mcx_dmamem *);

static int	mcx_hwmem_alloc(struct mcx_softc *, struct mcx_hwmem *,
		    unsigned int);
static void	mcx_hwmem_free(struct mcx_softc *, struct mcx_hwmem *);

CFATTACH_DECL_NEW(mcx, sizeof(struct mcx_softc), mcx_match, mcx_attach, NULL, NULL);

/* PCI IDs matched by mcx_match() */
static const struct {
	pci_vendor_id_t		vendor;
	pci_product_id_t	product;
} mcx_devices[] = {
	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27700 },
	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27700VF },
	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27710 },
	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27710VF },
	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27800 },
	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27800VF },
	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28800 },
	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28800VF },
	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28908 },
	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT2892 },
};

/* maps a device ethernet protocol capability bit to ifmedia type + baudrate */
struct mcx_eth_proto_capability {
	uint64_t	cap_media;
	uint64_t	cap_baudrate;
};

static const struct mcx_eth_proto_capability mcx_eth_cap_map[] = {
	[MCX_ETHER_CAP_SGMII]		= { IFM_1000_SGMII,	IF_Gbps(1) },
	[MCX_ETHER_CAP_1000_KX]		= { IFM_1000_KX,	IF_Gbps(1) },
	[MCX_ETHER_CAP_10G_CX4]		= { IFM_10G_CX4,	IF_Gbps(10) },
	[MCX_ETHER_CAP_10G_KX4]		= { IFM_10G_KX4,	IF_Gbps(10) },
	[MCX_ETHER_CAP_10G_KR]		= { IFM_10G_KR,		IF_Gbps(10) },
	[MCX_ETHER_CAP_20G_KR2]		= { IFM_20G_KR2,	IF_Gbps(20) },
	[MCX_ETHER_CAP_40G_CR4]		= { IFM_40G_CR4,	IF_Gbps(40) },
	[MCX_ETHER_CAP_40G_KR4]		= { IFM_40G_KR4,	IF_Gbps(40) },
	[MCX_ETHER_CAP_56G_R4]		= { IFM_56G_R4,		IF_Gbps(56) },
	[MCX_ETHER_CAP_10G_CR]		= { IFM_10G_CR1,	IF_Gbps(10) },
	[MCX_ETHER_CAP_10G_SR]		= { IFM_10G_SR,		IF_Gbps(10) },
	[MCX_ETHER_CAP_10G_LR]		= { IFM_10G_LR,		IF_Gbps(10) },
	[MCX_ETHER_CAP_40G_SR4]		= { IFM_40G_SR4,	IF_Gbps(40) },
	[MCX_ETHER_CAP_40G_LR4]		= { IFM_40G_LR4,	IF_Gbps(40) },
	[MCX_ETHER_CAP_50G_SR2]		= { IFM_50G_SR2,	IF_Gbps(50) },
	[MCX_ETHER_CAP_100G_CR4]	= { IFM_100G_CR4,	IF_Gbps(100) },
	[MCX_ETHER_CAP_100G_SR4]	= { IFM_100G_SR4,	IF_Gbps(100) },
	[MCX_ETHER_CAP_100G_KR4]	= { IFM_100G_KR4,	IF_Gbps(100) },
	[MCX_ETHER_CAP_100G_LR4]	= { IFM_100G_LR4,	IF_Gbps(100) },
	[MCX_ETHER_CAP_100_TX]		= { IFM_100_TX,		IF_Mbps(100) },
	[MCX_ETHER_CAP_1000_T]		= { IFM_1000_T,		IF_Gbps(1) },
	[MCX_ETHER_CAP_10G_T]		= { IFM_10G_T,		IF_Gbps(10) },
	[MCX_ETHER_CAP_25G_CR]		= { IFM_25G_CR,		IF_Gbps(25) },
[MCX_ETHER_CAP_25G_KR] = { IFM_25G_KR, IF_Gbps(25) }, 2714 [MCX_ETHER_CAP_25G_SR] = { IFM_25G_SR, IF_Gbps(25) }, 2715 [MCX_ETHER_CAP_50G_CR2] = { IFM_50G_CR2, IF_Gbps(50) }, 2716 [MCX_ETHER_CAP_50G_KR2] = { IFM_50G_KR2, IF_Gbps(50) }, 2717 }; 2718 2719 static int 2720 mcx_get_id(uint32_t val) 2721 { 2722 return be32toh(val) & 0x00ffffff; 2723 } 2724 2725 static int 2726 mcx_match(device_t parent, cfdata_t cf, void *aux) 2727 { 2728 struct pci_attach_args *pa = aux; 2729 int n; 2730 2731 for (n = 0; n < __arraycount(mcx_devices); n++) { 2732 if (PCI_VENDOR(pa->pa_id) == mcx_devices[n].vendor && 2733 PCI_PRODUCT(pa->pa_id) == mcx_devices[n].product) 2734 return 1; 2735 } 2736 2737 return 0; 2738 } 2739 2740 void 2741 mcx_attach(device_t parent, device_t self, void *aux) 2742 { 2743 struct mcx_softc *sc = device_private(self); 2744 struct ifnet *ifp = &sc->sc_ec.ec_if; 2745 struct pci_attach_args *pa = aux; 2746 struct ifcapreq ifcr; 2747 uint8_t enaddr[ETHER_ADDR_LEN]; 2748 int counts[PCI_INTR_TYPE_SIZE]; 2749 char intrxname[32]; 2750 pcireg_t memtype; 2751 uint32_t r; 2752 unsigned int cq_stride; 2753 unsigned int cq_size; 2754 int i, msix; 2755 kcpuset_t *affinity; 2756 2757 sc->sc_dev = self; 2758 sc->sc_pc = pa->pa_pc; 2759 sc->sc_tag = pa->pa_tag; 2760 if (pci_dma64_available(pa)) 2761 sc->sc_dmat = pa->pa_dmat64; 2762 else 2763 sc->sc_dmat = pa->pa_dmat; 2764 2765 /* Map the PCI memory space */ 2766 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MCX_HCA_BAR); 2767 if (pci_mapreg_map(pa, MCX_HCA_BAR, memtype, 2768 #ifdef __NetBSD__ 2769 0, 2770 #else 2771 BUS_SPACE_MAP_PREFETCHABLE, 2772 #endif 2773 &sc->sc_memt, &sc->sc_memh, 2774 NULL, &sc->sc_mems)) { 2775 aprint_error(": unable to map register memory\n"); 2776 return; 2777 } 2778 2779 pci_aprint_devinfo(pa, "Ethernet controller"); 2780 2781 mutex_init(&sc->sc_media_mutex, MUTEX_DEFAULT, IPL_SOFTNET); 2782 2783 if (mcx_version(sc) != 0) { 2784 /* error printed by mcx_version */ 2785 goto unmap; 2786 } 2787 
2788 r = mcx_rd(sc, MCX_CMDQ_ADDR_LO); 2789 cq_stride = 1 << MCX_CMDQ_LOG_STRIDE(r); /* size of the entries */ 2790 cq_size = 1 << MCX_CMDQ_LOG_SIZE(r); /* number of entries */ 2791 if (cq_size > MCX_MAX_CQE) { 2792 aprint_error_dev(self, 2793 "command queue size overflow %u\n", cq_size); 2794 goto unmap; 2795 } 2796 if (cq_stride < sizeof(struct mcx_cmdq_entry)) { 2797 aprint_error_dev(self, 2798 "command queue entry size underflow %u\n", cq_stride); 2799 goto unmap; 2800 } 2801 if (cq_stride * cq_size > MCX_PAGE_SIZE) { 2802 aprint_error_dev(self, "command queue page overflow\n"); 2803 goto unmap; 2804 } 2805 2806 if (mcx_dmamem_alloc(sc, &sc->sc_doorbell_mem, MCX_DOORBELL_AREA_SIZE, 2807 MCX_PAGE_SIZE) != 0) { 2808 aprint_error_dev(self, "unable to allocate doorbell memory\n"); 2809 goto unmap; 2810 } 2811 2812 if (mcx_dmamem_alloc(sc, &sc->sc_cmdq_mem, MCX_PAGE_SIZE, 2813 MCX_PAGE_SIZE) != 0) { 2814 aprint_error_dev(self, "unable to allocate command queue\n"); 2815 goto dbfree; 2816 } 2817 2818 mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32); 2819 mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint32_t), 2820 BUS_SPACE_BARRIER_WRITE); 2821 mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem)); 2822 mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint32_t), 2823 BUS_SPACE_BARRIER_WRITE); 2824 2825 if (mcx_init_wait(sc) != 0) { 2826 aprint_error_dev(self, "timeout waiting for init\n"); 2827 goto cqfree; 2828 } 2829 2830 sc->sc_cmdq_mask = cq_size - 1; 2831 sc->sc_cmdq_size = cq_stride; 2832 2833 if (mcx_enable_hca(sc) != 0) { 2834 /* error printed by mcx_enable_hca */ 2835 goto cqfree; 2836 } 2837 2838 if (mcx_issi(sc) != 0) { 2839 /* error printed by mcx_issi */ 2840 goto teardown; 2841 } 2842 2843 if (mcx_pages(sc, &sc->sc_boot_pages, 2844 htobe16(MCX_CMD_QUERY_PAGES_BOOT)) != 0) { 2845 /* error printed by mcx_pages */ 2846 goto teardown; 2847 } 2848 2849 if (mcx_hca_max_caps(sc) != 0) { 2850 /* error printed by mcx_hca_max_caps */ 2851 goto teardown; 2852 } 
2853 2854 if (mcx_hca_set_caps(sc) != 0) { 2855 /* error printed by mcx_hca_set_caps */ 2856 goto teardown; 2857 } 2858 2859 if (mcx_pages(sc, &sc->sc_init_pages, 2860 htobe16(MCX_CMD_QUERY_PAGES_INIT)) != 0) { 2861 /* error printed by mcx_pages */ 2862 goto teardown; 2863 } 2864 2865 if (mcx_init_hca(sc) != 0) { 2866 /* error printed by mcx_init_hca */ 2867 goto teardown; 2868 } 2869 2870 if (mcx_pages(sc, &sc->sc_regular_pages, 2871 htobe16(MCX_CMD_QUERY_PAGES_REGULAR)) != 0) { 2872 /* error printed by mcx_pages */ 2873 goto teardown; 2874 } 2875 2876 /* apparently not necessary? */ 2877 if (mcx_set_driver_version(sc) != 0) { 2878 /* error printed by mcx_set_driver_version */ 2879 goto teardown; 2880 } 2881 2882 if (mcx_iff(sc) != 0) { /* modify nic vport context */ 2883 /* error printed by mcx_iff? */ 2884 goto teardown; 2885 } 2886 2887 if (mcx_alloc_uar(sc, &sc->sc_uar) != 0) { 2888 /* error printed by mcx_alloc_uar */ 2889 goto teardown; 2890 } 2891 2892 if (mcx_alloc_pd(sc) != 0) { 2893 /* error printed by mcx_alloc_pd */ 2894 goto teardown; 2895 } 2896 2897 if (mcx_alloc_tdomain(sc) != 0) { 2898 /* error printed by mcx_alloc_tdomain */ 2899 goto teardown; 2900 } 2901 2902 /* 2903 * PRM makes no mention of msi interrupts, just legacy and msi-x. 2904 * mellanox support tells me legacy interrupts are not supported, 2905 * so we're stuck with just msi-x. 
2906 */ 2907 counts[PCI_INTR_TYPE_MSIX] = -1; 2908 counts[PCI_INTR_TYPE_MSI] = 0; 2909 counts[PCI_INTR_TYPE_INTX] = 0; 2910 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, PCI_INTR_TYPE_MSIX) != 0) { 2911 aprint_error_dev(self, "unable to allocate interrupt\n"); 2912 goto teardown; 2913 } 2914 if (counts[PCI_INTR_TYPE_MSIX] < 2) { 2915 aprint_error_dev(self, "not enough MSI-X vectors\n"); 2916 goto teardown; 2917 } 2918 KASSERT(pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX); 2919 snprintf(intrxname, sizeof(intrxname), "%s adminq", DEVNAME(sc)); 2920 sc->sc_ihc = mcx_establish_intr(sc, 0, NULL, mcx_admin_intr, sc, 2921 intrxname); 2922 if (sc->sc_ihc == NULL) { 2923 aprint_error_dev(self, "couldn't establish adminq interrupt\n"); 2924 goto teardown; 2925 } 2926 2927 if (mcx_create_eq(sc, &sc->sc_admin_eq, sc->sc_uar, 2928 (1ull << MCX_EVENT_TYPE_INTERNAL_ERROR) | 2929 (1ull << MCX_EVENT_TYPE_PORT_CHANGE) | 2930 (1ull << MCX_EVENT_TYPE_CMD_COMPLETION) | 2931 (1ull << MCX_EVENT_TYPE_PAGE_REQUEST), 0) != 0) { 2932 /* error printed by mcx_create_eq */ 2933 goto teardown; 2934 } 2935 2936 if (mcx_query_nic_vport_context(sc, enaddr) != 0) { 2937 /* error printed by mcx_query_nic_vport_context */ 2938 goto teardown; 2939 } 2940 2941 if (mcx_query_special_contexts(sc) != 0) { 2942 /* error printed by mcx_query_special_contexts */ 2943 goto teardown; 2944 } 2945 2946 if (mcx_set_port_mtu(sc, MCX_HARDMTU) != 0) { 2947 /* error printed by mcx_set_port_mtu */ 2948 goto teardown; 2949 } 2950 2951 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 2952 ether_sprintf(enaddr)); 2953 2954 msix = counts[PCI_INTR_TYPE_MSIX]; 2955 msix--; /* admin ops took one */ 2956 2957 sc->sc_nqueues = uimin(MCX_MAX_QUEUES, msix); 2958 sc->sc_nqueues = uimin(sc->sc_nqueues, ncpu); 2959 /* Round down to a power of two. 
*/ 2960 sc->sc_nqueues = 1U << ilog2(sc->sc_nqueues); 2961 sc->sc_queues = kmem_zalloc(sc->sc_nqueues * sizeof(*sc->sc_queues), 2962 KM_SLEEP); 2963 2964 strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ); 2965 ifp->if_softc = sc; 2966 ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX; 2967 #ifdef MCX_MPSAFE 2968 ifp->if_extflags = IFEF_MPSAFE; 2969 #endif 2970 ifp->if_init = mcx_init; 2971 ifp->if_stop = mcx_stop; 2972 ifp->if_ioctl = mcx_ioctl; 2973 ifp->if_start = mcx_start; 2974 if (sc->sc_nqueues > 1) { 2975 ifp->if_transmit = mcx_transmit; 2976 } 2977 ifp->if_mtu = sc->sc_hardmtu; 2978 ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_IPv4_Tx | 2979 IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx | 2980 IFCAP_CSUM_UDPv6_Rx | IFCAP_CSUM_UDPv6_Tx | 2981 IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx | 2982 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_TCPv6_Tx; 2983 IFQ_SET_MAXLEN(&ifp->if_snd, 1024); 2984 IFQ_SET_READY(&ifp->if_snd); 2985 2986 sc->sc_ec.ec_capabilities = ETHERCAP_JUMBO_MTU | 2987 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING; 2988 sc->sc_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING; 2989 2990 sc->sc_ec.ec_ifmedia = &sc->sc_media; 2991 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, mcx_media_change, 2992 mcx_media_status, &sc->sc_media_mutex); 2993 mcx_media_add_types(sc); 2994 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL); 2995 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO); 2996 2997 if_attach(ifp); 2998 2999 /* Enable hardware offload by default */ 3000 memset(&ifcr, 0, sizeof(ifcr)); 3001 ifcr.ifcr_capenable = ifp->if_capabilities; 3002 ifioctl_common(ifp, SIOCSIFCAP, &ifcr); 3003 3004 if_deferred_start_init(ifp, NULL); 3005 3006 ether_ifattach(ifp, enaddr); 3007 3008 kcpuset_create(&affinity, false); 3009 kcpuset_set(affinity, 0); 3010 3011 for (i = 0; i < sc->sc_nqueues; i++) { 3012 struct mcx_queues *q = &sc->sc_queues[i]; 3013 struct mcx_rx *rx = &q->q_rx; 3014 struct mcx_tx *tx = &q->q_tx; 3015 int vec; 3016 3017 vec = i + 1; 3018 
		q->q_sc = sc;
		q->q_index = i;

		if (mcx_alloc_uar(sc, &q->q_uar) != 0) {
			aprint_error_dev(self, "unable to alloc uar %d\n", i);
			goto teardown;
		}

		if (mcx_create_eq(sc, &q->q_eq, q->q_uar, 0, vec) != 0) {
			aprint_error_dev(self,
			    "unable to create event queue %d\n", i);
			goto teardown;
		}

		/* rx refill runs from a callout so it can back off and retry */
		rx->rx_softc = sc;
		callout_init(&rx->rx_refill, CALLOUT_FLAGS);
		callout_setfunc(&rx->rx_refill, mcx_refill, rx);

		tx->tx_softc = sc;
		mutex_init(&tx->tx_lock, MUTEX_DEFAULT, IPL_NET);
		tx->tx_pcq = pcq_create(MCX_TXQ_NUM, KM_SLEEP);
		tx->tx_softint = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
		    mcx_deferred_transmit, tx);

		/* one interrupt per queue pair, named after the queue */
		snprintf(intrxname, sizeof(intrxname), "%s queue %d",
		    DEVNAME(sc), i);
		q->q_ihc = mcx_establish_intr(sc, vec, affinity, mcx_cq_intr,
		    q, intrxname);
	}

	callout_init(&sc->sc_calibrate, CALLOUT_FLAGS);
	callout_setfunc(&sc->sc_calibrate, mcx_calibrate, sc);

	if (workqueue_create(&sc->sc_workq, "mcxportchg", mcx_port_change, sc,
	    PRI_NONE, IPL_NET, 0) != 0) {
		aprint_error_dev(self, "couldn't create port change workq\n");
		goto teardown;
	}

	/* fetch the initial link state directly rather than waiting for an event */
	mcx_port_change(&sc->sc_port_change, sc);

	/* mark all flow tables/groups as not-yet-created */
	sc->sc_mac_flow_table_id = -1;
	sc->sc_rss_flow_table_id = -1;
	sc->sc_rqt = -1;
	for (i = 0; i < MCX_NUM_FLOW_GROUPS; i++) {
		struct mcx_flow_group *mfg = &sc->sc_flow_group[i];
		mfg->g_id = -1;
		mfg->g_table = -1;
		mfg->g_size = 0;
		mfg->g_start = 0;
	}
	sc->sc_extra_mcast = 0;
	memset(sc->sc_mcast_flows, 0, sizeof(sc->sc_mcast_flows));

#if NKSTAT > 0
	mcx_kstat_attach(sc);
#endif
	mcx_timecounter_attach(sc);
	return;

teardown:
	mcx_teardown_hca(sc, htobe16(MCX_CMD_TEARDOWN_HCA_GRACEFUL));
	/* error printed by mcx_teardown_hca, and we're already unwinding */
cqfree:
	/*
	 * tell the firmware the command queue is going away: rewrite the
	 * queue address with the INTERFACE_DISABLED bit, then clear it.
	 */
	mcx_wr(sc, MCX_CMDQ_ADDR_HI,
	    MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t),
	    BUS_SPACE_BARRIER_WRITE);
	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem) |
	    MCX_CMDQ_INTERFACE_DISABLED);
	mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint64_t),
	    BUS_SPACE_BARRIER_WRITE);

	mcx_wr(sc, MCX_CMDQ_ADDR_HI, 0);
	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t),
	    BUS_SPACE_BARRIER_WRITE);
	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_CMDQ_INTERFACE_DISABLED);

	mcx_dmamem_free(sc, &sc->sc_cmdq_mem);
dbfree:
	mcx_dmamem_free(sc, &sc->sc_doorbell_mem);
unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}

/*
 * Establish the MSI-X interrupt at index 'index' as MP-safe and, for the
 * per-queue vectors (index > 0), distribute it round-robin across CPUs.
 * Returns the interrupt handle, or NULL on failure.
 */
static void *
mcx_establish_intr(struct mcx_softc *sc, int index, kcpuset_t *affinity,
    int (*func)(void *), void *arg, const char *xname)
{
	char intrbuf[PCI_INTRSTR_LEN];
	const char *intrstr;
	void *ih;

	pci_intr_setattr(sc->sc_pc, &sc->sc_intrs[index], PCI_INTR_MPSAFE,
	    true);

	intrstr = pci_intr_string(sc->sc_pc, sc->sc_intrs[index], intrbuf,
	    sizeof(intrbuf));
	ih = pci_intr_establish_xname(sc->sc_pc, sc->sc_intrs[index], IPL_NET,
	    func, arg, xname);
	if (ih == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to establish interrupt%s%s\n",
		    intrstr ? " at " : "",
		    intrstr ? intrstr : "");
		return NULL;
	}

	if (affinity != NULL && index > 0) {
		/* Round-robin affinity */
		kcpuset_zero(affinity);
		kcpuset_set(affinity, (index - 1) % ncpu);
		interrupt_distribute(ih, affinity, NULL);
	}

	return ih;
}

/*
 * Minimal rx ring accounting: track how many slots of 'hwm' are in use.
 * The low watermark argument is unused on NetBSD.
 */
static void
mcx_rxr_init(struct mcx_rxring *rxr, u_int lwm __unused, u_int hwm)
{
	rxr->rxr_total = hwm;
	rxr->rxr_inuse = 0;
}

/* Claim up to 'max' free ring slots; returns how many were actually taken. */
static u_int
mcx_rxr_get(struct mcx_rxring *rxr, u_int max)
{
	const u_int taken = MIN(max, rxr->rxr_total - rxr->rxr_inuse);

	rxr->rxr_inuse += taken;

	return taken;
}

/* Return 'n' slots to the ring. */
static void
mcx_rxr_put(struct mcx_rxring *rxr, u_int n)
{
	rxr->rxr_inuse -= n;
}

/* Number of ring slots currently claimed. */
static u_int
mcx_rxr_inuse(struct mcx_rxring *rxr)
{
	return rxr->rxr_inuse;
}

/*
 * Read and report the firmware version, and check that the device speaks
 * the command interface revision this driver understands.
 * Returns 0 if supported, -1 otherwise.
 */
static int
mcx_version(struct mcx_softc *sc)
{
	uint32_t fw0, fw1;
	uint16_t cmdif;

	fw0 = mcx_rd(sc, MCX_FW_VER);
	fw1 = mcx_rd(sc, MCX_CMDIF_FW_SUBVER);

	aprint_normal_dev(sc->sc_dev, "FW %u.%u.%04u\n", MCX_FW_VER_MAJOR(fw0),
	    MCX_FW_VER_MINOR(fw0), MCX_FW_VER_SUBMINOR(fw1));

	cmdif = MCX_CMDIF(fw1);
	if (cmdif != MCX_CMD_IF_SUPPORTED) {
		aprint_error_dev(sc->sc_dev,
		    "unsupported command interface %u\n", cmdif);
		return (-1);
	}

	return (0);
}

/*
 * Wait up to ~2 seconds (2000 x 1ms) for the device state register to
 * report READY.  Returns 0 when ready, -1 on timeout.
 */
static int
mcx_init_wait(struct mcx_softc *sc)
{
	unsigned int i;
	uint32_t r;

	for (i = 0; i < 2000; i++) {
		r = mcx_rd(sc, MCX_STATE);
		if ((r & MCX_STATE_MASK) == MCX_STATE_READY)
			return (0);

		delay(1000);
		mcx_bar(sc, MCX_STATE, sizeof(uint32_t),
		    BUS_SPACE_BARRIER_READ);
	}

	return (-1);
}

/*
 * Poll a command queue entry until firmware hands ownership back to
 * software, re-syncing the DMA area each millisecond.  Returns 0 on
 * completion or ETIMEDOUT after 'msec' milliseconds.
 */
static uint8_t
mcx_cmdq_poll(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
    unsigned int msec)
{
	unsigned int i;

	for (i = 0; i < msec; i++) {
		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
		    0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);

		/* ownership bit flips to SW when firmware is done */
		if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
		    MCX_CQ_STATUS_OWN_SW)
			return (0);

		delay(1000);
	}

	return (ETIMEDOUT);
}

/* Fold a 64-bit value into a running 32-bit xor accumulator. */
static uint32_t
mcx_mix_u64(uint32_t xor, uint64_t u64)
{
	xor ^= u64 >> 32;
	xor ^= u64;

	return (xor);
}

/* Fold a 32-bit value into a running 32-bit xor accumulator. */
static uint32_t
mcx_mix_u32(uint32_t xor, uint32_t u32)
{
	xor ^= u32;

	return (xor);
}

/* Fold an 8-bit value into a running 32-bit xor accumulator. */
static uint32_t
mcx_mix_u8(uint32_t xor, uint8_t u8)
{
	xor ^= u8;

	return (xor);
}

/* Collapse the 32-bit xor accumulator down to the final 8-bit signature. */
static uint8_t
mcx_mix_done(uint32_t xor)
{
	xor ^= xor >> 16;
	xor ^= xor >> 8;

	return (xor);
}

/*
 * Compute the 8-bit xor signature over a buffer.  'len' is rounded down
 * to whole 32-bit words; the accumulator starts at 0xff.
 */
static uint8_t
mcx_xor(const void *buf, size_t len)
{
	const uint32_t *dwords = buf;
	uint32_t xor = 0xff;
	size_t i;

	len /= sizeof(*dwords);

	for (i = 0; i < len; i++)
		xor ^= dwords[i];

	return (mcx_mix_done(xor));
}

/* Next non-zero command token; 0 is reserved, so skip it on wrap. */
static uint8_t
mcx_cmdq_token(struct mcx_softc *sc)
{
	uint8_t token;

	do {
		token = ++sc->sc_cmdq_token;
	} while (token == 0);

	return (token);
}

/*
 * Initialise a command queue entry: zero it, fill in the input/output
 * lengths (big-endian) and token, and give ownership to the hardware.
 */
static void
mcx_cmdq_init(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
    uint32_t ilen, uint32_t olen, uint8_t token)
{
	memset(cqe, 0, sc->sc_cmdq_size);

	cqe->cq_type = MCX_CMDQ_TYPE_PCIE;
	be32enc(&cqe->cq_input_length, ilen);
	be32enc(&cqe->cq_output_length, olen);
	cqe->cq_token = token;
	cqe->cq_status = MCX_CQ_STATUS_OWN_HW;
}

/* Stamp the entry with the inverted xor signature over the whole entry. */
static void
mcx_cmdq_sign(struct mcx_cmdq_entry *cqe)
{
	cqe->cq_signature = ~mcx_xor(cqe, sizeof(*cqe));
}

/*
 * Verify a completed entry's signature.  The actual check is disabled
 * (always returns 0); the intended check is kept in the comment.
 */
static int
mcx_cmdq_verify(const struct mcx_cmdq_entry *cqe)
{
	/* return (mcx_xor(cqe, sizeof(*cqe)) ? -1 : 0); */
	return (0);
}

/* Pointer to the inline input data area of a command queue entry. */
static void *
mcx_cmdq_in(struct mcx_cmdq_entry *cqe)
{
	return (&cqe->cq_input_data);
}

/* Pointer to the inline output data area of a command queue entry. */
static void *
mcx_cmdq_out(struct mcx_cmdq_entry *cqe)
{
	return (&cqe->cq_output_data);
}

/*
 * Sign the entry, flush the command queue DMA memory, then ring the
 * doorbell for 'slot' to hand the command to firmware.
 */
static void
mcx_cmdq_post(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
    unsigned int slot)
{
	mcx_cmdq_sign(cqe);

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
	    0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_PRERW);

	mcx_wr(sc, MCX_CMDQ_DOORBELL, 1U << slot);
	mcx_bar(sc, MCX_CMDQ_DOORBELL, sizeof(uint32_t),
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Issue ENABLE_HCA.  All data fits inline in the command queue entry,
 * so no mailboxes are needed.  Returns 0 on success, -1 on any failure
 * (message printed mid-attach, hence the leading ", ").
 */
static int
mcx_enable_hca(struct mcx_softc *sc)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_enable_hca_in *in;
	struct mcx_cmd_enable_hca_out *out;
	int error;
	uint8_t status;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_ENABLE_HCA);
	in->cmd_op_mod = htobe16(0);
	in->cmd_function_id = htobe16(0);

	mcx_cmdq_post(sc, cqe, 0);

	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf(", hca enable timeout\n");
		return (-1);
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf(", hca enable command corrupt\n");
		return (-1);
	}

	status = cqe->cq_output_data[0];
	if (status != MCX_CQ_STATUS_OK) {
		printf(", hca enable failed (%x)\n", status);
		return (-1);
	}

	return (0);
}

/*
 * Issue TEARDOWN_HCA with the given profile.  'profile' is passed
 * through unchanged, so callers supply it already in big-endian form.
 * Returns 0 on success, -1 on any failure.
 */
static int
mcx_teardown_hca(struct mcx_softc *sc, uint16_t profile)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_teardown_hca_in *in;
	struct mcx_cmd_teardown_hca_out *out;
	int error;
	uint8_t status;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_TEARDOWN_HCA);
	in->cmd_op_mod = htobe16(0);
	in->cmd_profile = profile;

	mcx_cmdq_post(sc, cqe, 0);

	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf(", hca teardown timeout\n");
		return (-1);
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf(", hca teardown command corrupt\n");
		return (-1);
	}

	status = cqe->cq_output_data[0];
	if (status != MCX_CQ_STATUS_OK) {
		printf(", hca teardown failed (%x)\n", status);
		return (-1);
	}

	return (0);
}

/*
 * Allocate 'nmb' command mailboxes as one DMA region and link them into
 * a chain: '*ptr' (the cqe's input or output pointer, or the previous
 * mailbox's next pointer) is patched to each mailbox's device address in
 * turn.  Returns 0 on success or the mcx_dmamem_alloc() error.
 */
static int
mcx_cmdq_mboxes_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
    unsigned int nmb, uint64_t *ptr, uint8_t token)
{
	uint8_t *kva;
	uint64_t dva;
	int i;
	int error;

	error = mcx_dmamem_alloc(sc, mxm,
	    nmb * MCX_CMDQ_MAILBOX_SIZE, MCX_CMDQ_MAILBOX_ALIGN);
	if (error != 0)
		return (error);

	mcx_dmamem_zero(mxm);

	dva = MCX_DMA_DVA(mxm);
	kva = MCX_DMA_KVA(mxm);
	for (i = 0; i < nmb; i++) {
		struct mcx_cmdq_mailbox *mbox = (struct mcx_cmdq_mailbox *)kva;

		/* patch the cqe or mbox pointing at this one */
		be64enc(ptr, dva);

		/* fill in this mbox */
		be32enc(&mbox->mb_block_number, i);
		mbox->mb_token = token;

		/* move to the next one */
		ptr = &mbox->mb_next_ptr;

		dva += MCX_CMDQ_MAILBOX_SIZE;
		kva += MCX_CMDQ_MAILBOX_SIZE;
	}

	return (0);
}

/* Signature over a mailbox's control fields (next ptr, block number, token). */
static uint32_t
mcx_cmdq_mbox_ctrl_sig(const struct mcx_cmdq_mailbox *mb)
{
	uint32_t xor = 0xff;

	/* only 3 fields get set, so mix them directly */
	xor = mcx_mix_u64(xor, mb->mb_next_ptr);
	xor = mcx_mix_u32(xor, mb->mb_block_number);
	xor = mcx_mix_u8(xor, mb->mb_token);

	return (mcx_mix_done(xor));
}

static void
mcx_cmdq_mboxes_sign(struct mcx_dmamem *mxm, unsigned int nmb)
{
	uint8_t *kva;
	int i;

	kva = MCX_DMA_KVA(mxm);

	/*
	 * each mailbox carries two signatures: one over the control
	 * fields and one over control + data.
	 */
	for (i = 0; i < nmb; i++) {
		struct mcx_cmdq_mailbox *mb = (struct mcx_cmdq_mailbox *)kva;
		uint8_t sig = mcx_cmdq_mbox_ctrl_sig(mb);
		mb->mb_ctrl_signature = sig;
		mb->mb_signature = sig ^
		    mcx_xor(mb->mb_data, sizeof(mb->mb_data));

		kva += MCX_CMDQ_MAILBOX_SIZE;
	}
}

/* bus_dmamap_sync() over the whole mailbox chain. */
static void
mcx_cmdq_mboxes_sync(struct mcx_softc *sc, struct mcx_dmamem *mxm, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(mxm),
	    0, MCX_DMA_LEN(mxm), ops);
}

/* Pointer to the i'th mailbox within a mailbox chain allocation. */
static struct mcx_cmdq_mailbox *
mcx_cq_mbox(struct mcx_dmamem *mxm, unsigned int i)
{
	uint8_t *kva;

	kva = MCX_DMA_KVA(mxm);
	kva += i * MCX_CMDQ_MAILBOX_SIZE;

	return ((struct mcx_cmdq_mailbox *)kva);
}

/* Pointer to a mailbox's data area. */
static inline void *
mcx_cq_mbox_data(struct mcx_cmdq_mailbox *mb)
{
	return (&mb->mb_data);
}

/*
 * Scatter 'len' bytes from 'b' across up to 'nmb' mailboxes, filling
 * each mailbox's data area in order.  Stops early once all of 'len'
 * has been copied.
 */
static void
mcx_cmdq_mboxes_copyin(struct mcx_dmamem *mxm, unsigned int nmb,
    void *b, size_t len)
{
	uint8_t *buf = b;
	struct mcx_cmdq_mailbox *mb;
	int i;

	mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
	for (i = 0; i < nmb; i++) {

		memcpy(mb->mb_data, buf, uimin(sizeof(mb->mb_data), len));

		if (sizeof(mb->mb_data) >= len)
			break;

		buf += sizeof(mb->mb_data);
		len -= sizeof(mb->mb_data);
		mb++;
	}
}

/*
 * Write the page-aligned physical addresses of 'buf' into the mailbox
 * chain as a big-endian address array, starting 'offset' bytes into the
 * mailbox data (addresses continue into following mailboxes as needed).
 */
static void
mcx_cmdq_mboxes_pas(struct mcx_dmamem *mxm, int offset, int npages,
    struct mcx_dmamem *buf)
{
	uint64_t *pas;
	int mbox, mbox_pages, i;

	mbox = offset / MCX_CMDQ_MAILBOX_DATASIZE;
	offset %= MCX_CMDQ_MAILBOX_DATASIZE;

	pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
	pas += (offset / sizeof(*pas));
	mbox_pages = (MCX_CMDQ_MAILBOX_DATASIZE - offset) / sizeof(*pas);
	for (i = 0; i < npages; i++) {
		/* ran off the end of this mailbox, move to the next */
		if (i == mbox_pages) {
			mbox++;
			pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
			mbox_pages += MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas);
		}
		*pas = htobe64(MCX_DMA_DVA(buf) + (i * MCX_PAGE_SIZE));
		pas++;
	}
}

/*
 * Gather 'len' bytes out of up to 'nmb' mailboxes into 'b', the inverse
 * of mcx_cmdq_mboxes_copyin().
 */
static void
mcx_cmdq_mboxes_copyout(struct mcx_dmamem *mxm, int nmb, void *b, size_t len)
{
	uint8_t *buf = b;
	struct mcx_cmdq_mailbox *mb;
	int i;

	mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
	for (i = 0; i < nmb; i++) {
		memcpy(buf, mb->mb_data, uimin(sizeof(mb->mb_data), len));

		if (sizeof(mb->mb_data) >= len)
			break;

		buf += sizeof(mb->mb_data);
		len -= sizeof(mb->mb_data);
		mb++;
	}
}

/* Release a mailbox chain allocated by mcx_cmdq_mboxes_alloc(). */
static void
mcx_cq_mboxes_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
{
	mcx_dmamem_free(sc, mxm);
}

#if 0
/* Debug dump of a command queue entry (compiled out). */
static void
mcx_cmdq_dump(const struct mcx_cmdq_entry *cqe)
{
	unsigned int i;

	printf(" type %02x, ilen %u, iptr %016llx", cqe->cq_type,
	    be32dec(&cqe->cq_input_length), be64dec(&cqe->cq_input_ptr));

	printf(", idata ");
	for (i = 0; i < sizeof(cqe->cq_input_data); i++)
		printf("%02x", cqe->cq_input_data[i]);

	printf(", odata ");
	for (i = 0; i < sizeof(cqe->cq_output_data); i++)
		printf("%02x", cqe->cq_output_data[i]);

	printf(", optr %016llx, olen %u, token %02x, sig %02x, status %02x",
	    be64dec(&cqe->cq_output_ptr), be32dec(&cqe->cq_output_length),
	    cqe->cq_token, cqe->cq_signature, cqe->cq_status);
}

/* Debug hexdump of mailbox data areas (compiled out). */
static void
mcx_cmdq_mbox_dump(struct mcx_dmamem *mboxes, int num)
{
	int i, j;
	uint8_t *d;

	for (i = 0; i < num; i++) {
		struct mcx_cmdq_mailbox *mbox;
		mbox = mcx_cq_mbox(mboxes, i);

		d = mcx_cq_mbox_data(mbox);
		for (j = 0; j < MCX_CMDQ_MAILBOX_DATASIZE; j++) {
			if (j != 0 && (j % 16 == 0))
				printf("\n");
printf("%.2x ", d[j]); 3618 } 3619 } 3620 } 3621 #endif 3622 3623 static int 3624 mcx_access_hca_reg(struct mcx_softc *sc, uint16_t reg, int op, void *data, 3625 int len) 3626 { 3627 struct mcx_dmamem mxm; 3628 struct mcx_cmdq_entry *cqe; 3629 struct mcx_cmd_access_reg_in *in; 3630 struct mcx_cmd_access_reg_out *out; 3631 uint8_t token = mcx_cmdq_token(sc); 3632 int error, nmb; 3633 3634 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 3635 mcx_cmdq_init(sc, cqe, sizeof(*in) + len, sizeof(*out) + len, 3636 token); 3637 3638 in = mcx_cmdq_in(cqe); 3639 in->cmd_opcode = htobe16(MCX_CMD_ACCESS_REG); 3640 in->cmd_op_mod = htobe16(op); 3641 in->cmd_register_id = htobe16(reg); 3642 3643 nmb = howmany(len, MCX_CMDQ_MAILBOX_DATASIZE); 3644 if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb, 3645 &cqe->cq_output_ptr, token) != 0) { 3646 printf(", unable to allocate access reg mailboxen\n"); 3647 return (-1); 3648 } 3649 cqe->cq_input_ptr = cqe->cq_output_ptr; 3650 mcx_cmdq_mboxes_copyin(&mxm, nmb, data, len); 3651 mcx_cmdq_mboxes_sign(&mxm, nmb); 3652 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW); 3653 3654 mcx_cmdq_post(sc, cqe, 0); 3655 error = mcx_cmdq_poll(sc, cqe, 1000); 3656 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW); 3657 3658 if (error != 0) { 3659 printf("%s: access reg (%s %x) timeout\n", DEVNAME(sc), 3660 (op == MCX_REG_OP_WRITE ? "write" : "read"), reg); 3661 goto free; 3662 } 3663 error = mcx_cmdq_verify(cqe); 3664 if (error != 0) { 3665 printf("%s: access reg (%s %x) reply corrupt\n", 3666 (op == MCX_REG_OP_WRITE ? "write" : "read"), DEVNAME(sc), 3667 reg); 3668 goto free; 3669 } 3670 3671 out = mcx_cmdq_out(cqe); 3672 if (out->cmd_status != MCX_CQ_STATUS_OK) { 3673 printf("%s: access reg (%s %x) failed (%x, %.6x)\n", 3674 DEVNAME(sc), (op == MCX_REG_OP_WRITE ? 
"write" : "read"), 3675 reg, out->cmd_status, be32toh(out->cmd_syndrome)); 3676 error = -1; 3677 goto free; 3678 } 3679 3680 mcx_cmdq_mboxes_copyout(&mxm, nmb, data, len); 3681 free: 3682 mcx_dmamem_free(sc, &mxm); 3683 3684 return (error); 3685 } 3686 3687 static int 3688 mcx_set_issi(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe, 3689 unsigned int slot) 3690 { 3691 struct mcx_cmd_set_issi_in *in; 3692 struct mcx_cmd_set_issi_out *out; 3693 uint8_t status; 3694 3695 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc)); 3696 3697 in = mcx_cmdq_in(cqe); 3698 in->cmd_opcode = htobe16(MCX_CMD_SET_ISSI); 3699 in->cmd_op_mod = htobe16(0); 3700 in->cmd_current_issi = htobe16(MCX_ISSI); 3701 3702 mcx_cmdq_post(sc, cqe, slot); 3703 if (mcx_cmdq_poll(sc, cqe, 1000) != 0) 3704 return (-1); 3705 if (mcx_cmdq_verify(cqe) != 0) 3706 return (-1); 3707 3708 status = cqe->cq_output_data[0]; 3709 if (status != MCX_CQ_STATUS_OK) 3710 return (-1); 3711 3712 return (0); 3713 } 3714 3715 static int 3716 mcx_issi(struct mcx_softc *sc) 3717 { 3718 struct mcx_dmamem mxm; 3719 struct mcx_cmdq_entry *cqe; 3720 struct mcx_cmd_query_issi_in *in; 3721 struct mcx_cmd_query_issi_il_out *out; 3722 struct mcx_cmd_query_issi_mb_out *mb; 3723 uint8_t token = mcx_cmdq_token(sc); 3724 uint8_t status; 3725 int error; 3726 3727 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 3728 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mb), token); 3729 3730 in = mcx_cmdq_in(cqe); 3731 in->cmd_opcode = htobe16(MCX_CMD_QUERY_ISSI); 3732 in->cmd_op_mod = htobe16(0); 3733 3734 CTASSERT(sizeof(*mb) <= MCX_CMDQ_MAILBOX_DATASIZE); 3735 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, 3736 &cqe->cq_output_ptr, token) != 0) { 3737 printf(", unable to allocate query issi mailbox\n"); 3738 return (-1); 3739 } 3740 mcx_cmdq_mboxes_sign(&mxm, 1); 3741 3742 mcx_cmdq_post(sc, cqe, 0); 3743 error = mcx_cmdq_poll(sc, cqe, 1000); 3744 if (error != 0) { 3745 printf(", query issi timeout\n"); 3746 goto free; 3747 } 3748 
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf(", query issi reply corrupt\n");
		goto free;
	}

	status = cqe->cq_output_data[0];
	switch (status) {
	case MCX_CQ_STATUS_OK:
		break;
	case MCX_CQ_STATUS_BAD_OPCODE:
		/* use ISSI 0 */
		goto free;
	default:
		printf(", query issi failed (%x)\n", status);
		error = -1;
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_current_issi == htobe16(MCX_ISSI)) {
		/* use ISSI 1 */
		goto free;
	}

	/* don't need to read cqe anymore, can be used for SET ISSI */

	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	CTASSERT(MCX_ISSI < NBBY);
	/* XXX math is hard */
	if (!ISSET(mb->cmd_supported_issi[79], 1 << MCX_ISSI)) {
		/* use ISSI 0 */
		goto free;
	}

	if (mcx_set_issi(sc, cqe, 0) != 0) {
		/* ignore the error, just use ISSI 0 */
	} else {
		/* use ISSI 1 */
	}

free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}

/*
 * Issue QUERY_PAGES for the given request type and return the firmware's
 * page count and function id.  'type' is passed through to cmd_op_mod
 * unchanged, so callers supply it already in big-endian form.
 * Note cmd_num_pages may be negative (firmware returning pages).
 */
static int
mcx_query_pages(struct mcx_softc *sc, uint16_t type,
    int32_t *npages, uint16_t *func_id)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_pages_in *in;
	struct mcx_cmd_query_pages_out *out;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_PAGES);
	in->cmd_op_mod = type;

	mcx_cmdq_post(sc, cqe, 0);
	if (mcx_cmdq_poll(sc, cqe, 1000) != 0) {
		printf(", query pages timeout\n");
		return (-1);
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf(", query pages reply corrupt\n");
		return (-1);
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf(", query pages failed (%x)\n", out->cmd_status);
		return (-1);
	}

	*func_id = out->cmd_func_id;
	*npages = be32dec(&out->cmd_num_pages);

	return (0);
}

/*
 * Cursor for walking a loaded bus_dma map segment list in fixed-size
 * steps, used to hand page addresses to the firmware one at a time.
 */
struct bus_dma_iter {
	bus_dmamap_t i_map;
	bus_size_t i_offset;	/* offset within the current segment */
	unsigned int i_index;	/* current segment index */
};

/* Start the iterator at the beginning of the map. */
static void
bus_dma_iter_init(struct bus_dma_iter *i, bus_dmamap_t map)
{
	i->i_map = map;
	i->i_offset = 0;
	i->i_index = 0;
}

/* Bus address at the iterator's current position. */
static bus_addr_t
bus_dma_iter_addr(struct bus_dma_iter *i)
{
	return (i->i_map->dm_segs[i->i_index].ds_addr + i->i_offset);
}

/* Advance the iterator by 'size' bytes, crossing segments as needed. */
static void
bus_dma_iter_add(struct bus_dma_iter *i, bus_size_t size)
{
	bus_dma_segment_t *seg = i->i_map->dm_segs + i->i_index;
	bus_size_t diff;

	do {
		diff = seg->ds_len - i->i_offset;
		if (size < diff)
			break;

		size -= diff;

		seg++;

		i->i_offset = 0;
		i->i_index++;
	} while (size > 0);

	i->i_offset += size;
}

/*
 * Give the firmware the pages in 'mhm' via MANAGE_PAGES: fill the
 * mailbox chain with the big-endian physical address of each page and
 * post the command.  Returns 0 on success, -1 (or a verify error) on
 * failure.
 */
static int
mcx_add_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t func_id)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_manage_pages_in *in;
	struct mcx_cmd_manage_pages_out *out;
	unsigned int paslen, nmb, i, j, npages;
	struct bus_dma_iter iter;
	uint64_t *pas;
	uint8_t status;
	uint8_t token = mcx_cmdq_token(sc);
	int error;

	npages = mhm->mhm_npages;

	paslen = sizeof(*pas) * npages;
	nmb = howmany(paslen, MCX_CMDQ_MAILBOX_DATASIZE);

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + paslen, sizeof(*out), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_MANAGE_PAGES);
	in->cmd_op_mod = htobe16(MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS);
	in->cmd_func_id = func_id;
	be32enc(&in->cmd_input_num_entries, npages);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
	    &cqe->cq_input_ptr, token) != 0) {
		printf(", unable to allocate manage pages mailboxen\n");
		return (-1);
	}

	/* walk the hwmem map, writing one page address per array slot */
	bus_dma_iter_init(&iter, mhm->mhm_map);
	for (i = 0; i < nmb; i++) {
		unsigned int lim;

		pas = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, i));
		lim = uimin(MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas), npages);

		for (j = 0; j < lim; j++) {
			be64enc(&pas[j], bus_dma_iter_addr(&iter));
			bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
		}

		npages -= lim;
	}

	mcx_cmdq_mboxes_sign(&mxm, nmb);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf(", manage pages timeout\n");
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf(", manage pages reply corrupt\n");
		goto free;
	}

	status = cqe->cq_output_data[0];
	if (status != MCX_CQ_STATUS_OK) {
		printf(", manage pages failed (%x)\n", status);
		error = -1;
		goto free;
	}

free:
	mcx_dmamem_free(sc, &mxm);

	return (error);
}

/*
 * Ask the firmware how many pages it wants for 'type' (boot/init/regular),
 * allocate that much hardware-owned memory into 'mhm', and hand the pages
 * over.  Returns 0 on success (including when no pages were requested),
 * -1 on failure with the hwmem freed.
 */
static int
mcx_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t type)
{
	int32_t npages;
	uint16_t func_id;

	if (mcx_query_pages(sc, type, &npages, &func_id) != 0) {
		/* error printed by mcx_query_pages */
		return (-1);
	}

	if (npages < 1)
		return (0);

	if (mcx_hwmem_alloc(sc, mhm, npages) != 0) {
		printf(", unable to allocate hwmem\n");
		return (-1);
	}

	if (mcx_add_pages(sc, mhm, func_id) != 0) {
		printf(", unable to add hwmem\n");
		goto free;
	}

	return (0);

free:
	mcx_hwmem_free(sc, mhm);

	return (-1);
}

/*
 * Query the device's maximum capabilities (QUERY_HCA_CAP, MAX) and check
 * the device is usable: ethernet port type and a minimum page shift no
 * larger than the system's.  Also records blueflame size, max RQT size,
 * MCAM availability and device clock frequencies in the softc.
 */
static int
mcx_hca_max_caps(struct mcx_softc *sc)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_hca_cap_in *in;
	struct mcx_cmd_query_hca_cap_out *out;
	struct mcx_cmdq_mailbox *mb;
	struct mcx_cap_device *hca;
	uint8_t status;
uint8_t token = mcx_cmdq_token(sc); 3992 int error; 3993 3994 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 3995 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN, 3996 token); 3997 3998 in = mcx_cmdq_in(cqe); 3999 in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP); 4000 in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_MAX | 4001 MCX_CMD_QUERY_HCA_CAP_DEVICE); 4002 4003 if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES, 4004 &cqe->cq_output_ptr, token) != 0) { 4005 printf(", unable to allocate query hca caps mailboxen\n"); 4006 return (-1); 4007 } 4008 mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES); 4009 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW); 4010 4011 mcx_cmdq_post(sc, cqe, 0); 4012 error = mcx_cmdq_poll(sc, cqe, 1000); 4013 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW); 4014 4015 if (error != 0) { 4016 printf(", query hca caps timeout\n"); 4017 goto free; 4018 } 4019 error = mcx_cmdq_verify(cqe); 4020 if (error != 0) { 4021 printf(", query hca caps reply corrupt\n"); 4022 goto free; 4023 } 4024 4025 status = cqe->cq_output_data[0]; 4026 if (status != MCX_CQ_STATUS_OK) { 4027 printf(", query hca caps failed (%x)\n", status); 4028 error = -1; 4029 goto free; 4030 } 4031 4032 mb = mcx_cq_mbox(&mxm, 0); 4033 hca = mcx_cq_mbox_data(mb); 4034 4035 if ((hca->port_type & MCX_CAP_DEVICE_PORT_TYPE) 4036 != MCX_CAP_DEVICE_PORT_TYPE_ETH) { 4037 printf(", not in ethernet mode\n"); 4038 error = -1; 4039 goto free; 4040 } 4041 if (hca->log_pg_sz > PAGE_SHIFT) { 4042 printf(", minimum system page shift %u is too large\n", 4043 hca->log_pg_sz); 4044 error = -1; 4045 goto free; 4046 } 4047 /* 4048 * blueflame register is split into two buffers, and we must alternate 4049 * between the two of them. 
4050 */ 4051 sc->sc_bf_size = (1 << hca->log_bf_reg_size) / 2; 4052 sc->sc_max_rqt_size = (1 << hca->log_max_rqt_size); 4053 4054 if (hca->local_ca_ack_delay & MCX_CAP_DEVICE_MCAM_REG) 4055 sc->sc_mcam_reg = 1; 4056 4057 sc->sc_mhz = be32dec(&hca->device_frequency_mhz); 4058 sc->sc_khz = be32dec(&hca->device_frequency_khz); 4059 4060 free: 4061 mcx_dmamem_free(sc, &mxm); 4062 4063 return (error); 4064 } 4065 4066 static int 4067 mcx_hca_set_caps(struct mcx_softc *sc) 4068 { 4069 struct mcx_dmamem mxm; 4070 struct mcx_cmdq_entry *cqe; 4071 struct mcx_cmd_query_hca_cap_in *in; 4072 struct mcx_cmd_query_hca_cap_out *out; 4073 struct mcx_cmdq_mailbox *mb; 4074 struct mcx_cap_device *hca; 4075 uint8_t status; 4076 uint8_t token = mcx_cmdq_token(sc); 4077 int error; 4078 4079 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 4080 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN, 4081 token); 4082 4083 in = mcx_cmdq_in(cqe); 4084 in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP); 4085 in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_CURRENT | 4086 MCX_CMD_QUERY_HCA_CAP_DEVICE); 4087 4088 if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES, 4089 &cqe->cq_output_ptr, token) != 0) { 4090 printf(", unable to allocate manage pages mailboxen\n"); 4091 return (-1); 4092 } 4093 mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES); 4094 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW); 4095 4096 mcx_cmdq_post(sc, cqe, 0); 4097 error = mcx_cmdq_poll(sc, cqe, 1000); 4098 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW); 4099 4100 if (error != 0) { 4101 printf(", query hca caps timeout\n"); 4102 goto free; 4103 } 4104 error = mcx_cmdq_verify(cqe); 4105 if (error != 0) { 4106 printf(", query hca caps reply corrupt\n"); 4107 goto free; 4108 } 4109 4110 status = cqe->cq_output_data[0]; 4111 if (status != MCX_CQ_STATUS_OK) { 4112 printf(", query hca caps failed (%x)\n", status); 4113 error = -1; 4114 goto free; 4115 } 4116 4117 mb = mcx_cq_mbox(&mxm, 0); 4118 hca = 
mcx_cq_mbox_data(mb); 4119 4120 hca->log_pg_sz = PAGE_SHIFT; 4121 4122 free: 4123 mcx_dmamem_free(sc, &mxm); 4124 4125 return (error); 4126 } 4127 4128 4129 static int 4130 mcx_init_hca(struct mcx_softc *sc) 4131 { 4132 struct mcx_cmdq_entry *cqe; 4133 struct mcx_cmd_init_hca_in *in; 4134 struct mcx_cmd_init_hca_out *out; 4135 int error; 4136 uint8_t status; 4137 4138 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 4139 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc)); 4140 4141 in = mcx_cmdq_in(cqe); 4142 in->cmd_opcode = htobe16(MCX_CMD_INIT_HCA); 4143 in->cmd_op_mod = htobe16(0); 4144 4145 mcx_cmdq_post(sc, cqe, 0); 4146 4147 error = mcx_cmdq_poll(sc, cqe, 1000); 4148 if (error != 0) { 4149 printf(", hca init timeout\n"); 4150 return (-1); 4151 } 4152 if (mcx_cmdq_verify(cqe) != 0) { 4153 printf(", hca init command corrupt\n"); 4154 return (-1); 4155 } 4156 4157 status = cqe->cq_output_data[0]; 4158 if (status != MCX_CQ_STATUS_OK) { 4159 printf(", hca init failed (%x)\n", status); 4160 return (-1); 4161 } 4162 4163 return (0); 4164 } 4165 4166 static int 4167 mcx_set_driver_version(struct mcx_softc *sc) 4168 { 4169 struct mcx_dmamem mxm; 4170 struct mcx_cmdq_entry *cqe; 4171 struct mcx_cmd_set_driver_version_in *in; 4172 struct mcx_cmd_set_driver_version_out *out; 4173 int error; 4174 int token; 4175 uint8_t status; 4176 4177 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 4178 token = mcx_cmdq_token(sc); 4179 mcx_cmdq_init(sc, cqe, sizeof(*in) + 4180 sizeof(struct mcx_cmd_set_driver_version), sizeof(*out), token); 4181 4182 in = mcx_cmdq_in(cqe); 4183 in->cmd_opcode = htobe16(MCX_CMD_SET_DRIVER_VERSION); 4184 in->cmd_op_mod = htobe16(0); 4185 4186 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, 4187 &cqe->cq_input_ptr, token) != 0) { 4188 printf(", unable to allocate set driver version mailboxen\n"); 4189 return (-1); 4190 } 4191 strlcpy(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)), 4192 "OpenBSD,mcx,1.000.000000", MCX_CMDQ_MAILBOX_DATASIZE); 4193 4194 
	mcx_cmdq_mboxes_sign(&mxm, 1);
	mcx_cmdq_post(sc, cqe, 0);

	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf(", set driver version timeout\n");
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf(", set driver version command corrupt\n");
		goto free;
	}

	status = cqe->cq_output_data[0];
	if (status != MCX_CQ_STATUS_OK) {
		printf(", set driver version failed (%x)\n", status);
		error = -1;
		goto free;
	}

free:
	mcx_dmamem_free(sc, &mxm);

	return (error);
}

/*
 * Apply interface flags: install or remove the promisc and all-multicast
 * flow table entries to match IFF_PROMISC/IFF_ALLMULTI, then update the
 * NIC vport context (MTU and promisc flags).
 */
static int
mcx_iff(struct mcx_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_modify_nic_vport_context_in *in;
	struct mcx_cmd_modify_nic_vport_context_out *out;
	struct mcx_nic_vport_ctx *ctx;
	int error;
	int token;
	int insize;
	uint32_t dest;

	/* matching packets are sent on to the RSS flow table */
	dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
	    sc->sc_rss_flow_table_id;

	/* enable or disable the promisc flow */
	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
		if (sc->sc_promisc_flow_enabled == 0) {
			mcx_set_flow_table_entry_mac(sc,
			    MCX_FLOW_GROUP_PROMISC, 0, NULL, dest);
			sc->sc_promisc_flow_enabled = 1;
		}
	} else if (sc->sc_promisc_flow_enabled != 0) {
		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
		sc->sc_promisc_flow_enabled = 0;
	}

	/* enable or disable the all-multicast flow */
	if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
		if (sc->sc_allmulti_flow_enabled == 0) {
			uint8_t mcast[ETHER_ADDR_LEN];

			/* match only on the multicast bit of the first byte */
			memset(mcast, 0, sizeof(mcast));
			mcast[0] = 0x01;
			mcx_set_flow_table_entry_mac(sc,
			    MCX_FLOW_GROUP_ALLMULTI, 0, mcast, dest);
			sc->sc_allmulti_flow_enabled = 1;
		}
	} else if (sc->sc_allmulti_flow_enabled != 0) {
		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
		sc->sc_allmulti_flow_enabled = 0;
	}

	/* the vport context sits 240 bytes into the command input */
	insize = sizeof(struct mcx_nic_vport_ctx) + 240;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_NIC_VPORT_CONTEXT);
	in->cmd_op_mod = htobe16(0);
	in->cmd_field_select = htobe32(
	    MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC |
	    MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
		printf(", unable to allocate modify "
		    "nic vport context mailboxen\n");
		return (-1);
	}
	ctx = (struct mcx_nic_vport_ctx *)
	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 240);
	ctx->vp_mtu = htobe32(sc->sc_hardmtu);
	/*
	 * always leave promisc-all enabled on the vport since we
	 * can't give it a vlan list, and we're already doing multicast
	 * filtering in the flow table.
	 */
	ctx->vp_flags = htobe16(MCX_NIC_VPORT_CTX_PROMISC_ALL);

	mcx_cmdq_mboxes_sign(&mxm, 1);
	mcx_cmdq_post(sc, cqe, 0);

	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf(", modify nic vport context timeout\n");
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf(", modify nic vport context command corrupt\n");
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf(", modify nic vport context failed (%x, %x)\n",
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

free:
	mcx_dmamem_free(sc, &mxm);

	return (error);
}

/*
 * Allocate a user access region (UAR) and store its id in '*uar'.
 * Returns 0 on success, -1 on any failure.
 */
static int
mcx_alloc_uar(struct mcx_softc *sc, int *uar)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_alloc_uar_in *in;
	struct mcx_cmd_alloc_uar_out *out;
	int error;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_UAR);
	in->cmd_op_mod = htobe16(0);

	mcx_cmdq_post(sc, cqe, 0);

	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf(", alloc uar timeout\n");
		return (-1);
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf(", alloc uar command corrupt\n");
		return (-1);
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf(", alloc uar failed (%x)\n", out->cmd_status);
		return (-1);
	}

	*uar = mcx_get_id(out->cmd_uar);
	return (0);
}

/*
 * Create an event queue bound to 'uar' and interrupt 'vector', delivering
 * the events in 'events'.  Queue memory is allocated here and kept in
 * eq->eq_mem; on success the queue is armed before returning.
 */
static int
mcx_create_eq(struct mcx_softc *sc, struct mcx_eq *eq, int uar,
    uint64_t events, int vector)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_dmamem mxm;
	struct mcx_cmd_create_eq_in *in;
	struct mcx_cmd_create_eq_mb_in *mbin;
	struct mcx_cmd_create_eq_out *out;
4366 struct mcx_eq_entry *eqe; 4367 int error; 4368 uint64_t *pas; 4369 int insize, npages, paslen, i, token; 4370 4371 eq->eq_cons = 0; 4372 4373 npages = howmany((1 << MCX_LOG_EQ_SIZE) * sizeof(struct mcx_eq_entry), 4374 MCX_PAGE_SIZE); 4375 paslen = npages * sizeof(*pas); 4376 insize = sizeof(struct mcx_cmd_create_eq_mb_in) + paslen; 4377 4378 if (mcx_dmamem_alloc(sc, &eq->eq_mem, npages * MCX_PAGE_SIZE, 4379 MCX_PAGE_SIZE) != 0) { 4380 printf(", unable to allocate event queue memory\n"); 4381 return (-1); 4382 } 4383 4384 eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&eq->eq_mem); 4385 for (i = 0; i < (1 << MCX_LOG_EQ_SIZE); i++) { 4386 eqe[i].eq_owner = MCX_EQ_ENTRY_OWNER_INIT; 4387 } 4388 4389 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 4390 token = mcx_cmdq_token(sc); 4391 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token); 4392 4393 in = mcx_cmdq_in(cqe); 4394 in->cmd_opcode = htobe16(MCX_CMD_CREATE_EQ); 4395 in->cmd_op_mod = htobe16(0); 4396 4397 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 4398 howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE), 4399 &cqe->cq_input_ptr, token) != 0) { 4400 printf(", unable to allocate create eq mailboxen\n"); 4401 goto free_eq; 4402 } 4403 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); 4404 mbin->cmd_eq_ctx.eq_uar_size = htobe32( 4405 (MCX_LOG_EQ_SIZE << MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT) | uar); 4406 mbin->cmd_eq_ctx.eq_intr = vector; 4407 mbin->cmd_event_bitmask = htobe64(events); 4408 4409 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem), 4410 0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD); 4411 4412 /* physical addresses follow the mailbox in data */ 4413 mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &eq->eq_mem); 4414 mcx_cmdq_mboxes_sign(&mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE)); 4415 mcx_cmdq_post(sc, cqe, 0); 4416 4417 error = mcx_cmdq_poll(sc, cqe, 1000); 4418 if (error != 0) { 4419 printf(", create eq timeout\n"); 4420 goto free_mxm; 4421 } 4422 if (mcx_cmdq_verify(cqe) != 0) { 4423 printf(", create eq command 
corrupt\n"); 4424 goto free_mxm; 4425 } 4426 4427 out = mcx_cmdq_out(cqe); 4428 if (out->cmd_status != MCX_CQ_STATUS_OK) { 4429 printf(", create eq failed (%x, %x)\n", out->cmd_status, 4430 be32toh(out->cmd_syndrome)); 4431 goto free_mxm; 4432 } 4433 4434 eq->eq_n = mcx_get_id(out->cmd_eqn); 4435 4436 mcx_dmamem_free(sc, &mxm); 4437 4438 mcx_arm_eq(sc, eq, uar); 4439 4440 return (0); 4441 4442 free_mxm: 4443 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem), 4444 0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD); 4445 mcx_dmamem_free(sc, &mxm); 4446 free_eq: 4447 mcx_dmamem_free(sc, &eq->eq_mem); 4448 return (-1); 4449 } 4450 4451 static int 4452 mcx_alloc_pd(struct mcx_softc *sc) 4453 { 4454 struct mcx_cmdq_entry *cqe; 4455 struct mcx_cmd_alloc_pd_in *in; 4456 struct mcx_cmd_alloc_pd_out *out; 4457 int error; 4458 4459 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 4460 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc)); 4461 4462 in = mcx_cmdq_in(cqe); 4463 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_PD); 4464 in->cmd_op_mod = htobe16(0); 4465 4466 mcx_cmdq_post(sc, cqe, 0); 4467 4468 error = mcx_cmdq_poll(sc, cqe, 1000); 4469 if (error != 0) { 4470 printf(", alloc pd timeout\n"); 4471 return (-1); 4472 } 4473 if (mcx_cmdq_verify(cqe) != 0) { 4474 printf(", alloc pd command corrupt\n"); 4475 return (-1); 4476 } 4477 4478 out = mcx_cmdq_out(cqe); 4479 if (out->cmd_status != MCX_CQ_STATUS_OK) { 4480 printf(", alloc pd failed (%x)\n", out->cmd_status); 4481 return (-1); 4482 } 4483 4484 sc->sc_pd = mcx_get_id(out->cmd_pd); 4485 return (0); 4486 } 4487 4488 static int 4489 mcx_alloc_tdomain(struct mcx_softc *sc) 4490 { 4491 struct mcx_cmdq_entry *cqe; 4492 struct mcx_cmd_alloc_td_in *in; 4493 struct mcx_cmd_alloc_td_out *out; 4494 int error; 4495 4496 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 4497 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc)); 4498 4499 in = mcx_cmdq_in(cqe); 4500 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_TRANSPORT_DOMAIN); 
4501 in->cmd_op_mod = htobe16(0); 4502 4503 mcx_cmdq_post(sc, cqe, 0); 4504 4505 error = mcx_cmdq_poll(sc, cqe, 1000); 4506 if (error != 0) { 4507 printf(", alloc transport domain timeout\n"); 4508 return (-1); 4509 } 4510 if (mcx_cmdq_verify(cqe) != 0) { 4511 printf(", alloc transport domain command corrupt\n"); 4512 return (-1); 4513 } 4514 4515 out = mcx_cmdq_out(cqe); 4516 if (out->cmd_status != MCX_CQ_STATUS_OK) { 4517 printf(", alloc transport domain failed (%x)\n", 4518 out->cmd_status); 4519 return (-1); 4520 } 4521 4522 sc->sc_tdomain = mcx_get_id(out->cmd_tdomain); 4523 return (0); 4524 } 4525 4526 static int 4527 mcx_query_nic_vport_context(struct mcx_softc *sc, uint8_t *enaddr) 4528 { 4529 struct mcx_dmamem mxm; 4530 struct mcx_cmdq_entry *cqe; 4531 struct mcx_cmd_query_nic_vport_context_in *in; 4532 struct mcx_cmd_query_nic_vport_context_out *out; 4533 struct mcx_nic_vport_ctx *ctx; 4534 uint8_t *addr; 4535 int error, token, i; 4536 4537 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 4538 token = mcx_cmdq_token(sc); 4539 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx), token); 4540 4541 in = mcx_cmdq_in(cqe); 4542 in->cmd_opcode = htobe16(MCX_CMD_QUERY_NIC_VPORT_CONTEXT); 4543 in->cmd_op_mod = htobe16(0); 4544 in->cmd_allowed_list_type = 0; 4545 4546 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, 4547 &cqe->cq_output_ptr, token) != 0) { 4548 printf(", unable to allocate " 4549 "query nic vport context mailboxen\n"); 4550 return (-1); 4551 } 4552 mcx_cmdq_mboxes_sign(&mxm, 1); 4553 mcx_cmdq_post(sc, cqe, 0); 4554 4555 error = mcx_cmdq_poll(sc, cqe, 1000); 4556 if (error != 0) { 4557 printf(", query nic vport context timeout\n"); 4558 goto free; 4559 } 4560 if (mcx_cmdq_verify(cqe) != 0) { 4561 printf(", query nic vport context command corrupt\n"); 4562 goto free; 4563 } 4564 4565 out = mcx_cmdq_out(cqe); 4566 if (out->cmd_status != MCX_CQ_STATUS_OK) { 4567 printf(", query nic vport context failed (%x, %x)\n", 4568 out->cmd_status, 
be32toh(out->cmd_syndrome)); 4569 error = -1; 4570 goto free; 4571 } 4572 4573 ctx = (struct mcx_nic_vport_ctx *) 4574 mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); 4575 addr = (uint8_t *)&ctx->vp_perm_addr; 4576 for (i = 0; i < ETHER_ADDR_LEN; i++) { 4577 enaddr[i] = addr[i + 2]; 4578 } 4579 free: 4580 mcx_dmamem_free(sc, &mxm); 4581 4582 return (error); 4583 } 4584 4585 static int 4586 mcx_query_special_contexts(struct mcx_softc *sc) 4587 { 4588 struct mcx_cmdq_entry *cqe; 4589 struct mcx_cmd_query_special_ctx_in *in; 4590 struct mcx_cmd_query_special_ctx_out *out; 4591 int error; 4592 4593 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 4594 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc)); 4595 4596 in = mcx_cmdq_in(cqe); 4597 in->cmd_opcode = htobe16(MCX_CMD_QUERY_SPECIAL_CONTEXTS); 4598 in->cmd_op_mod = htobe16(0); 4599 4600 mcx_cmdq_post(sc, cqe, 0); 4601 4602 error = mcx_cmdq_poll(sc, cqe, 1000); 4603 if (error != 0) { 4604 printf(", query special contexts timeout\n"); 4605 return (-1); 4606 } 4607 if (mcx_cmdq_verify(cqe) != 0) { 4608 printf(", query special contexts command corrupt\n"); 4609 return (-1); 4610 } 4611 4612 out = mcx_cmdq_out(cqe); 4613 if (out->cmd_status != MCX_CQ_STATUS_OK) { 4614 printf(", query special contexts failed (%x)\n", 4615 out->cmd_status); 4616 return (-1); 4617 } 4618 4619 sc->sc_lkey = be32toh(out->cmd_resd_lkey); 4620 return (0); 4621 } 4622 4623 static int 4624 mcx_set_port_mtu(struct mcx_softc *sc, int mtu) 4625 { 4626 struct mcx_reg_pmtu pmtu; 4627 int error; 4628 4629 /* read max mtu */ 4630 memset(&pmtu, 0, sizeof(pmtu)); 4631 pmtu.rp_local_port = 1; 4632 error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_READ, &pmtu, 4633 sizeof(pmtu)); 4634 if (error != 0) { 4635 printf(", unable to get port MTU\n"); 4636 return error; 4637 } 4638 4639 mtu = uimin(mtu, be16toh(pmtu.rp_max_mtu)); 4640 pmtu.rp_admin_mtu = htobe16(mtu); 4641 error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_WRITE, &pmtu, 4642 
sizeof(pmtu)); 4643 if (error != 0) { 4644 printf(", unable to set port MTU\n"); 4645 return error; 4646 } 4647 4648 sc->sc_hardmtu = mtu; 4649 sc->sc_rxbufsz = roundup(mtu + ETHER_ALIGN, sizeof(long)); 4650 return 0; 4651 } 4652 4653 static int 4654 mcx_create_cq(struct mcx_softc *sc, struct mcx_cq *cq, int uar, int db, int eqn) 4655 { 4656 struct mcx_cmdq_entry *cmde; 4657 struct mcx_cq_entry *cqe; 4658 struct mcx_dmamem mxm; 4659 struct mcx_cmd_create_cq_in *in; 4660 struct mcx_cmd_create_cq_mb_in *mbin; 4661 struct mcx_cmd_create_cq_out *out; 4662 int error; 4663 uint64_t *pas; 4664 int insize, npages, paslen, i, token; 4665 4666 cq->cq_doorbell = MCX_CQ_DOORBELL_BASE + (MCX_CQ_DOORBELL_STRIDE * db); 4667 4668 npages = howmany((1 << MCX_LOG_CQ_SIZE) * sizeof(struct mcx_cq_entry), 4669 MCX_PAGE_SIZE); 4670 paslen = npages * sizeof(*pas); 4671 insize = sizeof(struct mcx_cmd_create_cq_mb_in) + paslen; 4672 4673 if (mcx_dmamem_alloc(sc, &cq->cq_mem, npages * MCX_PAGE_SIZE, 4674 MCX_PAGE_SIZE) != 0) { 4675 printf("%s: unable to allocate completion queue memory\n", 4676 DEVNAME(sc)); 4677 return (-1); 4678 } 4679 cqe = MCX_DMA_KVA(&cq->cq_mem); 4680 for (i = 0; i < (1 << MCX_LOG_CQ_SIZE); i++) { 4681 cqe[i].cq_opcode_owner = MCX_CQ_ENTRY_FLAG_OWNER; 4682 } 4683 4684 cmde = MCX_DMA_KVA(&sc->sc_cmdq_mem); 4685 token = mcx_cmdq_token(sc); 4686 mcx_cmdq_init(sc, cmde, sizeof(*in) + insize, sizeof(*out), token); 4687 4688 in = mcx_cmdq_in(cmde); 4689 in->cmd_opcode = htobe16(MCX_CMD_CREATE_CQ); 4690 in->cmd_op_mod = htobe16(0); 4691 4692 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 4693 howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE), 4694 &cmde->cq_input_ptr, token) != 0) { 4695 printf("%s: unable to allocate create cq mailboxen\n", 4696 DEVNAME(sc)); 4697 goto free_cq; 4698 } 4699 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); 4700 mbin->cmd_cq_ctx.cq_uar_size = htobe32( 4701 (MCX_LOG_CQ_SIZE << MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT) | uar); 4702 mbin->cmd_cq_ctx.cq_eqn = htobe32(eqn); 4703 
mbin->cmd_cq_ctx.cq_period_max_count = htobe32( 4704 (MCX_CQ_MOD_PERIOD << MCX_CQ_CTX_PERIOD_SHIFT) | 4705 MCX_CQ_MOD_COUNTER); 4706 mbin->cmd_cq_ctx.cq_doorbell = htobe64( 4707 MCX_DMA_DVA(&sc->sc_doorbell_mem) + cq->cq_doorbell); 4708 4709 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem), 4710 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_PREREAD); 4711 4712 /* physical addresses follow the mailbox in data */ 4713 mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &cq->cq_mem); 4714 mcx_cmdq_post(sc, cmde, 0); 4715 4716 error = mcx_cmdq_poll(sc, cmde, 1000); 4717 if (error != 0) { 4718 printf("%s: create cq timeout\n", DEVNAME(sc)); 4719 goto free_mxm; 4720 } 4721 if (mcx_cmdq_verify(cmde) != 0) { 4722 printf("%s: create cq command corrupt\n", DEVNAME(sc)); 4723 goto free_mxm; 4724 } 4725 4726 out = mcx_cmdq_out(cmde); 4727 if (out->cmd_status != MCX_CQ_STATUS_OK) { 4728 printf("%s: create cq failed (%x, %x)\n", DEVNAME(sc), 4729 out->cmd_status, be32toh(out->cmd_syndrome)); 4730 goto free_mxm; 4731 } 4732 4733 cq->cq_n = mcx_get_id(out->cmd_cqn); 4734 cq->cq_cons = 0; 4735 cq->cq_count = 0; 4736 4737 mcx_dmamem_free(sc, &mxm); 4738 4739 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem), 4740 cq->cq_doorbell, sizeof(struct mcx_cq_doorbell), 4741 BUS_DMASYNC_PREWRITE); 4742 4743 mcx_arm_cq(sc, cq, uar); 4744 4745 return (0); 4746 4747 free_mxm: 4748 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem), 4749 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD); 4750 mcx_dmamem_free(sc, &mxm); 4751 free_cq: 4752 mcx_dmamem_free(sc, &cq->cq_mem); 4753 return (-1); 4754 } 4755 4756 static int 4757 mcx_destroy_cq(struct mcx_softc *sc, struct mcx_cq *cq) 4758 { 4759 struct mcx_cmdq_entry *cqe; 4760 struct mcx_cmd_destroy_cq_in *in; 4761 struct mcx_cmd_destroy_cq_out *out; 4762 int error; 4763 int token; 4764 4765 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 4766 token = mcx_cmdq_token(sc); 4767 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token); 4768 4769 in = 
mcx_cmdq_in(cqe); 4770 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_CQ); 4771 in->cmd_op_mod = htobe16(0); 4772 in->cmd_cqn = htobe32(cq->cq_n); 4773 4774 mcx_cmdq_post(sc, cqe, 0); 4775 error = mcx_cmdq_poll(sc, cqe, 1000); 4776 if (error != 0) { 4777 printf("%s: destroy cq timeout\n", DEVNAME(sc)); 4778 return error; 4779 } 4780 if (mcx_cmdq_verify(cqe) != 0) { 4781 printf("%s: destroy cq command corrupt\n", DEVNAME(sc)); 4782 return error; 4783 } 4784 4785 out = mcx_cmdq_out(cqe); 4786 if (out->cmd_status != MCX_CQ_STATUS_OK) { 4787 printf("%s: destroy cq failed (%x, %x)\n", DEVNAME(sc), 4788 out->cmd_status, be32toh(out->cmd_syndrome)); 4789 return -1; 4790 } 4791 4792 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem), 4793 cq->cq_doorbell, sizeof(struct mcx_cq_doorbell), 4794 BUS_DMASYNC_POSTWRITE); 4795 4796 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem), 4797 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD); 4798 mcx_dmamem_free(sc, &cq->cq_mem); 4799 4800 cq->cq_n = 0; 4801 cq->cq_cons = 0; 4802 cq->cq_count = 0; 4803 return 0; 4804 } 4805 4806 static int 4807 mcx_create_rq(struct mcx_softc *sc, struct mcx_rx *rx, int db, int cqn) 4808 { 4809 struct mcx_cmdq_entry *cqe; 4810 struct mcx_dmamem mxm; 4811 struct mcx_cmd_create_rq_in *in; 4812 struct mcx_cmd_create_rq_out *out; 4813 struct mcx_rq_ctx *mbin; 4814 int error; 4815 uint64_t *pas; 4816 uint32_t rq_flags; 4817 int insize, npages, paslen, token; 4818 4819 rx->rx_doorbell = MCX_WQ_DOORBELL_BASE + 4820 (db * MCX_WQ_DOORBELL_STRIDE); 4821 4822 npages = howmany((1 << MCX_LOG_RQ_SIZE) * sizeof(struct mcx_rq_entry), 4823 MCX_PAGE_SIZE); 4824 paslen = npages * sizeof(*pas); 4825 insize = 0x10 + sizeof(struct mcx_rq_ctx) + paslen; 4826 4827 if (mcx_dmamem_alloc(sc, &rx->rx_rq_mem, npages * MCX_PAGE_SIZE, 4828 MCX_PAGE_SIZE) != 0) { 4829 printf("%s: unable to allocate receive queue memory\n", 4830 DEVNAME(sc)); 4831 return (-1); 4832 } 4833 4834 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 4835 token 
= mcx_cmdq_token(sc); 4836 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token); 4837 4838 in = mcx_cmdq_in(cqe); 4839 in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQ); 4840 in->cmd_op_mod = htobe16(0); 4841 4842 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 4843 howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE), 4844 &cqe->cq_input_ptr, token) != 0) { 4845 printf("%s: unable to allocate create rq mailboxen\n", 4846 DEVNAME(sc)); 4847 goto free_rq; 4848 } 4849 mbin = (struct mcx_rq_ctx *) 4850 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10); 4851 rq_flags = MCX_RQ_CTX_RLKEY; 4852 mbin->rq_flags = htobe32(rq_flags); 4853 mbin->rq_cqn = htobe32(cqn); 4854 mbin->rq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC; 4855 mbin->rq_wq.wq_pd = htobe32(sc->sc_pd); 4856 mbin->rq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) + 4857 rx->rx_doorbell); 4858 mbin->rq_wq.wq_log_stride = htobe16(4); 4859 mbin->rq_wq.wq_log_size = MCX_LOG_RQ_SIZE; 4860 4861 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem), 4862 0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_PREWRITE); 4863 4864 /* physical addresses follow the mailbox in data */ 4865 mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10, npages, &rx->rx_rq_mem); 4866 mcx_cmdq_post(sc, cqe, 0); 4867 4868 error = mcx_cmdq_poll(sc, cqe, 1000); 4869 if (error != 0) { 4870 printf("%s: create rq timeout\n", DEVNAME(sc)); 4871 goto free_mxm; 4872 } 4873 if (mcx_cmdq_verify(cqe) != 0) { 4874 printf("%s: create rq command corrupt\n", DEVNAME(sc)); 4875 goto free_mxm; 4876 } 4877 4878 out = mcx_cmdq_out(cqe); 4879 if (out->cmd_status != MCX_CQ_STATUS_OK) { 4880 printf("%s: create rq failed (%x, %x)\n", DEVNAME(sc), 4881 out->cmd_status, be32toh(out->cmd_syndrome)); 4882 goto free_mxm; 4883 } 4884 4885 rx->rx_rqn = mcx_get_id(out->cmd_rqn); 4886 4887 mcx_dmamem_free(sc, &mxm); 4888 4889 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem), 4890 rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE); 4891 4892 return (0); 4893 4894 
free_mxm: 4895 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem), 4896 0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE); 4897 mcx_dmamem_free(sc, &mxm); 4898 free_rq: 4899 mcx_dmamem_free(sc, &rx->rx_rq_mem); 4900 return (-1); 4901 } 4902 4903 static int 4904 mcx_ready_rq(struct mcx_softc *sc, struct mcx_rx *rx) 4905 { 4906 struct mcx_cmdq_entry *cqe; 4907 struct mcx_dmamem mxm; 4908 struct mcx_cmd_modify_rq_in *in; 4909 struct mcx_cmd_modify_rq_mb_in *mbin; 4910 struct mcx_cmd_modify_rq_out *out; 4911 int error; 4912 int token; 4913 4914 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 4915 token = mcx_cmdq_token(sc); 4916 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), 4917 sizeof(*out), token); 4918 4919 in = mcx_cmdq_in(cqe); 4920 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_RQ); 4921 in->cmd_op_mod = htobe16(0); 4922 in->cmd_rq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | rx->rx_rqn); 4923 4924 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, 4925 &cqe->cq_input_ptr, token) != 0) { 4926 printf("%s: unable to allocate modify rq mailbox\n", 4927 DEVNAME(sc)); 4928 return (-1); 4929 } 4930 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); 4931 mbin->cmd_rq_ctx.rq_flags = htobe32( 4932 MCX_QUEUE_STATE_RDY << MCX_RQ_CTX_STATE_SHIFT); 4933 4934 mcx_cmdq_mboxes_sign(&mxm, 1); 4935 mcx_cmdq_post(sc, cqe, 0); 4936 error = mcx_cmdq_poll(sc, cqe, 1000); 4937 if (error != 0) { 4938 printf("%s: modify rq timeout\n", DEVNAME(sc)); 4939 goto free; 4940 } 4941 if (mcx_cmdq_verify(cqe) != 0) { 4942 printf("%s: modify rq command corrupt\n", DEVNAME(sc)); 4943 goto free; 4944 } 4945 4946 out = mcx_cmdq_out(cqe); 4947 if (out->cmd_status != MCX_CQ_STATUS_OK) { 4948 printf("%s: modify rq failed (%x, %x)\n", DEVNAME(sc), 4949 out->cmd_status, be32toh(out->cmd_syndrome)); 4950 error = -1; 4951 goto free; 4952 } 4953 4954 free: 4955 mcx_dmamem_free(sc, &mxm); 4956 return (error); 4957 } 4958 4959 static int 4960 mcx_destroy_rq(struct mcx_softc *sc, struct mcx_rx *rx) 4961 { 4962 struct 
mcx_cmdq_entry *cqe; 4963 struct mcx_cmd_destroy_rq_in *in; 4964 struct mcx_cmd_destroy_rq_out *out; 4965 int error; 4966 int token; 4967 4968 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 4969 token = mcx_cmdq_token(sc); 4970 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token); 4971 4972 in = mcx_cmdq_in(cqe); 4973 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQ); 4974 in->cmd_op_mod = htobe16(0); 4975 in->cmd_rqn = htobe32(rx->rx_rqn); 4976 4977 mcx_cmdq_post(sc, cqe, 0); 4978 error = mcx_cmdq_poll(sc, cqe, 1000); 4979 if (error != 0) { 4980 printf("%s: destroy rq timeout\n", DEVNAME(sc)); 4981 return error; 4982 } 4983 if (mcx_cmdq_verify(cqe) != 0) { 4984 printf("%s: destroy rq command corrupt\n", DEVNAME(sc)); 4985 return error; 4986 } 4987 4988 out = mcx_cmdq_out(cqe); 4989 if (out->cmd_status != MCX_CQ_STATUS_OK) { 4990 printf("%s: destroy rq failed (%x, %x)\n", DEVNAME(sc), 4991 out->cmd_status, be32toh(out->cmd_syndrome)); 4992 return -1; 4993 } 4994 4995 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem), 4996 rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE); 4997 4998 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem), 4999 0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE); 5000 mcx_dmamem_free(sc, &rx->rx_rq_mem); 5001 5002 rx->rx_rqn = 0; 5003 return 0; 5004 } 5005 5006 static int 5007 mcx_create_tir_direct(struct mcx_softc *sc, struct mcx_rx *rx, int *tirn) 5008 { 5009 struct mcx_cmdq_entry *cqe; 5010 struct mcx_dmamem mxm; 5011 struct mcx_cmd_create_tir_in *in; 5012 struct mcx_cmd_create_tir_mb_in *mbin; 5013 struct mcx_cmd_create_tir_out *out; 5014 int error; 5015 int token; 5016 5017 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 5018 token = mcx_cmdq_token(sc); 5019 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), 5020 sizeof(*out), token); 5021 5022 in = mcx_cmdq_in(cqe); 5023 in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR); 5024 in->cmd_op_mod = htobe16(0); 5025 5026 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, 5027 &cqe->cq_input_ptr, 
token) != 0) { 5028 printf("%s: unable to allocate create tir mailbox\n", 5029 DEVNAME(sc)); 5030 return (-1); 5031 } 5032 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); 5033 /* leave disp_type = 0, so packets get sent to the inline rqn */ 5034 mbin->cmd_inline_rqn = htobe32(rx->rx_rqn); 5035 mbin->cmd_tdomain = htobe32(sc->sc_tdomain); 5036 5037 mcx_cmdq_post(sc, cqe, 0); 5038 error = mcx_cmdq_poll(sc, cqe, 1000); 5039 if (error != 0) { 5040 printf("%s: create tir timeout\n", DEVNAME(sc)); 5041 goto free; 5042 } 5043 if (mcx_cmdq_verify(cqe) != 0) { 5044 printf("%s: create tir command corrupt\n", DEVNAME(sc)); 5045 goto free; 5046 } 5047 5048 out = mcx_cmdq_out(cqe); 5049 if (out->cmd_status != MCX_CQ_STATUS_OK) { 5050 printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc), 5051 out->cmd_status, be32toh(out->cmd_syndrome)); 5052 error = -1; 5053 goto free; 5054 } 5055 5056 *tirn = mcx_get_id(out->cmd_tirn); 5057 free: 5058 mcx_dmamem_free(sc, &mxm); 5059 return (error); 5060 } 5061 5062 static int 5063 mcx_create_tir_indirect(struct mcx_softc *sc, int rqtn, uint32_t hash_sel, 5064 int *tirn) 5065 { 5066 struct mcx_cmdq_entry *cqe; 5067 struct mcx_dmamem mxm; 5068 struct mcx_cmd_create_tir_in *in; 5069 struct mcx_cmd_create_tir_mb_in *mbin; 5070 struct mcx_cmd_create_tir_out *out; 5071 int error; 5072 int token; 5073 5074 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 5075 token = mcx_cmdq_token(sc); 5076 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), 5077 sizeof(*out), token); 5078 5079 in = mcx_cmdq_in(cqe); 5080 in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR); 5081 in->cmd_op_mod = htobe16(0); 5082 5083 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, 5084 &cqe->cq_input_ptr, token) != 0) { 5085 printf("%s: unable to allocate create tir mailbox\n", 5086 DEVNAME(sc)); 5087 return (-1); 5088 } 5089 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); 5090 mbin->cmd_disp_type = htobe32(MCX_TIR_CTX_DISP_TYPE_INDIRECT 5091 << MCX_TIR_CTX_DISP_TYPE_SHIFT); 5092 mbin->cmd_indir_table = 
htobe32(rqtn); 5093 mbin->cmd_tdomain = htobe32(sc->sc_tdomain | 5094 MCX_TIR_CTX_HASH_TOEPLITZ << MCX_TIR_CTX_HASH_SHIFT); 5095 mbin->cmd_rx_hash_sel_outer = htobe32(hash_sel); 5096 stoeplitz_to_key(&mbin->cmd_rx_hash_key, 5097 sizeof(mbin->cmd_rx_hash_key)); 5098 5099 mcx_cmdq_post(sc, cqe, 0); 5100 error = mcx_cmdq_poll(sc, cqe, 1000); 5101 if (error != 0) { 5102 printf("%s: create tir timeout\n", DEVNAME(sc)); 5103 goto free; 5104 } 5105 if (mcx_cmdq_verify(cqe) != 0) { 5106 printf("%s: create tir command corrupt\n", DEVNAME(sc)); 5107 goto free; 5108 } 5109 5110 out = mcx_cmdq_out(cqe); 5111 if (out->cmd_status != MCX_CQ_STATUS_OK) { 5112 printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc), 5113 out->cmd_status, be32toh(out->cmd_syndrome)); 5114 error = -1; 5115 goto free; 5116 } 5117 5118 *tirn = mcx_get_id(out->cmd_tirn); 5119 free: 5120 mcx_dmamem_free(sc, &mxm); 5121 return (error); 5122 } 5123 5124 static int 5125 mcx_destroy_tir(struct mcx_softc *sc, int tirn) 5126 { 5127 struct mcx_cmdq_entry *cqe; 5128 struct mcx_cmd_destroy_tir_in *in; 5129 struct mcx_cmd_destroy_tir_out *out; 5130 int error; 5131 int token; 5132 5133 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 5134 token = mcx_cmdq_token(sc); 5135 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token); 5136 5137 in = mcx_cmdq_in(cqe); 5138 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIR); 5139 in->cmd_op_mod = htobe16(0); 5140 in->cmd_tirn = htobe32(tirn); 5141 5142 mcx_cmdq_post(sc, cqe, 0); 5143 error = mcx_cmdq_poll(sc, cqe, 1000); 5144 if (error != 0) { 5145 printf("%s: destroy tir timeout\n", DEVNAME(sc)); 5146 return error; 5147 } 5148 if (mcx_cmdq_verify(cqe) != 0) { 5149 printf("%s: destroy tir command corrupt\n", DEVNAME(sc)); 5150 return error; 5151 } 5152 5153 out = mcx_cmdq_out(cqe); 5154 if (out->cmd_status != MCX_CQ_STATUS_OK) { 5155 printf("%s: destroy tir failed (%x, %x)\n", DEVNAME(sc), 5156 out->cmd_status, be32toh(out->cmd_syndrome)); 5157 return -1; 5158 } 5159 5160 return (0); 5161 } 
5162 5163 static int 5164 mcx_create_sq(struct mcx_softc *sc, struct mcx_tx *tx, int uar, int db, 5165 int cqn) 5166 { 5167 struct mcx_cmdq_entry *cqe; 5168 struct mcx_dmamem mxm; 5169 struct mcx_cmd_create_sq_in *in; 5170 struct mcx_sq_ctx *mbin; 5171 struct mcx_cmd_create_sq_out *out; 5172 int error; 5173 uint64_t *pas; 5174 int insize, npages, paslen, token; 5175 5176 tx->tx_doorbell = MCX_WQ_DOORBELL_BASE + 5177 (db * MCX_WQ_DOORBELL_STRIDE) + 4; 5178 5179 npages = howmany((1 << MCX_LOG_SQ_SIZE) * sizeof(struct mcx_sq_entry), 5180 MCX_PAGE_SIZE); 5181 paslen = npages * sizeof(*pas); 5182 insize = sizeof(struct mcx_sq_ctx) + paslen; 5183 5184 if (mcx_dmamem_alloc(sc, &tx->tx_sq_mem, npages * MCX_PAGE_SIZE, 5185 MCX_PAGE_SIZE) != 0) { 5186 printf("%s: unable to allocate send queue memory\n", 5187 DEVNAME(sc)); 5188 return (-1); 5189 } 5190 5191 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 5192 token = mcx_cmdq_token(sc); 5193 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize + paslen, sizeof(*out), 5194 token); 5195 5196 in = mcx_cmdq_in(cqe); 5197 in->cmd_opcode = htobe16(MCX_CMD_CREATE_SQ); 5198 in->cmd_op_mod = htobe16(0); 5199 5200 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 5201 howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE), 5202 &cqe->cq_input_ptr, token) != 0) { 5203 printf("%s: unable to allocate create sq mailboxen\n", 5204 DEVNAME(sc)); 5205 goto free_sq; 5206 } 5207 mbin = (struct mcx_sq_ctx *) 5208 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10); 5209 mbin->sq_flags = htobe32(MCX_SQ_CTX_RLKEY | 5210 (1 << MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT)); 5211 mbin->sq_cqn = htobe32(cqn); 5212 mbin->sq_tis_lst_sz = htobe32(1 << MCX_SQ_CTX_TIS_LST_SZ_SHIFT); 5213 mbin->sq_tis_num = htobe32(sc->sc_tis); 5214 mbin->sq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC; 5215 mbin->sq_wq.wq_pd = htobe32(sc->sc_pd); 5216 mbin->sq_wq.wq_uar_page = htobe32(uar); 5217 mbin->sq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) + 5218 tx->tx_doorbell); 5219 mbin->sq_wq.wq_log_stride = 
htobe16(MCX_LOG_SQ_ENTRY_SIZE); 5220 mbin->sq_wq.wq_log_size = MCX_LOG_SQ_SIZE; 5221 5222 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem), 5223 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_PREWRITE); 5224 5225 /* physical addresses follow the mailbox in data */ 5226 mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10, 5227 npages, &tx->tx_sq_mem); 5228 mcx_cmdq_post(sc, cqe, 0); 5229 5230 error = mcx_cmdq_poll(sc, cqe, 1000); 5231 if (error != 0) { 5232 printf("%s: create sq timeout\n", DEVNAME(sc)); 5233 goto free_mxm; 5234 } 5235 if (mcx_cmdq_verify(cqe) != 0) { 5236 printf("%s: create sq command corrupt\n", DEVNAME(sc)); 5237 goto free_mxm; 5238 } 5239 5240 out = mcx_cmdq_out(cqe); 5241 if (out->cmd_status != MCX_CQ_STATUS_OK) { 5242 printf("%s: create sq failed (%x, %x)\n", DEVNAME(sc), 5243 out->cmd_status, be32toh(out->cmd_syndrome)); 5244 goto free_mxm; 5245 } 5246 5247 tx->tx_uar = uar; 5248 tx->tx_sqn = mcx_get_id(out->cmd_sqn); 5249 5250 mcx_dmamem_free(sc, &mxm); 5251 5252 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem), 5253 tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE); 5254 5255 return (0); 5256 5257 free_mxm: 5258 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem), 5259 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE); 5260 mcx_dmamem_free(sc, &mxm); 5261 free_sq: 5262 mcx_dmamem_free(sc, &tx->tx_sq_mem); 5263 return (-1); 5264 } 5265 5266 static int 5267 mcx_destroy_sq(struct mcx_softc *sc, struct mcx_tx *tx) 5268 { 5269 struct mcx_cmdq_entry *cqe; 5270 struct mcx_cmd_destroy_sq_in *in; 5271 struct mcx_cmd_destroy_sq_out *out; 5272 int error; 5273 int token; 5274 5275 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 5276 token = mcx_cmdq_token(sc); 5277 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token); 5278 5279 in = mcx_cmdq_in(cqe); 5280 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_SQ); 5281 in->cmd_op_mod = htobe16(0); 5282 in->cmd_sqn = htobe32(tx->tx_sqn); 5283 5284 mcx_cmdq_post(sc, cqe, 0); 5285 error = 
mcx_cmdq_poll(sc, cqe, 1000); 5286 if (error != 0) { 5287 printf("%s: destroy sq timeout\n", DEVNAME(sc)); 5288 return error; 5289 } 5290 if (mcx_cmdq_verify(cqe) != 0) { 5291 printf("%s: destroy sq command corrupt\n", DEVNAME(sc)); 5292 return error; 5293 } 5294 5295 out = mcx_cmdq_out(cqe); 5296 if (out->cmd_status != MCX_CQ_STATUS_OK) { 5297 printf("%s: destroy sq failed (%x, %x)\n", DEVNAME(sc), 5298 out->cmd_status, be32toh(out->cmd_syndrome)); 5299 return -1; 5300 } 5301 5302 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem), 5303 tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE); 5304 5305 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem), 5306 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE); 5307 mcx_dmamem_free(sc, &tx->tx_sq_mem); 5308 5309 tx->tx_sqn = 0; 5310 return 0; 5311 } 5312 5313 static int 5314 mcx_ready_sq(struct mcx_softc *sc, struct mcx_tx *tx) 5315 { 5316 struct mcx_cmdq_entry *cqe; 5317 struct mcx_dmamem mxm; 5318 struct mcx_cmd_modify_sq_in *in; 5319 struct mcx_cmd_modify_sq_mb_in *mbin; 5320 struct mcx_cmd_modify_sq_out *out; 5321 int error; 5322 int token; 5323 5324 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 5325 token = mcx_cmdq_token(sc); 5326 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), 5327 sizeof(*out), token); 5328 5329 in = mcx_cmdq_in(cqe); 5330 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_SQ); 5331 in->cmd_op_mod = htobe16(0); 5332 in->cmd_sq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | tx->tx_sqn); 5333 5334 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, 5335 &cqe->cq_input_ptr, token) != 0) { 5336 printf("%s: unable to allocate modify sq mailbox\n", 5337 DEVNAME(sc)); 5338 return (-1); 5339 } 5340 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); 5341 mbin->cmd_sq_ctx.sq_flags = htobe32( 5342 MCX_QUEUE_STATE_RDY << MCX_SQ_CTX_STATE_SHIFT); 5343 5344 mcx_cmdq_mboxes_sign(&mxm, 1); 5345 mcx_cmdq_post(sc, cqe, 0); 5346 error = mcx_cmdq_poll(sc, cqe, 1000); 5347 if (error != 0) { 5348 printf("%s: modify sq 
timeout\n", DEVNAME(sc)); 5349 goto free; 5350 } 5351 if (mcx_cmdq_verify(cqe) != 0) { 5352 printf("%s: modify sq command corrupt\n", DEVNAME(sc)); 5353 goto free; 5354 } 5355 5356 out = mcx_cmdq_out(cqe); 5357 if (out->cmd_status != MCX_CQ_STATUS_OK) { 5358 printf("%s: modify sq failed (%x, %x)\n", DEVNAME(sc), 5359 out->cmd_status, be32toh(out->cmd_syndrome)); 5360 error = -1; 5361 goto free; 5362 } 5363 5364 free: 5365 mcx_dmamem_free(sc, &mxm); 5366 return (error); 5367 } 5368 5369 static int 5370 mcx_create_tis(struct mcx_softc *sc, int *tis) 5371 { 5372 struct mcx_cmdq_entry *cqe; 5373 struct mcx_dmamem mxm; 5374 struct mcx_cmd_create_tis_in *in; 5375 struct mcx_cmd_create_tis_mb_in *mbin; 5376 struct mcx_cmd_create_tis_out *out; 5377 int error; 5378 int token; 5379 5380 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 5381 token = mcx_cmdq_token(sc); 5382 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), 5383 sizeof(*out), token); 5384 5385 in = mcx_cmdq_in(cqe); 5386 in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIS); 5387 in->cmd_op_mod = htobe16(0); 5388 5389 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, 5390 &cqe->cq_input_ptr, token) != 0) { 5391 printf("%s: unable to allocate create tis mailbox\n", 5392 DEVNAME(sc)); 5393 return (-1); 5394 } 5395 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); 5396 mbin->cmd_tdomain = htobe32(sc->sc_tdomain); 5397 5398 mcx_cmdq_mboxes_sign(&mxm, 1); 5399 mcx_cmdq_post(sc, cqe, 0); 5400 error = mcx_cmdq_poll(sc, cqe, 1000); 5401 if (error != 0) { 5402 printf("%s: create tis timeout\n", DEVNAME(sc)); 5403 goto free; 5404 } 5405 if (mcx_cmdq_verify(cqe) != 0) { 5406 printf("%s: create tis command corrupt\n", DEVNAME(sc)); 5407 goto free; 5408 } 5409 5410 out = mcx_cmdq_out(cqe); 5411 if (out->cmd_status != MCX_CQ_STATUS_OK) { 5412 printf("%s: create tis failed (%x, %x)\n", DEVNAME(sc), 5413 out->cmd_status, be32toh(out->cmd_syndrome)); 5414 error = -1; 5415 goto free; 5416 } 5417 5418 *tis = mcx_get_id(out->cmd_tisn); 5419 free: 5420 
mcx_dmamem_free(sc, &mxm); 5421 return (error); 5422 } 5423 5424 static int 5425 mcx_destroy_tis(struct mcx_softc *sc, int tis) 5426 { 5427 struct mcx_cmdq_entry *cqe; 5428 struct mcx_cmd_destroy_tis_in *in; 5429 struct mcx_cmd_destroy_tis_out *out; 5430 int error; 5431 int token; 5432 5433 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 5434 token = mcx_cmdq_token(sc); 5435 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token); 5436 5437 in = mcx_cmdq_in(cqe); 5438 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIS); 5439 in->cmd_op_mod = htobe16(0); 5440 in->cmd_tisn = htobe32(tis); 5441 5442 mcx_cmdq_post(sc, cqe, 0); 5443 error = mcx_cmdq_poll(sc, cqe, 1000); 5444 if (error != 0) { 5445 printf("%s: destroy tis timeout\n", DEVNAME(sc)); 5446 return error; 5447 } 5448 if (mcx_cmdq_verify(cqe) != 0) { 5449 printf("%s: destroy tis command corrupt\n", DEVNAME(sc)); 5450 return error; 5451 } 5452 5453 out = mcx_cmdq_out(cqe); 5454 if (out->cmd_status != MCX_CQ_STATUS_OK) { 5455 printf("%s: destroy tis failed (%x, %x)\n", DEVNAME(sc), 5456 out->cmd_status, be32toh(out->cmd_syndrome)); 5457 return -1; 5458 } 5459 5460 return 0; 5461 } 5462 5463 static int 5464 mcx_create_rqt(struct mcx_softc *sc, int size, int *rqns, int *rqt) 5465 { 5466 struct mcx_cmdq_entry *cqe; 5467 struct mcx_dmamem mxm; 5468 struct mcx_cmd_create_rqt_in *in; 5469 struct mcx_cmd_create_rqt_mb_in *mbin; 5470 struct mcx_cmd_create_rqt_out *out; 5471 struct mcx_rqt_ctx *rqt_ctx; 5472 int *rqtn; 5473 int error; 5474 int token; 5475 int i; 5476 5477 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 5478 token = mcx_cmdq_token(sc); 5479 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + 5480 (size * sizeof(int)), sizeof(*out), token); 5481 5482 in = mcx_cmdq_in(cqe); 5483 in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQT); 5484 in->cmd_op_mod = htobe16(0); 5485 5486 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, 5487 &cqe->cq_input_ptr, token) != 0) { 5488 printf("%s: unable to allocate create rqt mailbox\n", 5489 DEVNAME(sc)); 5490 return 
(-1); 5491 } 5492 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); 5493 rqt_ctx = &mbin->cmd_rqt; 5494 rqt_ctx->cmd_rqt_max_size = htobe16(sc->sc_max_rqt_size); 5495 rqt_ctx->cmd_rqt_actual_size = htobe16(size); 5496 5497 /* rqt list follows the rqt context */ 5498 rqtn = (int *)(rqt_ctx + 1); 5499 for (i = 0; i < size; i++) { 5500 rqtn[i] = htobe32(rqns[i]); 5501 } 5502 5503 mcx_cmdq_mboxes_sign(&mxm, 1); 5504 mcx_cmdq_post(sc, cqe, 0); 5505 error = mcx_cmdq_poll(sc, cqe, 1000); 5506 if (error != 0) { 5507 printf("%s: create rqt timeout\n", DEVNAME(sc)); 5508 goto free; 5509 } 5510 if (mcx_cmdq_verify(cqe) != 0) { 5511 printf("%s: create rqt command corrupt\n", DEVNAME(sc)); 5512 goto free; 5513 } 5514 5515 out = mcx_cmdq_out(cqe); 5516 if (out->cmd_status != MCX_CQ_STATUS_OK) { 5517 printf("%s: create rqt failed (%x, %x)\n", DEVNAME(sc), 5518 out->cmd_status, be32toh(out->cmd_syndrome)); 5519 error = -1; 5520 goto free; 5521 } 5522 5523 *rqt = mcx_get_id(out->cmd_rqtn); 5524 return (0); 5525 free: 5526 mcx_dmamem_free(sc, &mxm); 5527 return (error); 5528 } 5529 5530 static int 5531 mcx_destroy_rqt(struct mcx_softc *sc, int rqt) 5532 { 5533 struct mcx_cmdq_entry *cqe; 5534 struct mcx_cmd_destroy_rqt_in *in; 5535 struct mcx_cmd_destroy_rqt_out *out; 5536 int error; 5537 int token; 5538 5539 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 5540 token = mcx_cmdq_token(sc); 5541 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token); 5542 5543 in = mcx_cmdq_in(cqe); 5544 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQT); 5545 in->cmd_op_mod = htobe16(0); 5546 in->cmd_rqtn = htobe32(rqt); 5547 5548 mcx_cmdq_post(sc, cqe, 0); 5549 error = mcx_cmdq_poll(sc, cqe, 1000); 5550 if (error != 0) { 5551 printf("%s: destroy rqt timeout\n", DEVNAME(sc)); 5552 return error; 5553 } 5554 if (mcx_cmdq_verify(cqe) != 0) { 5555 printf("%s: destroy rqt command corrupt\n", DEVNAME(sc)); 5556 return error; 5557 } 5558 5559 out = mcx_cmdq_out(cqe); 5560 if (out->cmd_status != MCX_CQ_STATUS_OK) { 5561 
		printf("%s: destroy rqt failed (%x, %x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		return -1;
	}

	return 0;
}

#if 0
/*
 * Allocate a hardware flow counter and record its id in
 * sc->sc_flow_counter_id[i].  Debug helper, currently compiled out.
 */
static int
mcx_alloc_flow_counter(struct mcx_softc *sc, int i)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_alloc_flow_counter_in *in;
	struct mcx_cmd_alloc_flow_counter_out *out;
	int error;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_FLOW_COUNTER);
	in->cmd_op_mod = htobe16(0);

	mcx_cmdq_post(sc, cqe, 0);

	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: alloc flow counter timeout\n", DEVNAME(sc));
		return (-1);
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: alloc flow counter command corrupt\n", DEVNAME(sc));
		return (-1);
	}

	/* output fits in the cqe itself, no mailbox needed */
	out = (struct mcx_cmd_alloc_flow_counter_out *)cqe->cq_output_data;
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: alloc flow counter failed (%x)\n", DEVNAME(sc),
		    out->cmd_status);
		return (-1);
	}

	sc->sc_flow_counter_id[i] = be16toh(out->cmd_flow_counter_id);
	printf("flow counter id %d = %d\n", i, sc->sc_flow_counter_id[i]);

	return (0);
}
#endif

/*
 * Create an RX flow table with 2^log_size entries at the given level.
 * On success the new table id is stored in *flow_table_id and 0 is
 * returned.
 */
static int
mcx_create_flow_table(struct mcx_softc *sc, int log_size, int level,
    int *flow_table_id)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_dmamem mxm;
	struct mcx_cmd_create_flow_table_in *in;
	struct mcx_cmd_create_flow_table_mb_in *mbin;
	struct mcx_cmd_create_flow_table_out *out;
	int error;
	int token;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
	    sizeof(*out), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_TABLE);
	in->cmd_op_mod = htobe16(0);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
	    &cqe->cq_input_ptr, token) != 0) {
		printf("%s: unable to allocate create flow table mailbox\n",
		    DEVNAME(sc));
		return (-1);
	}
	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
	mbin->cmd_ctx.ft_log_size = log_size;
	mbin->cmd_ctx.ft_level = level;

	mcx_cmdq_mboxes_sign(&mxm, 1);
	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: create flow table timeout\n", DEVNAME(sc));
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: create flow table command corrupt\n", DEVNAME(sc));
		/*
		 * NOTE(review): error is still 0 here, so a corrupt reply
		 * is reported to the caller as success — confirm intended.
		 */
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: create flow table failed (%x, %x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	*flow_table_id = mcx_get_id(out->cmd_table_id);
free:
	mcx_dmamem_free(sc, &mxm);
	return (error);
}

/*
 * Post SET_FLOW_TABLE_ROOT to make the given RX flow table the root
 * table for incoming packets.
 */
static int
mcx_set_flow_table_root(struct mcx_softc *sc, int flow_table_id)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_dmamem mxm;
	struct mcx_cmd_set_flow_table_root_in *in;
	struct mcx_cmd_set_flow_table_root_mb_in *mbin;
	struct mcx_cmd_set_flow_table_root_out *out;
	int error;
	int token;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
	    sizeof(*out), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ROOT);
	in->cmd_op_mod = htobe16(0);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
	    &cqe->cq_input_ptr, token) != 0) {
		printf("%s: unable to allocate set flow table root mailbox\n",
		    DEVNAME(sc));
		return
		    (-1);
	}
	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
	mbin->cmd_table_id = htobe32(flow_table_id);

	mcx_cmdq_mboxes_sign(&mxm, 1);
	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: set flow table root timeout\n", DEVNAME(sc));
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: set flow table root command corrupt\n",
		    DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: set flow table root failed (%x, %x)\n",
		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

free:
	mcx_dmamem_free(sc, &mxm);
	return (error);
}

/*
 * Post DESTROY_FLOW_TABLE to tear down the RX flow table identified
 * by "flow_table_id".  Returns 0 on success.
 */
static int
mcx_destroy_flow_table(struct mcx_softc *sc, int flow_table_id)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_dmamem mxm;
	struct mcx_cmd_destroy_flow_table_in *in;
	struct mcx_cmd_destroy_flow_table_mb_in *mb;
	struct mcx_cmd_destroy_flow_table_out *out;
	int error;
	int token;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_TABLE);
	in->cmd_op_mod = htobe16(0);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
	    &cqe->cq_input_ptr, token) != 0) {
		printf("%s: unable to allocate destroy flow table mailbox\n",
		    DEVNAME(sc));
		return (-1);
	}
	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
	mb->cmd_table_id = htobe32(flow_table_id);

	mcx_cmdq_mboxes_sign(&mxm, 1);
	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: destroy flow table timeout\n", DEVNAME(sc));
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: destroy flow table command corrupt\n",
		    DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: destroy flow table failed (%x, %x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

free:
	mcx_dmamem_free(sc, &mxm);
	return (error);
}


/*
 * Create a flow group covering entries [start, start + size - 1] of
 * the given flow table, matching on the criteria in "match" as
 * enabled by "match_enable".  On success the group id and geometry
 * are recorded in sc->sc_flow_group[group].
 */
static int
mcx_create_flow_group(struct mcx_softc *sc, int flow_table_id, int group,
    int start, int size, int match_enable, struct mcx_flow_match *match)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_dmamem mxm;
	struct mcx_cmd_create_flow_group_in *in;
	struct mcx_cmd_create_flow_group_mb_in *mbin;
	struct mcx_cmd_create_flow_group_out *out;
	struct mcx_flow_group *mfg;
	int error;
	int token;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
	    token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_GROUP);
	in->cmd_op_mod = htobe16(0);

	/* the mailbox input spans two mailboxes */
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
	    != 0) {
		printf("%s: unable to allocate create flow group mailbox\n",
		    DEVNAME(sc));
		return (-1);
	}
	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
	mbin->cmd_table_id = htobe32(flow_table_id);
	mbin->cmd_start_flow_index = htobe32(start);
	mbin->cmd_end_flow_index = htobe32(start + (size - 1));

	mbin->cmd_match_criteria_enable = match_enable;
	memcpy(&mbin->cmd_match_criteria, match, sizeof(*match));

	mcx_cmdq_mboxes_sign(&mxm, 2);
	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: create flow group timeout\n", DEVNAME(sc));
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: create flow group command corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: create flow group failed (%x, %x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	mfg = &sc->sc_flow_group[group];
	mfg->g_id = mcx_get_id(out->cmd_group_id);
	mfg->g_table = flow_table_id;
	mfg->g_start = start;
	mfg->g_size = size;

free:
	mcx_dmamem_free(sc, &mxm);
	return (error);
}

/*
 * Destroy the flow group tracked in sc->sc_flow_group[group] and
 * reset its bookkeeping.
 */
static int
mcx_destroy_flow_group(struct mcx_softc *sc, int group)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_dmamem mxm;
	struct mcx_cmd_destroy_flow_group_in *in;
	struct mcx_cmd_destroy_flow_group_mb_in *mb;
	struct mcx_cmd_destroy_flow_group_out *out;
	struct mcx_flow_group *mfg;
	int error;
	int token;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_GROUP);
	in->cmd_op_mod = htobe16(0);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_input_ptr, token) != 0) {
		printf("%s: unable to allocate destroy flow group mailbox\n",
		    DEVNAME(sc));
		return (-1);
	}
	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
	mfg = &sc->sc_flow_group[group];
	mb->cmd_table_id = htobe32(mfg->g_table);
	mb->cmd_group_id = htobe32(mfg->g_id);

	mcx_cmdq_mboxes_sign(&mxm, 2);
	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: destroy flow group timeout\n", DEVNAME(sc));
		goto free;
	}
	if
	    (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: destroy flow group command corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: destroy flow group failed (%x, %x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	/* forget the group's geometry now that the hardware dropped it */
	mfg->g_id = -1;
	mfg->g_table = -1;
	mfg->g_size = 0;
	mfg->g_start = 0;
free:
	mcx_dmamem_free(sc, &mxm);
	return (error);
}

/*
 * Install a flow table entry at "index" within flow group "group"
 * that forwards matching packets to "dest".  If "macaddr" is not NULL
 * it is copied into the destination mac match value.
 */
static int
mcx_set_flow_table_entry_mac(struct mcx_softc *sc, int group, int index,
    const uint8_t *macaddr, uint32_t dest)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_dmamem mxm;
	struct mcx_cmd_set_flow_table_entry_in *in;
	struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
	struct mcx_cmd_set_flow_table_entry_out *out;
	struct mcx_flow_group *mfg;
	uint32_t *pdest;
	int error;
	int token;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*pdest),
	    sizeof(*out), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
	in->cmd_op_mod = htobe16(0);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
	    != 0) {
		printf("%s: unable to allocate set flow table entry mailbox\n",
		    DEVNAME(sc));
		return (-1);
	}

	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;

	mfg = &sc->sc_flow_group[group];
	mbin->cmd_table_id = htobe32(mfg->g_table);
	mbin->cmd_flow_index = htobe32(mfg->g_start + index);
	mbin->cmd_flow_ctx.fc_group_id = htobe32(mfg->g_id);

	/* flow context ends at offset 0x330, 0x130 into the second mbox */
	pdest = (uint32_t *)
	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
	mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
	mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
	*pdest = htobe32(dest);

	/* the only thing we match on at the moment is the dest mac address */
	if (macaddr != NULL) {
		memcpy(mbin->cmd_flow_ctx.fc_match_value.mc_dest_mac, macaddr,
		    ETHER_ADDR_LEN);
	}

	mcx_cmdq_mboxes_sign(&mxm, 2);
	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: set flow table entry timeout\n", DEVNAME(sc));
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: set flow table entry command corrupt\n",
		    DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: set flow table entry failed (%x, %x)\n",
		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

free:
	mcx_dmamem_free(sc, &mxm);
	return (error);
}

/*
 * Install a flow table entry at "index" within flow group "group"
 * that matches on ethertype and IP protocol and forwards matching
 * packets to "dest".
 */
static int
mcx_set_flow_table_entry_proto(struct mcx_softc *sc, int group, int index,
    int ethertype, int ip_proto, uint32_t dest)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_dmamem mxm;
	struct mcx_cmd_set_flow_table_entry_in *in;
	struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
	struct mcx_cmd_set_flow_table_entry_out *out;
	struct mcx_flow_group *mfg;
	uint32_t *pdest;
	int error;
	int token;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*pdest),
	    sizeof(*out), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
	in->cmd_op_mod = htobe16(0);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
	    != 0) {
		printf("%s: unable to allocate set flow table entry mailbox\n",
		    DEVNAME(sc));
		return (-1);
	}

	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;

	mfg = &sc->sc_flow_group[group];
	mbin->cmd_table_id = htobe32(mfg->g_table);
	mbin->cmd_flow_index = htobe32(mfg->g_start + index);
	mbin->cmd_flow_ctx.fc_group_id = htobe32(mfg->g_id);

	/* flow context ends at offset 0x330, 0x130 into the second mbox */
	pdest = (uint32_t *)
	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
	mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
	mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
	*pdest = htobe32(dest);

	mbin->cmd_flow_ctx.fc_match_value.mc_ethertype = htobe16(ethertype);
	mbin->cmd_flow_ctx.fc_match_value.mc_ip_proto = ip_proto;

	mcx_cmdq_mboxes_sign(&mxm, 2);
	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: set flow table entry timeout\n", DEVNAME(sc));
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: set flow table entry command corrupt\n",
		    DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: set flow table entry failed (%x, %x)\n",
		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

free:
	mcx_dmamem_free(sc, &mxm);
	return (error);
}

/*
 * Remove the flow table entry at "index" within flow group "group".
 */
static int
mcx_delete_flow_table_entry(struct mcx_softc *sc, int group, int index)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_dmamem mxm;
	struct mcx_cmd_delete_flow_table_entry_in *in;
	struct mcx_cmd_delete_flow_table_entry_mb_in *mbin;
	struct mcx_cmd_delete_flow_table_entry_out *out;
	struct mcx_flow_group *mfg;
	int error;
	int token;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cqe,
	    sizeof(*in) + sizeof(*mbin), sizeof(*out),
	    token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_DELETE_FLOW_TABLE_ENTRY);
	in->cmd_op_mod = htobe16(0);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_input_ptr, token) != 0) {
		printf("%s: unable to allocate "
		    "delete flow table entry mailbox\n", DEVNAME(sc));
		return (-1);
	}
	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;

	mfg = &sc->sc_flow_group[group];
	mbin->cmd_table_id = htobe32(mfg->g_table);
	mbin->cmd_flow_index = htobe32(mfg->g_start + index);

	mcx_cmdq_mboxes_sign(&mxm, 2);
	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: delete flow table entry timeout\n", DEVNAME(sc));
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: delete flow table entry command corrupt\n",
		    DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: delete flow table entry %d:%d failed (%x, %x)\n",
		    DEVNAME(sc), group, index, out->cmd_status,
		    be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

free:
	mcx_dmamem_free(sc, &mxm);
	return (error);
}

#if 0
/*
 * Debug helper, compiled out: query a flow table and hex-dump its
 * context to the console.
 */
int
mcx_dump_flow_table(struct mcx_softc *sc, int flow_table_id)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_flow_table_in *in;
	struct mcx_cmd_query_flow_table_mb_in *mbin;
	struct mcx_cmd_query_flow_table_out *out;
	struct mcx_cmd_query_flow_table_mb_out *mbout;
	uint8_t token = mcx_cmdq_token(sc);
	int error;
	int i;
	uint8_t *dump;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
	    sizeof(*out) + sizeof(*mbout) + 16, token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE);
	in->cmd_op_mod = htobe16(0);

	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query flow table mailboxes\n");
		return (-1);
	}
	/* input and output share the same mailboxes */
	cqe->cq_input_ptr = cqe->cq_output_ptr;

	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_table_type = 0;
	mbin->cmd_table_id = htobe32(flow_table_id);

	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query flow table timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query flow table reply corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query flow table failed (%x/%x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	mbout = (struct mcx_cmd_query_flow_table_mb_out *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	/* the table context starts 8 bytes into the mailbox output */
	dump = (uint8_t *)mbout + 8;
	for (i = 0; i < sizeof(struct mcx_flow_table_ctx); i++) {
		printf("%.2x ", dump[i]);
		if (i % 16 == 15)
			printf("\n");
	}
free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}
/*
 * Debug helper, compiled out: query one flow table entry and hex-dump
 * the first mailbox of the reply.
 */
int
mcx_dump_flow_table_entry(struct mcx_softc *sc, int flow_table_id, int index)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_flow_table_entry_in *in;
	struct mcx_cmd_query_flow_table_entry_mb_in *mbin;
	struct mcx_cmd_query_flow_table_entry_out *out;
	struct mcx_cmd_query_flow_table_entry_mb_out *mbout;
	uint8_t token = mcx_cmdq_token(sc);
	int error;
	int i;
	uint8_t *dump;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
	    sizeof(*out) + sizeof(*mbout) + 16, token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE_ENTRY);
	in->cmd_op_mod = htobe16(0);

	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate "
		    "query flow table entry mailboxes\n");
		return (-1);
	}
	cqe->cq_input_ptr = cqe->cq_output_ptr;

	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_table_type = 0;
	mbin->cmd_table_id = htobe32(flow_table_id);
	mbin->cmd_flow_index = htobe32(index);

	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query flow table entry timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query flow table entry reply corrupt\n",
		    DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query flow table entry failed (%x/%x)\n",
		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	mbout = (struct mcx_cmd_query_flow_table_entry_mb_out *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	dump = (uint8_t *)mbout;
	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
		printf("%.2x ", dump[i]);
		if (i % 16 == 15)
			printf("\n");
	}

free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}

/*
 * Debug helper, compiled out: query the flow group recorded in
 * sc->sc_flow_group_id and hex-dump both reply mailboxes.
 */
int
mcx_dump_flow_group(struct mcx_softc *sc, int
    flow_table_id)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_flow_group_in *in;
	struct mcx_cmd_query_flow_group_mb_in *mbin;
	struct mcx_cmd_query_flow_group_out *out;
	struct mcx_cmd_query_flow_group_mb_out *mbout;
	uint8_t token = mcx_cmdq_token(sc);
	int error;
	int i;
	uint8_t *dump;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
	    sizeof(*out) + sizeof(*mbout) + 16, token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_GROUP);
	in->cmd_op_mod = htobe16(0);

	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query flow group mailboxes\n");
		return (-1);
	}
	cqe->cq_input_ptr = cqe->cq_output_ptr;

	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_table_type = 0;
	mbin->cmd_table_id = htobe32(flow_table_id);
	mbin->cmd_group_id = htobe32(sc->sc_flow_group_id);

	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query flow group timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query flow group reply corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query flow group failed (%x/%x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	mbout = (struct mcx_cmd_query_flow_group_mb_out *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	dump = (uint8_t *)mbout;
	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
		printf("%.2x ", dump[i]);
		if (i % 16 == 15)
			printf("\n");
	}
	dump = (uint8_t *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1)));
	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
		printf("%.2x ", dump[i]);
		if (i % 16 == 15)
			printf("\n");
	}

free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}

/*
 * Debug helper, compiled out: query (and clear) the NIC vport
 * counters and print any non-zero error/unicast/broadcast packet
 * counts.
 *
 * NOTE(review): uses OpenBSD betoh32/betoh64 byte-order names rather
 * than NetBSD's be32toh/be64toh — would need converting if this code
 * were ever enabled.
 */
static int
mcx_dump_counters(struct mcx_softc *sc)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_vport_counters_in *in;
	struct mcx_cmd_query_vport_counters_mb_in *mbin;
	struct mcx_cmd_query_vport_counters_out *out;
	struct mcx_nic_vport_counters *counters;
	int error, token;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
	    sizeof(*out) + sizeof(*counters), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_VPORT_COUNTERS);
	in->cmd_op_mod = htobe16(0);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate "
		    "query nic vport counters mailboxen\n");
		return (-1);
	}
	cqe->cq_input_ptr = cqe->cq_output_ptr;

	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	/* clear the counters after reading them */
	mbin->cmd_clear = 0x80;

	mcx_cmdq_mboxes_sign(&mxm, 1);
	mcx_cmdq_post(sc, cqe, 0);

	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query nic vport counters timeout\n", DEVNAME(sc));
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: query nic vport counters command corrupt\n",
		    DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: query nic vport counters failed (%x, %x)\n",
		    DEVNAME(sc), out->cmd_status, betoh32(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	counters = (struct mcx_nic_vport_counters *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	if (counters->rx_bcast.packets + counters->tx_bcast.packets +
	    counters->rx_ucast.packets + counters->tx_ucast.packets +
	    counters->rx_err.packets + counters->tx_err.packets)
		printf("%s: err %llx/%llx uc %llx/%llx bc %llx/%llx\n",
		    DEVNAME(sc),
		    betoh64(counters->tx_err.packets),
		    betoh64(counters->rx_err.packets),
		    betoh64(counters->tx_ucast.packets),
		    betoh64(counters->rx_ucast.packets),
		    betoh64(counters->tx_bcast.packets),
		    betoh64(counters->rx_bcast.packets));
free:
	mcx_dmamem_free(sc, &mxm);

	return (error);
}

/*
 * Debug helper, compiled out: query (and clear) one of the flow
 * counters allocated by mcx_alloc_flow_counter() and print its packet
 * count if non-zero.
 */
static int
mcx_dump_flow_counter(struct mcx_softc *sc, int index, const char *what)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_flow_counter_in *in;
	struct mcx_cmd_query_flow_counter_mb_in *mbin;
	struct mcx_cmd_query_flow_counter_out *out;
	struct mcx_counter *counters;
	int error, token;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out) +
	    sizeof(*counters), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_COUNTER);
	in->cmd_op_mod = htobe16(0);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query flow counter mailboxen\n");
		return (-1);
	}
	cqe->cq_input_ptr = cqe->cq_output_ptr;
	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_flow_counter_id = htobe16(sc->sc_flow_counter_id[index]);
	mbin->cmd_clear = 0x80;

	mcx_cmdq_mboxes_sign(&mxm, 1);
	mcx_cmdq_post(sc, cqe, 0);

	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query flow counter timeout\n", DEVNAME(sc));
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: query flow counter command corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: query flow counter failed (%x, %x)\n", DEVNAME(sc),
		    out->cmd_status, betoh32(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	counters = (struct mcx_counter *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	if (counters->packets)
		printf("%s: %s inflow %llx\n", DEVNAME(sc), what,
		    betoh64(counters->packets));
free:
	mcx_dmamem_free(sc, &mxm);

	return (error);
}

#endif

#if NKSTAT > 0

/*
 * Read the firmware's RQ context for the receive queue rx->rx_rqn
 * into *rq_ctx.  Returns 0 on success, non-zero on failure.
 */
int
mcx_query_rq(struct mcx_softc *sc, struct mcx_rx *rx, struct mcx_rq_ctx *rq_ctx)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_rq_in *in;
	struct mcx_cmd_query_rq_out *out;
	struct mcx_cmd_query_rq_mb_out *mbout;
	uint8_t token = mcx_cmdq_token(sc);
	int error;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
	    token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_RQ);
	in->cmd_op_mod = htobe16(0);
	in->cmd_rqn = htobe32(rx->rx_rqn);

	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf("%s: unable to allocate query rq mailboxes\n", DEVNAME(sc));
		return (-1);
	}

	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query rq timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query rq reply corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query rq failed (%x/%x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	mbout = (struct mcx_cmd_query_rq_mb_out *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	memcpy(rq_ctx, &mbout->cmd_ctx, sizeof(*rq_ctx));

free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}

/*
 * Read the firmware's SQ context for the send queue tx->tx_sqn into
 * *sq_ctx.  Returns 0 on success, non-zero on failure.
 */
int
mcx_query_sq(struct mcx_softc *sc, struct mcx_tx *tx, struct mcx_sq_ctx *sq_ctx)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_sq_in *in;
	struct mcx_cmd_query_sq_out *out;
	struct mcx_cmd_query_sq_mb_out *mbout;
	uint8_t token = mcx_cmdq_token(sc);
	int error;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
	    token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_SQ);
	in->cmd_op_mod = htobe16(0);
	in->cmd_sqn = htobe32(tx->tx_sqn);

	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf("%s: unable to allocate query sq mailboxes\n", DEVNAME(sc));
		return (-1);
	}

	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query sq timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query sq reply corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query sq failed (%x/%x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	mbout = (struct mcx_cmd_query_sq_mb_out *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	memcpy(sq_ctx, &mbout->cmd_ctx, sizeof(*sq_ctx));

free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}

/*
 * Read the firmware's completion queue context for cq->cq_n into
 * *cq_ctx.  Returns 0 on success, non-zero on failure.
 */
int
mcx_query_cq(struct mcx_softc *sc, struct mcx_cq *cq, struct mcx_cq_ctx *cq_ctx)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_cq_in *in;
	struct mcx_cmd_query_cq_out *out;
	struct mcx_cq_ctx *ctx;
	uint8_t token = mcx_cmdq_token(sc);
	int error;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx) + 16,
	    token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_CQ);
	in->cmd_op_mod = htobe16(0);
	in->cmd_cqn = htobe32(cq->cq_n);

	CTASSERT(sizeof(*ctx) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf("%s: unable to allocate query cq mailboxes\n",
		    DEVNAME(sc));
		return (-1);
	}

	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query cq timeout\n", DEVNAME(sc));
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: query cq reply corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query qc failed (%x/%x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	ctx = (struct mcx_cq_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	memcpy(cq_ctx, ctx, sizeof(*cq_ctx));
free:
	/*
	 * NOTE(review): this frees with mcx_dmamem_free() while
	 * mcx_query_rq/mcx_query_sq above use mcx_cq_mboxes_free() for
	 * mailboxes from the same allocator — confirm both are
	 * equivalent here.
	 */
	mcx_dmamem_free(sc, &mxm);
	return (error);
}

int
mcx_query_eq(struct mcx_softc *sc, struct mcx_eq *eq, struct mcx_eq_ctx *eq_ctx)
{
	struct
mcx_dmamem mxm; 6676 struct mcx_cmdq_entry *cqe; 6677 struct mcx_cmd_query_eq_in *in; 6678 struct mcx_cmd_query_eq_out *out; 6679 struct mcq_eq_ctx *ctx; 6680 uint8_t token = mcx_cmdq_token(sc); 6681 int error; 6682 6683 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem); 6684 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx) + 16, 6685 token); 6686 6687 in = mcx_cmdq_in(cqe); 6688 in->cmd_opcode = htobe16(MCX_CMD_QUERY_EQ); 6689 in->cmd_op_mod = htobe16(0); 6690 in->cmd_eqn = htobe32(eq->eq_n); 6691 6692 CTASSERT(sizeof(*ctx) <= MCX_CMDQ_MAILBOX_DATASIZE*2); 6693 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, 6694 &cqe->cq_output_ptr, token) != 0) { 6695 printf("%s: unable to allocate query eq mailboxes\n", 6696 DEVNAME(sc)); 6697 return (-1); 6698 } 6699 6700 mcx_cmdq_mboxes_sign(&mxm, 1); 6701 6702 mcx_cmdq_post(sc, cqe, 0); 6703 error = mcx_cmdq_poll(sc, cqe, 1000); 6704 if (error != 0) { 6705 printf("%s: query eq timeout\n", DEVNAME(sc)); 6706 goto free; 6707 } 6708 if (mcx_cmdq_verify(cqe) != 0) { 6709 printf("%s: query eq reply corrupt\n", DEVNAME(sc)); 6710 goto free; 6711 } 6712 6713 out = mcx_cmdq_out(cqe); 6714 switch (out->cmd_status) { 6715 case MCX_CQ_STATUS_OK: 6716 break; 6717 default: 6718 printf("%s: query eq failed (%x/%x)\n", DEVNAME(sc), 6719 out->cmd_status, be32toh(out->cmd_syndrome)); 6720 error = -1; 6721 goto free; 6722 } 6723 6724 ctx = (struct mcx_eq_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))); 6725 memcpy(eq_ctx, ctx, sizeof(*eq_ctx)); 6726 free: 6727 mcx_dmamem_free(sc, &mxm); 6728 return (error); 6729 } 6730 6731 #endif /* NKSTAT > 0 */ 6732 6733 6734 static inline unsigned int 6735 mcx_rx_fill_slots(struct mcx_softc *sc, struct mcx_rx *rx, uint nslots) 6736 { 6737 struct mcx_rq_entry *ring, *rqe; 6738 struct mcx_slot *ms; 6739 struct mbuf *m; 6740 uint slot, p, fills; 6741 6742 ring = MCX_DMA_KVA(&rx->rx_rq_mem); 6743 p = rx->rx_prod; 6744 6745 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem), 6746 0, MCX_DMA_LEN(&rx->rx_rq_mem), 
	    BUS_DMASYNC_POSTWRITE);

	/* these initial values are dead stores; both are recomputed at
	 * the top of the loop below */
	slot = (p % (1 << MCX_LOG_RQ_SIZE));
	rqe = ring;
	for (fills = 0; fills < nslots; fills++) {
		slot = p % (1 << MCX_LOG_RQ_SIZE);

		ms = &rx->rx_slots[slot];
		rqe = &ring[slot];

		m = NULL;
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL)
			break;

		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			break;
		}

		/*
		 * trim the cluster down to the receive buffer size and
		 * shift the payload for ethernet header alignment
		 */
		m->m_len = m->m_pkthdr.len = sc->sc_hardmtu;
		m_adj(m, m->m_ext.ext_size - sc->sc_rxbufsz);
		m_adj(m, ETHER_ALIGN);

		if (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
		    BUS_DMA_NOWAIT) != 0) {
			m_freem(m);
			break;
		}
		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		ms->ms_m = m;

		/* descriptor fields are big-endian on the device */
		be32enc(&rqe->rqe_byte_count, ms->ms_map->dm_segs[0].ds_len);
		be64enc(&rqe->rqe_addr, ms->ms_map->dm_segs[0].ds_addr);
		be32enc(&rqe->rqe_lkey, sc->sc_lkey);

		p++;
	}

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_PREWRITE);

	rx->rx_prod = p;

	/* publish the new producer index through the rx doorbell record */
	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
	be32enc(MCX_DMA_OFF(&sc->sc_doorbell_mem, rx->rx_doorbell),
	    p & MCX_WQ_DOORBELL_MASK);
	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

	/* slots we were asked to fill but could not */
	return (nslots - fills);
}

/*
 * Replenish the rx ring with as many buffers as the ring counter says
 * are free.  Returns 1 if there was nothing to do, 0 otherwise.
 */
static int
mcx_rx_fill(struct mcx_softc *sc, struct mcx_rx *rx)
{
	u_int slots;

	slots = mcx_rxr_get(&rx->rx_rxr, (1 << MCX_LOG_RQ_SIZE));
	if (slots == 0)
		return (1);

	/* give back whatever mcx_rx_fill_slots() could not fill */
	slots = mcx_rx_fill_slots(sc, rx, slots);
	mcx_rxr_put(&rx->rx_rxr, slots);
	return (0);
}

/*
 * Callout handler: retry rx refill after an mbuf shortage.  Re-arms
 * itself while the ring is still completely empty.
 */
void
mcx_refill(void *xrx)
{
	struct mcx_rx *rx = xrx;
	struct mcx_softc *sc = rx->rx_softc;

	mcx_rx_fill(sc, rx);

	if (mcx_rxr_inuse(&rx->rx_rxr) == 0)
		callout_schedule(&rx->rx_refill, 1);
}

/*
 * Handle a tx completion: unload and free the transmitted mbuf and
 * return the number of ring slots the packet occupied so the producer
 * space can be reclaimed.
 */
static int
mcx_process_txeof(struct mcx_softc *sc, struct mcx_tx *tx,
    struct mcx_cq_entry *cqe)
{
	struct mcx_slot *ms;
	bus_dmamap_t map;
	int slot, slots;

	slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_SQ_SIZE);

	ms = &tx->tx_slots[slot];
	map = ms->ms_map;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	/* first slot holds one segment; extra segments are packed
	 * MCX_SQ_SEGS_PER_SLOT per additional slot (mirrors the layout
	 * built in mcx_send_common_locked()) */
	slots = 1;
	if (map->dm_nsegs > 1)
		slots += (map->dm_nsegs+2) / MCX_SQ_SEGS_PER_SLOT;

	bus_dmamap_unload(sc->sc_dmat, map);
	m_freem(ms->ms_m);
	ms->ms_m = NULL;

	return (slots);
}

/* monotonic system uptime in nanoseconds */
static uint64_t
mcx_uptime(void)
{
	struct timespec ts;

	nanouptime(&ts);

	return ((uint64_t)ts.tv_sec * 1000000000 + (uint64_t)ts.tv_nsec);
}

/*
 * Take the initial (uptime, device timer) sample pair for timestamp
 * calibration; the ratio stays 0 until mcx_calibrate() has run.
 */
static void
mcx_calibrate_first(struct mcx_softc *sc)
{
	struct mcx_calibration *c = &sc->sc_calibration[0];
	int s;

	sc->sc_calibration_gen = 0;

	/* read both clocks as close together as possible */
	s = splhigh(); /* crit_enter?
	*/
	c->c_ubase = mcx_uptime();
	c->c_tbase = mcx_timer(sc);
	splx(s);
	c->c_ratio = 0;

#if notyet
	callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_FIRST * hz);
#endif
}

#define MCX_TIMESTAMP_SHIFT 24

/*
 * Periodic callout: sample uptime and the device free-running timer,
 * derive the fixed-point ratio between them, and flip the generation
 * counter so readers pick up the new calibration record.
 */
static void
mcx_calibrate(void *arg)
{
	struct mcx_softc *sc = arg;
	struct mcx_calibration *nc, *pc;
	uint64_t udiff, tdiff;
	unsigned int gen;
	int s;

	if (!ISSET(sc->sc_ec.ec_if.if_flags, IFF_RUNNING))
		return;

	callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_NORMAL * hz);

	/* previous record becomes the base for the next one */
	gen = sc->sc_calibration_gen;
	pc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
	gen++;
	nc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];

	nc->c_uptime = pc->c_ubase;
	nc->c_timestamp = pc->c_tbase;

	/* read both clocks as close together as possible */
	s = splhigh(); /* crit_enter? */
	nc->c_ubase = mcx_uptime();
	nc->c_tbase = mcx_timer(sc);
	splx(s);

	udiff = nc->c_ubase - nc->c_uptime;
	tdiff = nc->c_tbase - nc->c_timestamp;

	/*
	 * udiff is the wall clock time between calibration ticks,
	 * which should be 32 seconds or 32 billion nanoseconds. if
	 * we squint, 1 billion nanoseconds is kind of like a 32 bit
	 * number, so 32 billion should still have a lot of high bits
	 * spare. we use this space by shifting the nanoseconds up
	 * 24 bits so we have a nice big number to divide by the
	 * number of mcx timer ticks.
	 */
	nc->c_ratio = (udiff << MCX_TIMESTAMP_SHIFT) / tdiff;

	/* publish the record before bumping the generation */
	membar_producer();
	sc->sc_calibration_gen = gen;
}

/*
 * Turn one rx completion into an mbuf: unload the DMA map, set the
 * packet length and checksum/VLAN metadata from the CQE flags, and
 * append the mbuf to mq for later input.  Always returns 1 (the number
 * of rx slots consumed).
 */
static int
mcx_process_rx(struct mcx_softc *sc, struct mcx_rx *rx,
    struct mcx_cq_entry *cqe, struct mcx_mbufq *mq,
    const struct mcx_calibration *c)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mcx_slot *ms;
	struct mbuf *m;
	uint32_t flags, len;
	int slot;

	len = be32dec(&cqe->cq_byte_cnt);
	slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_RQ_SIZE);

	ms = &rx->rx_slots[slot];
	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, len, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, ms->ms_map);

	/* take ownership of the buffer away from the ring slot */
	m = ms->ms_m;
	ms->ms_m = NULL;

	m_set_rcvif(m, &sc->sc_ec.ec_if);
	m->m_pkthdr.len = m->m_len = len;

#if 0
	if (cqe->cq_rx_hash_type) {
		m->m_pkthdr.ph_flowid = be32toh(cqe->cq_rx_hash);
		m->m_pkthdr.csum_flags |= M_FLOWID;
	}
#endif

	/* translate hardware checksum-ok bits into mbuf csum flags,
	 * gated on what the interface has enabled */
	flags = be32dec(&cqe->cq_flags);
	if (flags & MCX_CQ_ENTRY_FLAGS_L3_OK) {
		if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
	}
	if (flags & MCX_CQ_ENTRY_FLAGS_L4_OK) {
		if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx)
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
		if (ifp->if_capenable & IFCAP_CSUM_TCPv6_Rx)
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv6;
		if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx)
			m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
		if (ifp->if_capenable & IFCAP_CSUM_UDPv6_Rx)
			m->m_pkthdr.csum_flags |= M_CSUM_UDPv6;
	}
	if (flags & MCX_CQ_ENTRY_FLAGS_CV) {
		vlan_set_tag(m, flags & MCX_CQ_ENTRY_FLAGS_VLAN_MASK);
	}

#if notyet
	/* convert the device timestamp to uptime using the calibration */
	if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_LINK0) && c->c_ratio) {
		uint64_t t = be64dec(&cqe->cq_timestamp);
		t -= c->c_timestamp;
		t *= c->c_ratio;
		t >>= MCX_TIMESTAMP_SHIFT;
		t += c->c_uptime;

		m->m_pkthdr.ph_timestamp = t;
		SET(m->m_pkthdr.csum_flags, M_TIMESTAMP);
	}
#endif

	MBUFQ_ENQUEUE(mq, m);

	return (1);
}

/*
 * Return the next completion queue entry owned by software, or NULL
 * if the queue is empty.  Ownership alternates with each pass over the
 * ring, so the owner bit is compared against the wrap bit of cq_cons.
 */
static struct mcx_cq_entry *
mcx_next_cq_entry(struct mcx_softc *sc, struct mcx_cq *cq)
{
	struct mcx_cq_entry *cqe;
	int next;

	cqe = (struct mcx_cq_entry *)MCX_DMA_KVA(&cq->cq_mem);
	next = cq->cq_cons % (1 << MCX_LOG_CQ_SIZE);

	if ((cqe[next].cq_opcode_owner & MCX_CQ_ENTRY_FLAG_OWNER) ==
	    ((cq->cq_cons >> MCX_LOG_CQ_SIZE) & 1)) {
		return (&cqe[next]);
	}

	return (NULL);
}

/*
 * Acknowledge consumed CQEs and re-arm the completion queue: update
 * the doorbell record in host memory, then write the arm command to
 * the UAR doorbell register so the device raises the next event.
 */
static void
mcx_arm_cq(struct mcx_softc *sc, struct mcx_cq *cq, int uar)
{
	struct mcx_cq_doorbell *db;
	bus_size_t offset;
	uint32_t val;
	uint64_t uval;

	/* command sequence number (2 bits) plus consumer index */
	val = ((cq->cq_count) & 3) << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT;
	val |= (cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);

	db = MCX_DMA_OFF(&sc->sc_doorbell_mem, cq->cq_doorbell);

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
	    cq->cq_doorbell, sizeof(*db), BUS_DMASYNC_POSTWRITE);

	be32enc(&db->db_update_ci, cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
	be32enc(&db->db_arm_ci, val);

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
	    cq->cq_doorbell, sizeof(*db), BUS_DMASYNC_PREWRITE);

	offset = (MCX_PAGE_SIZE * uar) + MCX_UAR_CQ_DOORBELL;

	/* 64-bit UAR write: arm value in the high word, cqn in the low */
	uval = (uint64_t)val << 32;
	uval |= cq->cq_n;

	bus_space_write_8(sc->sc_memt, sc->sc_memh, offset, htobe64(uval));
	mcx_bar(sc, offset, sizeof(uval), BUS_SPACE_BARRIER_WRITE);
}

/*
 * Drain a completion queue: hand tx completions to mcx_process_txeof()
 * and rx completions to mcx_process_rx(), input the collected mbufs,
 * refill the rx ring, re-arm the CQ and kick the transmit path if any
 * tx slots were reclaimed.
 */
void
mcx_process_cq(struct mcx_softc *sc, struct mcx_queues *q, struct mcx_cq *cq)
{
	struct mcx_rx *rx = &q->q_rx;
	struct mcx_tx *tx = &q->q_tx;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	const struct mcx_calibration *c;
	unsigned int gen;
	struct mcx_cq_entry *cqe;
	struct mcx_mbufq mq;
	struct mbuf *m;
	int rxfree, txfree;

	MBUFQ_INIT(&mq);

	/* pair with membar_producer() in mcx_calibrate() */
	gen = sc->sc_calibration_gen;
	membar_consumer();
	c = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);

	rxfree = 0;
	txfree = 0;
	while ((cqe = mcx_next_cq_entry(sc, cq))) {
		uint8_t opcode;
		opcode = (cqe->cq_opcode_owner >> MCX_CQ_ENTRY_OPCODE_SHIFT);
		switch (opcode) {
		case MCX_CQ_ENTRY_OPCODE_REQ:
			txfree += mcx_process_txeof(sc, tx, cqe);
			break;
		case MCX_CQ_ENTRY_OPCODE_SEND:
			rxfree += mcx_process_rx(sc, rx, cqe, &mq, c);
			break;
		case MCX_CQ_ENTRY_OPCODE_REQ_ERR:
		case MCX_CQ_ENTRY_OPCODE_SEND_ERR:
			/* uint8_t *cqp = (uint8_t *)cqe; */
			/* printf("%s: cq completion error: %x\n",
			    DEVNAME(sc), cqp[0x37]); */
			break;

		default:
			/* printf("%s: cq completion opcode %x??\n",
			    DEVNAME(sc), opcode); */
			break;
		}

		cq->cq_cons++;
	}

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_PREREAD);

	if (rxfree > 0) {
		mcx_rxr_put(&rx->rx_rxr, rxfree);
		while (MBUFQ_FIRST(&mq) != NULL) {
			MBUFQ_DEQUEUE(&mq, m);
			if_percpuq_enqueue(ifp->if_percpuq, m);
		}

		/* fall back to the refill callout if the ring is empty */
		mcx_rx_fill(sc, rx);
		if (mcx_rxr_inuse(&rx->rx_rxr) == 0)
			callout_schedule(&rx->rx_refill, 1);
	}

	cq->cq_count++;
	mcx_arm_cq(sc, cq, q->q_uar);

	if (txfree > 0) {
		tx->tx_cons += txfree;
		if_schedule_deferred_start(ifp);
	}
}


/*
 * Re-arm an event queue by writing its number and consumer index to
 * the UAR EQ doorbell register.
 */
static void
mcx_arm_eq(struct mcx_softc *sc, struct mcx_eq *eq, int uar)
{
	bus_size_t offset;
	uint32_t val;

	offset = (MCX_PAGE_SIZE * uar) + MCX_UAR_EQ_DOORBELL_ARM;
	val = (eq->eq_n << 24) | (eq->eq_cons & 0xffffff);

	mcx_wr(sc, offset, val);
	mcx_bar(sc,
	    offset, sizeof(val), BUS_SPACE_BARRIER_WRITE);
}

/*
 * Return the next software-owned event queue entry (advancing the
 * consumer index), or NULL if the queue is empty.  As in the CQ case,
 * the owner bit is compared against the wrap bit of eq_cons.
 */
static struct mcx_eq_entry *
mcx_next_eq_entry(struct mcx_softc *sc, struct mcx_eq *eq)
{
	struct mcx_eq_entry *eqe;
	int next;

	eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&eq->eq_mem);
	next = eq->eq_cons % (1 << MCX_LOG_EQ_SIZE);
	if ((eqe[next].eq_owner & 1) ==
	    ((eq->eq_cons >> MCX_LOG_EQ_SIZE) & 1)) {
		eq->eq_cons++;
		return (&eqe[next]);
	}
	return (NULL);
}

/*
 * Interrupt handler for the admin event queue: drain events (only
 * port change events currently do real work, deferred to the
 * workqueue) and re-arm the EQ.  Always claims the interrupt.
 */
int
mcx_admin_intr(void *xsc)
{
	struct mcx_softc *sc = (struct mcx_softc *)xsc;
	struct mcx_eq *eq = &sc->sc_admin_eq;
	struct mcx_eq_entry *eqe;

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);

	while ((eqe = mcx_next_eq_entry(sc, eq)) != NULL) {
		switch (eqe->eq_event_type) {
		case MCX_EVENT_TYPE_LAST_WQE:
			/* printf("%s: last wqe reached?\n", DEVNAME(sc)); */
			break;

		case MCX_EVENT_TYPE_CQ_ERROR:
			/* printf("%s: cq error\n", DEVNAME(sc)); */
			break;

		case MCX_EVENT_TYPE_CMD_COMPLETION:
			/* wakeup probably */
			break;

		case MCX_EVENT_TYPE_PORT_CHANGE:
			/* link state work can sleep; run it from the
			 * workqueue instead of interrupt context */
			workqueue_enqueue(sc->sc_workq, &sc->sc_port_change, NULL);
			break;

		default:
			/* printf("%s: something happened\n", DEVNAME(sc)); */
			break;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);

	mcx_arm_eq(sc, eq, sc->sc_uar);

	return (1);
}

/*
 * Per-queue-pair interrupt handler: completion events on this queue's
 * EQ trigger CQ processing, then the EQ is re-armed.  Always claims
 * the interrupt.
 */
int
mcx_cq_intr(void *xq)
{
	struct mcx_queues *q = (struct mcx_queues *)xq;
	struct mcx_softc *sc = q->q_sc;
	struct mcx_eq *eq = &q->q_eq;
	struct mcx_eq_entry *eqe;
	int cqn;

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);

	while ((eqe = mcx_next_eq_entry(sc, eq)) != NULL) {
		switch (eqe->eq_event_type) {
		case MCX_EVENT_TYPE_COMPLETION:
			/* word 6 of the event data carries the cq number */
			cqn = be32toh(eqe->eq_event_data[6]);
			if (cqn == q->q_cq.cq_n)
				mcx_process_cq(sc, q, &q->q_cq);
			break;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);

	mcx_arm_eq(sc, eq, q->q_uar);

	return (1);
}

/*
 * Destroy the first 'allocated' DMA maps in a slot array (freeing any
 * mbuf still attached) and free the array itself, which was allocated
 * for 'total' entries.
 */
static void
mcx_free_slots(struct mcx_softc *sc, struct mcx_slot *slots, int allocated,
    int total)
{
	struct mcx_slot *ms;

	int i = allocated;
	while (i-- > 0) {
		ms = &slots[i];
		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
		if (ms->ms_m != NULL)
			m_freem(ms->ms_m);
	}
	kmem_free(slots, total * sizeof(*ms));
}

/*
 * Bring up one queue pair: allocate rx/tx slot arrays and DMA maps,
 * then create the completion, send and receive queues in the device.
 * On failure everything created so far is unwound via the labels at
 * the bottom; returns 0 or ENOMEM.
 */
static int
mcx_queue_up(struct mcx_softc *sc, struct mcx_queues *q)
{
	struct mcx_rx *rx;
	struct mcx_tx *tx;
	struct mcx_slot *ms;
	int i;

	rx = &q->q_rx;
	rx->rx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_RQ_SIZE),
	    KM_SLEEP);

	/* rx buffers are single-segment */
	for (i = 0; i < (1 << MCX_LOG_RQ_SIZE); i++) {
		ms = &rx->rx_slots[i];
		if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu, 1,
		    sc->sc_hardmtu, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &ms->ms_map) != 0) {
			printf("%s: failed to allocate rx dma maps\n",
			    DEVNAME(sc));
			goto destroy_rx_slots;
		}
	}

	tx = &q->q_tx;
	tx->tx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_SQ_SIZE),
	    KM_SLEEP);

	for (i = 0; i < (1 << MCX_LOG_SQ_SIZE); i++) {
		ms = &tx->tx_slots[i];
		if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu,
		    MCX_SQ_MAX_SEGMENTS, sc->sc_hardmtu, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &ms->ms_map) != 0) {
			printf("%s: failed to allocate tx dma maps\n",
			    DEVNAME(sc));
			goto destroy_tx_slots;
		}
	}

	if (mcx_create_cq(sc, &q->q_cq, q->q_uar, q->q_index,
	    q->q_eq.eq_n) != 0)
		goto
destroy_tx_slots;

	if (mcx_create_sq(sc, tx, q->q_uar, q->q_index, q->q_cq.cq_n)
	    != 0)
		goto destroy_cq;

	if (mcx_create_rq(sc, rx, q->q_index, q->q_cq.cq_n) != 0)
		goto destroy_sq;

	return 0;

destroy_sq:
	mcx_destroy_sq(sc, tx);
destroy_cq:
	mcx_destroy_cq(sc, &q->q_cq);
destroy_tx_slots:
	/* i holds the number of tx maps successfully created */
	mcx_free_slots(sc, tx->tx_slots, i, (1 << MCX_LOG_SQ_SIZE));
	tx->tx_slots = NULL;

	/* all rx maps were created by the time tx setup started */
	i = (1 << MCX_LOG_RQ_SIZE);
destroy_rx_slots:
	mcx_free_slots(sc, rx->rx_slots, i, (1 << MCX_LOG_RQ_SIZE));
	rx->rx_slots = NULL;
	return ENOMEM;
}

/* number of mcx_rss_config[] rules belonging to the given flow group */
static int
mcx_rss_group_entry_count(struct mcx_softc *sc, int group)
{
	int i;
	int count;

	count = 0;
	for (i = 0; i < __arraycount(mcx_rss_config); i++) {
		if (mcx_rss_config[i].flow_group == group)
			count++;
	}

	return count;
}

/*
 * Interface init: bring up the queue pairs, build the RSS and MAC
 * flow tables and groups, install unicast/broadcast/multicast entries,
 * create the RQT and TIRs for RSS, start all rx/tx queues and mark
 * the interface running.  Any failure unwinds through mcx_stop().
 */
static int
mcx_init(struct ifnet *ifp)
{
	struct mcx_softc *sc = ifp->if_softc;
	struct mcx_rx *rx;
	struct mcx_tx *tx;
	int i, start, count, flow_group, flow_index;
	struct mcx_flow_match match_crit;
	struct mcx_rss_rule *rss;
	uint32_t dest;
	int rqns[MCX_MAX_QUEUES] = { 0 };

	/* restart from a clean state if we were already running */
	if (ISSET(ifp->if_flags, IFF_RUNNING))
		mcx_stop(ifp, 0);

	if (mcx_create_tis(sc, &sc->sc_tis) != 0)
		goto down;

	for (i = 0; i < sc->sc_nqueues; i++) {
		if (mcx_queue_up(sc, &sc->sc_queues[i]) != 0) {
			goto down;
		}
	}

	/* RSS flow table and flow groups */
	if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE, 1,
	    &sc->sc_rss_flow_table_id) != 0)
		goto down;

	/* MAC table entries below point packets at the RSS table */
	dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
	    sc->sc_rss_flow_table_id;

	/* L4 RSS flow group (v4/v6 tcp/udp, no fragments) */
	memset(&match_crit, 0, sizeof(match_crit));
	match_crit.mc_ethertype = 0xffff;
	match_crit.mc_ip_proto = 0xff;
	match_crit.mc_vlan_flags = MCX_FLOW_MATCH_IP_FRAG;
	start = 0;
	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_L4);
	if (count != 0) {
		if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
		    MCX_FLOW_GROUP_RSS_L4, start, count,
		    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
			goto down;
		start += count;
	}

	/* L3 RSS flow group (v4/v6, including fragments) */
	memset(&match_crit, 0, sizeof(match_crit));
	match_crit.mc_ethertype = 0xffff;
	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_L3);
	if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
	    MCX_FLOW_GROUP_RSS_L3, start, count,
	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
		goto down;
	start += count;

	/* non-RSS flow group */
	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_NONE);
	memset(&match_crit, 0, sizeof(match_crit));
	if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
	    MCX_FLOW_GROUP_RSS_NONE, start, count, 0, &match_crit) != 0)
		goto down;

	/* Root flow table, matching packets based on mac address */
	if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE, 0,
	    &sc->sc_mac_flow_table_id) != 0)
		goto down;

	/* promisc flow group */
	start = 0;
	memset(&match_crit, 0, sizeof(match_crit));
	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
	    MCX_FLOW_GROUP_PROMISC, start, 1, 0, &match_crit) != 0)
		goto down;
	sc->sc_promisc_flow_enabled = 0;
	start++;

	/* all multicast flow group */
	match_crit.mc_dest_mac[0] = 0x01;
	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
	    MCX_FLOW_GROUP_ALLMULTI, start, 1,
	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
		goto down;
	sc->sc_allmulti_flow_enabled = 0;
	start++;

	/* mac address matching flow group */
	memset(&match_crit.mc_dest_mac, 0xff, sizeof(match_crit.mc_dest_mac));
	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
	    MCX_FLOW_GROUP_MAC, start, (1 << MCX_LOG_FLOW_TABLE_SIZE) - start,
	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
		goto down;

	/* flow table entries for unicast and broadcast */
	start = 0;
	if (mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC, start,
	    LLADDR(satosdl(ifp->if_dl->ifa_addr)), dest) != 0)
		goto down;
	start++;

	if (mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC, start,
	    etherbroadcastaddr, dest) != 0)
		goto down;
	start++;

	/* multicast entries go after that */
	sc->sc_mcast_flow_base = start;

	/* re-add any existing multicast flows */
	for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
		if (sc->sc_mcast_flows[i][0] != 0) {
			mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC,
			    sc->sc_mcast_flow_base + i,
			    sc->sc_mcast_flows[i], dest);
		}
	}

	if (mcx_set_flow_table_root(sc, sc->sc_mac_flow_table_id) != 0)
		goto down;

	/*
	 * the RQT can be any size as long as it's a power of two.
	 * since we also restrict the number of queues to a power of two,
	 * we can just put each rx queue in once.
	 */
	for (i = 0; i < sc->sc_nqueues; i++)
		rqns[i] = sc->sc_queues[i].q_rx.rx_rqn;

	if (mcx_create_rqt(sc, sc->sc_nqueues, rqns, &sc->sc_rqt) != 0)
		goto down;

	/* one TIR + flow table entry per RSS rule; hash_sel == 0 means
	 * a direct (non-hashed) TIR on the first rx queue */
	start = 0;
	flow_index = 0;
	flow_group = -1;
	for (i = 0; i < __arraycount(mcx_rss_config); i++) {
		rss = &mcx_rss_config[i];
		if (rss->flow_group != flow_group) {
			flow_group = rss->flow_group;
			flow_index = 0;
		}

		if (rss->hash_sel == 0) {
			if (mcx_create_tir_direct(sc, &sc->sc_queues[0].q_rx,
			    &sc->sc_tir[i]) != 0)
				goto down;
		} else {
			if (mcx_create_tir_indirect(sc, sc->sc_rqt,
			    rss->hash_sel, &sc->sc_tir[i]) != 0)
				goto down;
		}

		if (mcx_set_flow_table_entry_proto(sc, flow_group,
		    flow_index, rss->ethertype, rss->ip_proto,
		    MCX_FLOW_CONTEXT_DEST_TYPE_TIR | sc->sc_tir[i]) != 0)
			goto down;
		flow_index++;
	}

	for (i = 0; i < sc->sc_nqueues; i++) {
		struct mcx_queues *q = &sc->sc_queues[i];
		rx = &q->q_rx;
		tx = &q->q_tx;

		/* start the queues */
		if (mcx_ready_sq(sc, tx) != 0)
			goto down;

		if (mcx_ready_rq(sc, rx) != 0)
			goto down;

		mcx_rxr_init(&rx->rx_rxr, 1, (1 << MCX_LOG_RQ_SIZE));
		rx->rx_prod = 0;
		mcx_rx_fill(sc, rx);

		tx->tx_cons = 0;
		tx->tx_prod = 0;
	}

	mcx_calibrate_first(sc);

	SET(ifp->if_flags, IFF_RUNNING);
	CLR(ifp->if_flags, IFF_OACTIVE);
	if_schedule_deferred_start(ifp);

	return 0;
down:
	mcx_stop(ifp, 0);
	return EIO;
}

/*
 * Interface stop: tear down everything mcx_init() built, in reverse
 * dependency order.  Also used by mcx_init() to unwind a partial
 * bring-up, so every destroy is guarded by an "is it set up?" check.
 */
static void
mcx_stop(struct ifnet *ifp, int disable)
{
	struct mcx_softc *sc = ifp->if_softc;
	struct mcx_rss_rule *rss;
	int group, i, flow_group, flow_index;

	CLR(ifp->if_flags, IFF_RUNNING);

	/*
	 * delete flow table entries first, so no packets can arrive
	 * after the barriers
	 */
	if (sc->sc_promisc_flow_enabled)
		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
	if (sc->sc_allmulti_flow_enabled)
		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
	/* entries 0 and 1 are our unicast and broadcast addresses */
	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 0);
	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 1);
	for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
		if (sc->sc_mcast_flows[i][0] != 0) {
			mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
			    sc->sc_mcast_flow_base + i);
		}
	}

	/* walk the RSS rules the same way mcx_init() did to find each
	 * rule's (group, index) pair, deleting entry and TIR */
	flow_group = -1;
	flow_index = 0;
	for (i = 0; i < __arraycount(mcx_rss_config); i++) {
		rss = &mcx_rss_config[i];
		if (rss->flow_group != flow_group) {
			flow_group = rss->flow_group;
			flow_index = 0;
		}

		mcx_delete_flow_table_entry(sc, flow_group, flow_index);

		mcx_destroy_tir(sc, sc->sc_tir[i]);
		sc->sc_tir[i] = 0;

		flow_index++;
	}

	for (i = 0; i < sc->sc_nqueues; i++) {
		callout_halt(&sc->sc_queues[i].q_rx.rx_refill, NULL);
	}

	callout_halt(&sc->sc_calibrate, NULL);

	for (group = 0; group < MCX_NUM_FLOW_GROUPS; group++) {
		if (sc->sc_flow_group[group].g_id != -1)
			mcx_destroy_flow_group(sc, group);
	}

	if (sc->sc_mac_flow_table_id != -1) {
		mcx_destroy_flow_table(sc, sc->sc_mac_flow_table_id);
		sc->sc_mac_flow_table_id = -1;
	}
	if (sc->sc_rss_flow_table_id != -1) {
		mcx_destroy_flow_table(sc, sc->sc_rss_flow_table_id);
		sc->sc_rss_flow_table_id = -1;
	}
	if (sc->sc_rqt != -1) {
		mcx_destroy_rqt(sc, sc->sc_rqt);
		sc->sc_rqt = -1;
	}

	for (i = 0; i < sc->sc_nqueues; i++) {
		struct mcx_queues *q = &sc->sc_queues[i];
		struct mcx_rx *rx = &q->q_rx;
		struct mcx_tx *tx = &q->q_tx;
		struct mcx_cq *cq = &q->q_cq;

		if (rx->rx_rqn != 0)
			mcx_destroy_rq(sc, rx);

		if (tx->tx_sqn != 0)
			mcx_destroy_sq(sc, tx);

		if (tx->tx_slots != NULL) {
			mcx_free_slots(sc, tx->tx_slots,
			    (1 << MCX_LOG_SQ_SIZE), (1 << MCX_LOG_SQ_SIZE));
			tx->tx_slots = NULL;
		}
		if (rx->rx_slots != NULL) {
			mcx_free_slots(sc, rx->rx_slots,
			    (1 << MCX_LOG_RQ_SIZE), (1 << MCX_LOG_RQ_SIZE));
			rx->rx_slots = NULL;
		}

		if (cq->cq_n != 0)
			mcx_destroy_cq(sc, cq);
	}
	if (sc->sc_tis != 0) {
		mcx_destroy_tis(sc, sc->sc_tis);
		sc->sc_tis = 0;
	}
}

/*
 * Interface ioctl handler.  SIOCADDMULTI/SIOCDELMULTI maintain the
 * sc_mcast_flows[] table of hardware multicast filter entries, falling
 * back to IFF_ALLMULTI when the table overflows or a multicast range
 * is requested; everything else goes to ether_ioctl().  ENETRESET from
 * any path triggers a filter reload via mcx_iff() while running.
 */
static int
mcx_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ethercom *ec = &sc->sc_ec;
	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
	struct ether_multi *enm;
	struct ether_multistep step;
	int s, i, flags, error = 0;
	uint32_t dest;

	s = splnet();
	switch (cmd) {

	case SIOCADDMULTI:
		if (ether_addmulti(ifreq_getaddr(cmd, ifr), &sc->sc_ec) == ENETRESET) {
			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
			if (error != 0) {
				splx(s);
				return (error);
			}

			dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
			    sc->sc_rss_flow_table_id;

			/* install the address in the first free slot */
			for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
				if (sc->sc_mcast_flows[i][0] == 0) {
					memcpy(sc->sc_mcast_flows[i], addrlo,
					    ETHER_ADDR_LEN);
					if (ISSET(ifp->if_flags, IFF_RUNNING)) {
						mcx_set_flow_table_entry_mac(sc,
						    MCX_FLOW_GROUP_MAC,
						    sc->sc_mcast_flow_base + i,
						    sc->sc_mcast_flows[i], dest);
					}
					break;
				}
			}

			if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
				/* table full: count it and go allmulti */
				if (i == MCX_NUM_MCAST_FLOWS) {
					SET(ifp->if_flags, IFF_ALLMULTI);
					sc->sc_extra_mcast++;
					error = ENETRESET;
				}

				/* a range (lo != hi) can't be matched
				 * by a single entry */
				if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN)) {
					SET(ifp->if_flags, IFF_ALLMULTI);
					error = ENETRESET;
				}
			}
		}
		break;

	case SIOCDELMULTI:
		if (ether_delmulti(ifreq_getaddr(cmd, ifr), &sc->sc_ec)
		    == ENETRESET) {
			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
			if (error != 0) {
				splx(s);
				return (error);
			}

			for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
				if (memcmp(sc->sc_mcast_flows[i], addrlo,
				    ETHER_ADDR_LEN) == 0) {
					if (ISSET(ifp->if_flags, IFF_RUNNING)) {
						mcx_delete_flow_table_entry(sc,
						    MCX_FLOW_GROUP_MAC,
						    sc->sc_mcast_flow_base + i);
					}
					sc->sc_mcast_flows[i][0] = 0;
					break;
				}
			}

			/* not in the table: it was one of the overflow
			 * addresses that forced allmulti */
			if (i == MCX_NUM_MCAST_FLOWS)
				sc->sc_extra_mcast--;

			/* leave allmulti only if no range entries remain */
			if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
			    sc->sc_extra_mcast == 0) {
				flags = 0;
				ETHER_LOCK(ec);
				ETHER_FIRST_MULTI(step, ec, enm);
				while (enm != NULL) {
					if (memcmp(enm->enm_addrlo,
					    enm->enm_addrhi, ETHER_ADDR_LEN)) {
						SET(flags, IFF_ALLMULTI);
						break;
					}
					ETHER_NEXT_MULTI(step, enm);
				}
				ETHER_UNLOCK(ec);
				if (!ISSET(flags, IFF_ALLMULTI)) {
					CLR(ifp->if_flags, IFF_ALLMULTI);
					error = ENETRESET;
				}
			}
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			mcx_iff(sc);
		error = 0;
	}
	splx(s);

	return (error);
}

#if 0
/*
 * Read a 256-byte SFF eeprom page from the transceiver module via the
 * MCIA access register, MCX_MCIA_EEPROM_BYTES at a time.  Currently
 * compiled out.
 */
static int
mcx_get_sffpage(struct ifnet *ifp, struct if_sffpage *sff)
{
	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
	struct mcx_reg_mcia mcia;
	struct mcx_reg_pmlp pmlp;
	int offset, error;

	/* get module number */
	memset(&pmlp, 0, sizeof(pmlp));
	pmlp.rp_local_port = 1;
	error = mcx_access_hca_reg(sc, MCX_REG_PMLP, MCX_REG_OP_READ, &pmlp,
	    sizeof(pmlp));
	if (error != 0) {
		printf("%s: unable to get eeprom module number\n",
		    DEVNAME(sc));
		return error;
	}

	for (offset = 0; offset < 256; offset += MCX_MCIA_EEPROM_BYTES) {
		memset(&mcia,
		    0, sizeof(mcia));
		mcia.rm_l = 0;
		mcia.rm_module = be32toh(pmlp.rp_lane0_mapping) &
		    MCX_PMLP_MODULE_NUM_MASK;
		mcia.rm_i2c_addr = sff->sff_addr / 2; /* apparently */
		mcia.rm_page_num = sff->sff_page;
		mcia.rm_dev_addr = htobe16(offset);
		mcia.rm_size = htobe16(MCX_MCIA_EEPROM_BYTES);

		error = mcx_access_hca_reg(sc, MCX_REG_MCIA, MCX_REG_OP_READ,
		    &mcia, sizeof(mcia));
		if (error != 0) {
			printf("%s: unable to read eeprom at %x\n",
			    DEVNAME(sc), offset);
			return error;
		}

		memcpy(sff->sff_data + offset, mcia.rm_data,
		    MCX_MCIA_EEPROM_BYTES);
	}

	return 0;
}
#endif

/*
 * Load an outgoing mbuf chain into a tx slot's DMA map, defragmenting
 * once if the chain has too many segments.  Returns 0 on success and
 * records the mbuf in the slot; returns 1 if the caller must free it.
 */
static int
mcx_load_mbuf(struct mcx_softc *sc, struct mcx_slot *ms, struct mbuf *m)
{
	switch (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;

	case EFBIG:
		/* too many segments: compact the chain and retry once */
		if (m_defrag(m, M_DONTWAIT) != NULL &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (1);
	}

	ms->ms_m = m;
	return (0);
}

/*
 * Transmit path shared by if_start and if_transmit (is_transmit picks
 * the source queue).  Builds send WQEs — control segment, inline
 * ethernet header, data segments spilling into extra slots — then
 * rings the tx doorbell and writes the last WQE's first 8 bytes to the
 * blueflame register.  Called with tx->tx_lock held.
 */
static void
mcx_send_common_locked(struct ifnet *ifp, struct mcx_tx *tx, bool is_transmit)
{
	struct mcx_softc *sc = ifp->if_softc;
	struct mcx_sq_entry *sq, *sqe;
	struct mcx_sq_entry_seg *sqs;
	struct mcx_slot *ms;
	bus_dmamap_t map;
	struct mbuf *m;
	u_int idx, free, used;
	uint64_t *bf;
	uint32_t csum;
	size_t bf_base;
	int i, seg, nseg;

	KASSERT(mutex_owned(&tx->tx_lock));

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	bf_base = (tx->tx_uar * MCX_PAGE_SIZE) + MCX_UAR_BF;

	idx = tx->tx_prod % (1 << MCX_LOG_SQ_SIZE);
	free = (tx->tx_cons + (1 << MCX_LOG_SQ_SIZE)) - tx->tx_prod;

	used = 0;
	bf = NULL;

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);

	sq = (struct mcx_sq_entry *)MCX_DMA_KVA(&tx->tx_sq_mem);

	for (;;) {
		/* stop while the worst-case packet still fits */
		if (used + MCX_SQ_ENTRY_MAX_SLOTS >= free) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		if (is_transmit) {
			m = pcq_get(tx->tx_pcq);
		} else {
			IFQ_DEQUEUE(&ifp->if_snd, m);
		}
		if (m == NULL) {
			break;
		}

		sqe = sq + idx;
		ms = &tx->tx_slots[idx];
		memset(sqe, 0, sizeof(*sqe));

		/* ctrl segment */
		sqe->sqe_opcode_index = htobe32(MCX_SQE_WQE_OPCODE_SEND |
		    ((tx->tx_prod & 0xffff) << MCX_SQE_WQE_INDEX_SHIFT));
		/* always generate a completion event */
		sqe->sqe_signature = htobe32(MCX_SQE_CE_CQE_ALWAYS);

		/* eth segment */
		csum = 0;
		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
			csum |= MCX_SQE_L3_CSUM;
		if (m->m_pkthdr.csum_flags &
		    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6))
			csum |= MCX_SQE_L4_CSUM;
		sqe->sqe_mss_csum = htobe32(csum);
		sqe->sqe_inline_header_size = htobe16(MCX_SQ_INLINE_SIZE);
		if (vlan_has_tag(m)) {
			/* insert the 802.1Q tag while copying the header
			 * into the WQE's inline area */
			struct ether_vlan_header *evh;
			evh = (struct ether_vlan_header *)
			    &sqe->sqe_inline_headers;

			m_copydata(m, 0, ETHER_HDR_LEN, evh);
			evh->evl_proto = evh->evl_encap_proto;
			evh->evl_encap_proto = htons(ETHERTYPE_VLAN);
			evh->evl_tag = htons(vlan_get_tag(m));
			m_adj(m, ETHER_HDR_LEN);
		} else {
			m_copydata(m, 0, MCX_SQ_INLINE_SIZE,
			    sqe->sqe_inline_headers);
			m_adj(m, MCX_SQ_INLINE_SIZE);
		}

		if (mcx_load_mbuf(sc, ms, m) != 0) {
			m_freem(m);
			if_statinc(ifp, if_oerrors);
			continue;
		}
		/* remember the last WQE for the blueflame write below */
		bf = (uint64_t *)sqe;

		if (ifp->if_bpf != NULL)
			bpf_mtap2(ifp->if_bpf, sqe->sqe_inline_headers,
			    MCX_SQ_INLINE_SIZE, m, BPF_D_OUT);

		map = ms->ms_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* ds count = ctrl + eth + inline (3) + data segments */
		sqe->sqe_ds_sq_num =
		    htobe32((tx->tx_sqn << MCX_SQE_SQ_NUM_SHIFT) |
		    (map->dm_nsegs + 3));

		/* data segment - first wqe has one segment */
		sqs = sqe->sqe_segs;
		seg = 0;
		nseg = 1;
		for (i = 0; i < map->dm_nsegs; i++) {
			if (seg == nseg) {
				/* next slot */
				idx++;
				if (idx == (1 << MCX_LOG_SQ_SIZE))
					idx = 0;
				tx->tx_prod++;
				used++;

				sqs = (struct mcx_sq_entry_seg *)(sq + idx);
				seg = 0;
				nseg = MCX_SQ_SEGS_PER_SLOT;
			}
			sqs[seg].sqs_byte_count =
			    htobe32(map->dm_segs[i].ds_len);
			sqs[seg].sqs_lkey = htobe32(sc->sc_lkey);
			sqs[seg].sqs_addr = htobe64(map->dm_segs[i].ds_addr);
			seg++;
		}

		idx++;
		if (idx == (1 << MCX_LOG_SQ_SIZE))
			idx = 0;
		tx->tx_prod++;
		used++;
	}

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_PREWRITE);

	if (used) {
		bus_size_t blueflame;

		/* publish the new producer index via the doorbell record */
		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
		    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
		be32enc(MCX_DMA_OFF(&sc->sc_doorbell_mem, tx->tx_doorbell),
		    tx->tx_prod & MCX_WQ_DOORBELL_MASK);
		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
		    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

		/*
		 * write the first 64 bits of the last sqe we produced
		 * to the blue flame buffer
		 */

		blueflame = bf_base + tx->tx_bf_offset;
		bus_space_write_8(sc->sc_memt, sc->sc_memh,
		    blueflame, *bf);
		mcx_bar(sc, blueflame, sizeof(*bf), BUS_SPACE_BARRIER_WRITE);

		/* next write goes to the other buffer */
		tx->tx_bf_offset ^= sc->sc_bf_size;
	}
}

static void
mcx_start(struct ifnet *ifp)
{
	struct mcx_softc *sc = ifp->if_softc;
	/* mcx_start() always uses TX ring[0] */
	struct mcx_tx *tx =
	    &sc->sc_queues[0].q_tx;

	mutex_enter(&tx->tx_lock);
	if (!ISSET(ifp->if_flags, IFF_OACTIVE)) {
		mcx_send_common_locked(ifp, tx, false);
	}
	mutex_exit(&tx->tx_lock);
}

/*
 * if_transmit handler: pick a TX ring by the current CPU index, queue
 * the packet on that ring's pcq, and either drain it immediately (if
 * the ring lock is uncontended) or hand off to the ring's softint.
 */
static int
mcx_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct mcx_softc *sc = ifp->if_softc;
	struct mcx_tx *tx;

	tx = &sc->sc_queues[cpu_index(curcpu()) % sc->sc_nqueues].q_tx;
	if (__predict_false(!pcq_put(tx->tx_pcq, m))) {
		m_freem(m);
		return ENOBUFS;
	}

	if (mutex_tryenter(&tx->tx_lock)) {
		mcx_send_common_locked(ifp, tx, true);
		mutex_exit(&tx->tx_lock);
	} else {
		/* someone else holds the lock; let the softint drain */
		softint_schedule(tx->tx_softint);
	}

	return 0;
}

/* Softint handler: drain a TX ring's pcq when mcx_transmit() deferred. */
static void
mcx_deferred_transmit(void *arg)
{
	struct mcx_tx *tx = arg;
	struct mcx_softc *sc = tx->tx_softc;
	struct ifnet *ifp = &sc->sc_ec.ec_if;

	mutex_enter(&tx->tx_lock);
	if (pcq_peek(tx->tx_pcq) != NULL) {
		mcx_send_common_locked(ifp, tx, true);
	}
	mutex_exit(&tx->tx_lock);
}


/*
 * Read the port's ethernet protocol capability bits from the PTYS
 * register and add an ifmedia entry for each type we have a mapping for
 * in mcx_eth_cap_map.
 */
static void
mcx_media_add_types(struct mcx_softc *sc)
{
	struct mcx_reg_ptys ptys;
	int i;
	uint32_t proto_cap;

	memset(&ptys, 0, sizeof(ptys));
	ptys.rp_local_port = 1;
	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
	    sizeof(ptys)) != 0) {
		printf("%s: unable to read port type/speed\n", DEVNAME(sc));
		return;
	}

	proto_cap = be32toh(ptys.rp_eth_proto_cap);
	for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
		const struct mcx_eth_proto_capability *cap;
		if (!ISSET(proto_cap, 1U << i))
			continue;

		cap = &mcx_eth_cap_map[i];
		if (cap->cap_media == 0)
			continue;

		ifmedia_add(&sc->sc_media, IFM_ETHER | cap->cap_media, 0, NULL);
	}
}

/*
 * ifmedia status callback: report link validity/activity and the
 * currently operating media type, derived from the PTYS
 * eth_proto_oper bits.
 */
static void
mcx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
	struct mcx_reg_ptys ptys;
	int i;
	uint32_t proto_oper;
	uint64_t media_oper;

	memset(&ptys, 0, sizeof(ptys));
	ptys.rp_local_port = 1;
	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;

	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
	    sizeof(ptys)) != 0) {
		printf("%s: unable to read port type/speed\n", DEVNAME(sc));
		return;
	}

	proto_oper = be32toh(ptys.rp_eth_proto_oper);

	media_oper = 0;

	/* last matching map entry with a media type wins */
	for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
		const struct mcx_eth_proto_capability *cap;
		if (!ISSET(proto_oper, 1U << i))
			continue;

		cap = &mcx_eth_cap_map[i];

		if (cap->cap_media != 0)
			media_oper = cap->cap_media;
	}

	ifmr->ifm_status = IFM_AVALID;
	if (proto_oper != 0) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active = IFM_ETHER | IFM_AUTO | media_oper;
		/* txpause, rxpause, duplex? */
	}
}

/*
 * ifmedia change callback: compute the admin protocol mask (all
 * capabilities for IFM_AUTO, otherwise the single selected type), then
 * bounce the port down/up through PAOS around a PTYS write so the new
 * setting takes effect.
 */
static int
mcx_media_change(struct ifnet *ifp)
{
	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
	struct mcx_reg_ptys ptys;
	struct mcx_reg_paos paos;
	uint32_t media;
	int i, error;

	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return EINVAL;

	error = 0;

	if (IFM_SUBTYPE(sc->sc_media.ifm_media) == IFM_AUTO) {
		/* read ptys to get supported media */
		memset(&ptys, 0, sizeof(ptys));
		ptys.rp_local_port = 1;
		ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
		if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ,
		    &ptys, sizeof(ptys)) != 0) {
			printf("%s: unable to read port type/speed\n",
			    DEVNAME(sc));
			return EIO;
		}

		media = be32toh(ptys.rp_eth_proto_cap);
	} else {
		/* map media type */
		media = 0;
		for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
			const struct mcx_eth_proto_capability *cap;

			cap = &mcx_eth_cap_map[i];
			if (cap->cap_media ==
			    IFM_SUBTYPE(sc->sc_media.ifm_media)) {
				media = (1 << i);
				break;
			}
		}
	}

	/* disable the port */
	memset(&paos, 0, sizeof(paos));
	paos.rp_local_port = 1;
	paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_DOWN;
	paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
	if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
	    sizeof(paos)) != 0) {
		printf("%s: unable to set port state to down\n", DEVNAME(sc));
		return EIO;
	}

	memset(&ptys, 0, sizeof(ptys));
	ptys.rp_local_port = 1;
	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
	ptys.rp_eth_proto_admin = htobe32(media);
	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_WRITE, &ptys,
	    sizeof(ptys)) != 0) {
		printf("%s: unable to set port media type/speed\n",
		    DEVNAME(sc));
		error = EIO;
	}

	/* re-enable the port to start negotiation */
	memset(&paos, 0, sizeof(paos));
	paos.rp_local_port = 1;
	paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_UP;
	paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
	if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
	    sizeof(paos)) != 0) {
		printf("%s: unable to set port state to up\n", DEVNAME(sc));
		error = EIO;
	}

	return error;
}

/*
 * Workqueue handler run on port-change events: re-read PTYS to derive
 * link state and baudrate, and report a link state transition to the
 * network stack if it changed.
 */
static void
mcx_port_change(struct work *wk, void *xsc)
{
	struct mcx_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mcx_reg_ptys ptys = {
		.rp_local_port = 1,
		.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH,
	};
	int link_state = LINK_STATE_DOWN;

	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
	    sizeof(ptys)) == 0) {
		uint32_t proto_oper = be32toh(ptys.rp_eth_proto_oper);
		uint64_t baudrate = 0;
		unsigned int i;

		if (proto_oper != 0)
			link_state = LINK_STATE_UP;

		/* first operating capability with a known baudrate wins */
		for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
			const struct mcx_eth_proto_capability *cap;
			if (!ISSET(proto_oper, 1U << i))
				continue;

			cap = &mcx_eth_cap_map[i];
			if (cap->cap_baudrate == 0)
				continue;

			baudrate = cap->cap_baudrate;
			break;
		}

		ifp->if_baudrate = baudrate;
	}

	if (link_state != ifp->if_link_state) {
		if_link_state_change(ifp, link_state);
	}
}


/* Read a 32-bit big-endian device register and return it host-endian. */
static inline uint32_t
mcx_rd(struct mcx_softc *sc, bus_size_t r)
{
	uint32_t word;

	word = bus_space_read_4(sc->sc_memt, sc->sc_memh, r);

	return (be32toh(word));
}

/* Write a 32-bit device register, converting to big-endian. */
static inline void
mcx_wr(struct mcx_softc *sc, bus_size_t r, uint32_t v)
{
	bus_space_write_4(sc->sc_memt, sc->sc_memh, r, htobe32(v));
}

/*
 * Bus space barrier wrapper.  Compiled to a no-op on NetBSD (see the
 * #ifndef below); kept so the OpenBSD-derived code structure is intact.
 */
static inline void
mcx_bar(struct mcx_softc *sc, bus_size_t r, bus_size_t l, int f)
{
#ifndef __NetBSD__
	bus_space_barrier(sc->sc_memt, sc->sc_memh, r, l, f);
#endif
}

/*
 * Read the device's 64-bit free-running internal timer.  The high word
 * is re-read until it is stable around the low-word read, so a carry
 * between the two 32-bit reads cannot produce a torn value.
 */
static uint64_t
mcx_timer(struct mcx_softc *sc)
{
	uint32_t hi, lo, ni;

	hi = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
	for (;;) {
		lo = mcx_rd(sc, MCX_INTERNAL_TIMER_L);
		mcx_bar(sc, MCX_INTERNAL_TIMER_L, 8, BUS_SPACE_BARRIER_READ);
		ni = mcx_rd(sc, MCX_INTERNAL_TIMER_H);

		if (ni == hi)
			break;

		hi = ni;
	}

	return (((uint64_t)hi << 32) | (uint64_t)lo);
}

/*
 * Allocate a single-segment, coherent, zeroed DMA memory region of
 * `size' bytes with the given alignment and load it into a fresh map.
 * Returns 0 on success, 1 on failure (with everything cleaned up).
 */
static int
mcx_dmamem_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
    bus_size_t size, u_int align)
{
	mxm->mxm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
	    mxm->mxm_size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &mxm->mxm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
	    BUS_DMA_WAITOK) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
	    mxm->mxm_size, &mxm->mxm_kva,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	mcx_dmamem_zero(mxm);

	return (0);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
	return (1);
}

/* Zero the entire mapped region of a mcx_dmamem. */
static void
mcx_dmamem_zero(struct mcx_dmamem *mxm)
{
	memset(MCX_DMA_KVA(mxm), 0, MCX_DMA_LEN(mxm));
}

/* Tear down a mcx_dmamem in exact reverse order of mcx_dmamem_alloc(). */
static void
mcx_dmamem_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
{
	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
}

/*
 * Allocate `pages' MCX_PAGE_SIZE pages (not necessarily contiguous) for
 * the device's firmware page requests and load them raw into a DMA map.
 * The segment array is shrunk to the count actually returned by
 * bus_dmamem_alloc().  Returns 0 on success, -1 on failure.
 */
static int
mcx_hwmem_alloc(struct mcx_softc *sc, struct mcx_hwmem *mhm, unsigned int pages)
{
	bus_dma_segment_t *segs;
	bus_size_t len = pages * MCX_PAGE_SIZE;
	size_t seglen;

	segs = kmem_alloc(sizeof(*segs) * pages, KM_SLEEP);
	seglen = sizeof(*segs) * pages;

	if (bus_dmamem_alloc(sc->sc_dmat, len, MCX_PAGE_SIZE, 0,
	    segs, pages, &mhm->mhm_seg_count, BUS_DMA_NOWAIT) != 0)
		goto free_segs;

	if (mhm->mhm_seg_count < pages) {
		size_t nseglen;

		/* fewer segments than pages: keep a right-sized copy */
		mhm->mhm_segs = kmem_alloc(
		    sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count, KM_SLEEP);

		nseglen = sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count;

		memcpy(mhm->mhm_segs, segs, nseglen);

		kmem_free(segs, seglen);

		/* segs/seglen now track the kept array for error paths */
		segs = mhm->mhm_segs;
		seglen = nseglen;
	} else
		mhm->mhm_segs = segs;

	if (bus_dmamap_create(sc->sc_dmat, len, pages, MCX_PAGE_SIZE,
	    MCX_PAGE_SIZE, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW /*|BUS_DMA_64BIT*/,
	    &mhm->mhm_map) != 0)
		goto free_dmamem;

	if (bus_dmamap_load_raw(sc->sc_dmat, mhm->mhm_map,
	    mhm->mhm_segs, mhm->mhm_seg_count, len, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_PRERW);

	mhm->mhm_npages = pages;

	return (0);

destroy:
	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
free_dmamem:
	bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
free_segs:
	kmem_free(segs, seglen);
	mhm->mhm_segs = NULL;

	return (-1);
}

/*
 * Release hwmem allocated by mcx_hwmem_alloc().  Safe to call on a
 * never-allocated (or already-freed) mhm: mhm_npages == 0 is a no-op.
 */
static void
mcx_hwmem_free(struct mcx_softc *sc, struct mcx_hwmem *mhm)
{
	if (mhm->mhm_npages == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_POSTRW);

	bus_dmamap_unload(sc->sc_dmat, mhm->mhm_map);
bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map); 8362 bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count); 8363 kmem_free(mhm->mhm_segs, sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count); 8364 8365 mhm->mhm_npages = 0; 8366 } 8367 8368 #if NKSTAT > 0 8369 struct mcx_ppcnt { 8370 char name[KSTAT_KV_NAMELEN]; 8371 enum kstat_kv_unit unit; 8372 }; 8373 8374 static const struct mcx_ppcnt mcx_ppcnt_ieee8023_tpl[] = { 8375 { "Good Tx", KSTAT_KV_U_PACKETS, }, 8376 { "Good Rx", KSTAT_KV_U_PACKETS, }, 8377 { "FCS errs", KSTAT_KV_U_PACKETS, }, 8378 { "Alignment Errs", KSTAT_KV_U_PACKETS, }, 8379 { "Good Tx", KSTAT_KV_U_BYTES, }, 8380 { "Good Rx", KSTAT_KV_U_BYTES, }, 8381 { "Multicast Tx", KSTAT_KV_U_PACKETS, }, 8382 { "Broadcast Tx", KSTAT_KV_U_PACKETS, }, 8383 { "Multicast Rx", KSTAT_KV_U_PACKETS, }, 8384 { "Broadcast Rx", KSTAT_KV_U_PACKETS, }, 8385 { "In Range Len", KSTAT_KV_U_PACKETS, }, 8386 { "Out Of Range Len", KSTAT_KV_U_PACKETS, }, 8387 { "Frame Too Long", KSTAT_KV_U_PACKETS, }, 8388 { "Symbol Errs", KSTAT_KV_U_PACKETS, }, 8389 { "MAC Ctrl Tx", KSTAT_KV_U_PACKETS, }, 8390 { "MAC Ctrl Rx", KSTAT_KV_U_PACKETS, }, 8391 { "MAC Ctrl Unsup", KSTAT_KV_U_PACKETS, }, 8392 { "Pause Rx", KSTAT_KV_U_PACKETS, }, 8393 { "Pause Tx", KSTAT_KV_U_PACKETS, }, 8394 }; 8395 CTASSERT(__arraycount(mcx_ppcnt_ieee8023_tpl) == mcx_ppcnt_ieee8023_count); 8396 8397 static const struct mcx_ppcnt mcx_ppcnt_rfc2863_tpl[] = { 8398 { "Rx Bytes", KSTAT_KV_U_BYTES, }, 8399 { "Rx Unicast", KSTAT_KV_U_PACKETS, }, 8400 { "Rx Discards", KSTAT_KV_U_PACKETS, }, 8401 { "Rx Errors", KSTAT_KV_U_PACKETS, }, 8402 { "Rx Unknown Proto", KSTAT_KV_U_PACKETS, }, 8403 { "Tx Bytes", KSTAT_KV_U_BYTES, }, 8404 { "Tx Unicast", KSTAT_KV_U_PACKETS, }, 8405 { "Tx Discards", KSTAT_KV_U_PACKETS, }, 8406 { "Tx Errors", KSTAT_KV_U_PACKETS, }, 8407 { "Rx Multicast", KSTAT_KV_U_PACKETS, }, 8408 { "Rx Broadcast", KSTAT_KV_U_PACKETS, }, 8409 { "Tx Multicast", KSTAT_KV_U_PACKETS, }, 8410 { "Tx Broadcast", KSTAT_KV_U_PACKETS, 
}, 8411 }; 8412 CTASSERT(__arraycount(mcx_ppcnt_rfc2863_tpl) == mcx_ppcnt_rfc2863_count); 8413 8414 static const struct mcx_ppcnt mcx_ppcnt_rfc2819_tpl[] = { 8415 { "Drop Events", KSTAT_KV_U_PACKETS, }, 8416 { "Octets", KSTAT_KV_U_BYTES, }, 8417 { "Packets", KSTAT_KV_U_PACKETS, }, 8418 { "Broadcasts", KSTAT_KV_U_PACKETS, }, 8419 { "Multicasts", KSTAT_KV_U_PACKETS, }, 8420 { "CRC Align Errs", KSTAT_KV_U_PACKETS, }, 8421 { "Undersize", KSTAT_KV_U_PACKETS, }, 8422 { "Oversize", KSTAT_KV_U_PACKETS, }, 8423 { "Fragments", KSTAT_KV_U_PACKETS, }, 8424 { "Jabbers", KSTAT_KV_U_PACKETS, }, 8425 { "Collisions", KSTAT_KV_U_NONE, }, 8426 { "64B", KSTAT_KV_U_PACKETS, }, 8427 { "65-127B", KSTAT_KV_U_PACKETS, }, 8428 { "128-255B", KSTAT_KV_U_PACKETS, }, 8429 { "256-511B", KSTAT_KV_U_PACKETS, }, 8430 { "512-1023B", KSTAT_KV_U_PACKETS, }, 8431 { "1024-1518B", KSTAT_KV_U_PACKETS, }, 8432 { "1519-2047B", KSTAT_KV_U_PACKETS, }, 8433 { "2048-4095B", KSTAT_KV_U_PACKETS, }, 8434 { "4096-8191B", KSTAT_KV_U_PACKETS, }, 8435 { "8192-10239B", KSTAT_KV_U_PACKETS, }, 8436 }; 8437 CTASSERT(__arraycount(mcx_ppcnt_rfc2819_tpl) == mcx_ppcnt_rfc2819_count); 8438 8439 static const struct mcx_ppcnt mcx_ppcnt_rfc3635_tpl[] = { 8440 { "Alignment Errs", KSTAT_KV_U_PACKETS, }, 8441 { "FCS Errs", KSTAT_KV_U_PACKETS, }, 8442 { "Single Colls", KSTAT_KV_U_PACKETS, }, 8443 { "Multiple Colls", KSTAT_KV_U_PACKETS, }, 8444 { "SQE Test Errs", KSTAT_KV_U_NONE, }, 8445 { "Deferred Tx", KSTAT_KV_U_PACKETS, }, 8446 { "Late Colls", KSTAT_KV_U_NONE, }, 8447 { "Exess Colls", KSTAT_KV_U_NONE, }, 8448 { "Int MAC Tx Errs", KSTAT_KV_U_PACKETS, }, 8449 { "CSM Sense Errs", KSTAT_KV_U_NONE, }, 8450 { "Too Long", KSTAT_KV_U_PACKETS, }, 8451 { "Int MAC Rx Errs", KSTAT_KV_U_PACKETS, }, 8452 { "Symbol Errs", KSTAT_KV_U_NONE, }, 8453 { "Unknown Control", KSTAT_KV_U_PACKETS, }, 8454 { "Pause Rx", KSTAT_KV_U_PACKETS, }, 8455 { "Pause Tx", KSTAT_KV_U_PACKETS, }, 8456 }; 8457 CTASSERT(__arraycount(mcx_ppcnt_rfc3635_tpl) == 
mcx_ppcnt_rfc3635_count); 8458 8459 struct mcx_kstat_ppcnt { 8460 const char *ksp_name; 8461 const struct mcx_ppcnt *ksp_tpl; 8462 unsigned int ksp_n; 8463 uint8_t ksp_grp; 8464 }; 8465 8466 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_ieee8023 = { 8467 .ksp_name = "ieee802.3", 8468 .ksp_tpl = mcx_ppcnt_ieee8023_tpl, 8469 .ksp_n = __arraycount(mcx_ppcnt_ieee8023_tpl), 8470 .ksp_grp = MCX_REG_PPCNT_GRP_IEEE8023, 8471 }; 8472 8473 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc2863 = { 8474 .ksp_name = "rfc2863", 8475 .ksp_tpl = mcx_ppcnt_rfc2863_tpl, 8476 .ksp_n = __arraycount(mcx_ppcnt_rfc2863_tpl), 8477 .ksp_grp = MCX_REG_PPCNT_GRP_RFC2863, 8478 }; 8479 8480 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc2819 = { 8481 .ksp_name = "rfc2819", 8482 .ksp_tpl = mcx_ppcnt_rfc2819_tpl, 8483 .ksp_n = __arraycount(mcx_ppcnt_rfc2819_tpl), 8484 .ksp_grp = MCX_REG_PPCNT_GRP_RFC2819, 8485 }; 8486 8487 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc3635 = { 8488 .ksp_name = "rfc3635", 8489 .ksp_tpl = mcx_ppcnt_rfc3635_tpl, 8490 .ksp_n = __arraycount(mcx_ppcnt_rfc3635_tpl), 8491 .ksp_grp = MCX_REG_PPCNT_GRP_RFC3635, 8492 }; 8493 8494 static int mcx_kstat_ppcnt_read(struct kstat *); 8495 8496 static void mcx_kstat_attach_tmps(struct mcx_softc *sc); 8497 static void mcx_kstat_attach_queues(struct mcx_softc *sc); 8498 8499 static struct kstat * 8500 mcx_kstat_attach_ppcnt(struct mcx_softc *sc, 8501 const struct mcx_kstat_ppcnt *ksp) 8502 { 8503 struct kstat *ks; 8504 struct kstat_kv *kvs; 8505 unsigned int i; 8506 8507 ks = kstat_create(DEVNAME(sc), 0, ksp->ksp_name, 0, KSTAT_T_KV, 0); 8508 if (ks == NULL) 8509 return (NULL); 8510 8511 kvs = mallocarray(ksp->ksp_n, sizeof(*kvs), 8512 M_DEVBUF, M_WAITOK); 8513 8514 for (i = 0; i < ksp->ksp_n; i++) { 8515 const struct mcx_ppcnt *tpl = &ksp->ksp_tpl[i]; 8516 8517 kstat_kv_unit_init(&kvs[i], tpl->name, 8518 KSTAT_KV_T_COUNTER64, tpl->unit); 8519 } 8520 8521 ks->ks_softc = sc; 8522 ks->ks_ptr = (void 
*)ksp; 8523 ks->ks_data = kvs; 8524 ks->ks_datalen = ksp->ksp_n * sizeof(*kvs); 8525 ks->ks_read = mcx_kstat_ppcnt_read; 8526 8527 kstat_install(ks); 8528 8529 return (ks); 8530 } 8531 8532 static void 8533 mcx_kstat_attach(struct mcx_softc *sc) 8534 { 8535 sc->sc_kstat_ieee8023 = mcx_kstat_attach_ppcnt(sc, 8536 &mcx_kstat_ppcnt_ieee8023); 8537 sc->sc_kstat_rfc2863 = mcx_kstat_attach_ppcnt(sc, 8538 &mcx_kstat_ppcnt_rfc2863); 8539 sc->sc_kstat_rfc2819 = mcx_kstat_attach_ppcnt(sc, 8540 &mcx_kstat_ppcnt_rfc2819); 8541 sc->sc_kstat_rfc3635 = mcx_kstat_attach_ppcnt(sc, 8542 &mcx_kstat_ppcnt_rfc3635); 8543 8544 mcx_kstat_attach_tmps(sc); 8545 mcx_kstat_attach_queues(sc); 8546 } 8547 8548 static int 8549 mcx_kstat_ppcnt_read(struct kstat *ks) 8550 { 8551 struct mcx_softc *sc = ks->ks_softc; 8552 struct mcx_kstat_ppcnt *ksp = ks->ks_ptr; 8553 struct mcx_reg_ppcnt ppcnt = { 8554 .ppcnt_grp = ksp->ksp_grp, 8555 .ppcnt_local_port = 1, 8556 }; 8557 struct kstat_kv *kvs = ks->ks_data; 8558 uint64_t *vs = (uint64_t *)&ppcnt.ppcnt_counter_set; 8559 unsigned int i; 8560 int rv; 8561 8562 KERNEL_LOCK(); /* XXX */ 8563 rv = mcx_access_hca_reg(sc, MCX_REG_PPCNT, MCX_REG_OP_READ, 8564 &ppcnt, sizeof(ppcnt)); 8565 KERNEL_UNLOCK(); 8566 if (rv != 0) 8567 return (EIO); 8568 8569 nanouptime(&ks->ks_updated); 8570 8571 for (i = 0; i < ksp->ksp_n; i++) 8572 kstat_kv_u64(&kvs[i]) = bemtoh64(&vs[i]); 8573 8574 return (0); 8575 } 8576 8577 struct mcx_kstat_mtmp { 8578 struct kstat_kv ktmp_name; 8579 struct kstat_kv ktmp_temperature; 8580 struct kstat_kv ktmp_threshold_lo; 8581 struct kstat_kv ktmp_threshold_hi; 8582 }; 8583 8584 static const struct mcx_kstat_mtmp mcx_kstat_mtmp_tpl = { 8585 KSTAT_KV_INITIALIZER("name", KSTAT_KV_T_ISTR), 8586 KSTAT_KV_INITIALIZER("temperature", KSTAT_KV_T_TEMP), 8587 KSTAT_KV_INITIALIZER("lo threshold", KSTAT_KV_T_TEMP), 8588 KSTAT_KV_INITIALIZER("hi threshold", KSTAT_KV_T_TEMP), 8589 }; 8590 8591 static const struct timeval mcx_kstat_mtmp_rate = { 1, 0 }; 8592 
8593 static int mcx_kstat_mtmp_read(struct kstat *); 8594 8595 static void 8596 mcx_kstat_attach_tmps(struct mcx_softc *sc) 8597 { 8598 struct kstat *ks; 8599 struct mcx_reg_mcam mcam; 8600 struct mcx_reg_mtcap mtcap; 8601 struct mcx_kstat_mtmp *ktmp; 8602 uint64_t map; 8603 unsigned int i, n; 8604 8605 memset(&mtcap, 0, sizeof(mtcap)); 8606 memset(&mcam, 0, sizeof(mcam)); 8607 8608 if (sc->sc_mcam_reg == 0) { 8609 /* no management capabilities */ 8610 return; 8611 } 8612 8613 if (mcx_access_hca_reg(sc, MCX_REG_MCAM, MCX_REG_OP_READ, 8614 &mcam, sizeof(mcam)) != 0) { 8615 /* unable to check management capabilities? */ 8616 return; 8617 } 8618 8619 if (MCX_BITFIELD_BIT(mcam.mcam_feature_cap_mask, 8620 MCX_MCAM_FEATURE_CAP_SENSOR_MAP) == 0) { 8621 /* no sensor map */ 8622 return; 8623 } 8624 8625 if (mcx_access_hca_reg(sc, MCX_REG_MTCAP, MCX_REG_OP_READ, 8626 &mtcap, sizeof(mtcap)) != 0) { 8627 /* unable to find temperature sensors */ 8628 return; 8629 } 8630 8631 sc->sc_kstat_mtmp_count = mtcap.mtcap_sensor_count; 8632 sc->sc_kstat_mtmp = mallocarray(sc->sc_kstat_mtmp_count, 8633 sizeof(*sc->sc_kstat_mtmp), M_DEVBUF, M_WAITOK); 8634 8635 n = 0; 8636 map = bemtoh64(&mtcap.mtcap_sensor_map); 8637 for (i = 0; i < sizeof(map) * NBBY; i++) { 8638 if (!ISSET(map, (1ULL << i))) 8639 continue; 8640 8641 ks = kstat_create(DEVNAME(sc), 0, "temperature", i, 8642 KSTAT_T_KV, 0); 8643 if (ks == NULL) { 8644 /* unable to attach temperature sensor %u, i */ 8645 continue; 8646 } 8647 8648 ktmp = malloc(sizeof(*ktmp), M_DEVBUF, M_WAITOK|M_ZERO); 8649 *ktmp = mcx_kstat_mtmp_tpl; 8650 8651 ks->ks_data = ktmp; 8652 ks->ks_datalen = sizeof(*ktmp); 8653 TIMEVAL_TO_TIMESPEC(&mcx_kstat_mtmp_rate, &ks->ks_interval); 8654 ks->ks_read = mcx_kstat_mtmp_read; 8655 8656 ks->ks_softc = sc; 8657 kstat_install(ks); 8658 8659 sc->sc_kstat_mtmp[n++] = ks; 8660 if (n >= sc->sc_kstat_mtmp_count) 8661 break; 8662 } 8663 } 8664 8665 static uint64_t 8666 mcx_tmp_to_uK(uint16_t *t) 8667 { 8668 int64_t mt = 
(int16_t)bemtoh16(t); /* 0.125 C units */ 8669 mt *= 1000000 / 8; /* convert to uC */ 8670 mt += 273150000; /* convert to uK */ 8671 8672 return (mt); 8673 } 8674 8675 static int 8676 mcx_kstat_mtmp_read(struct kstat *ks) 8677 { 8678 struct mcx_softc *sc = ks->ks_softc; 8679 struct mcx_kstat_mtmp *ktmp = ks->ks_data; 8680 struct mcx_reg_mtmp mtmp; 8681 int rv; 8682 struct timeval updated; 8683 8684 TIMESPEC_TO_TIMEVAL(&updated, &ks->ks_updated); 8685 8686 if (!ratecheck(&updated, &mcx_kstat_mtmp_rate)) 8687 return (0); 8688 8689 memset(&mtmp, 0, sizeof(mtmp)); 8690 htobem16(&mtmp.mtmp_sensor_index, ks->ks_unit); 8691 8692 KERNEL_LOCK(); /* XXX */ 8693 rv = mcx_access_hca_reg(sc, MCX_REG_MTMP, MCX_REG_OP_READ, 8694 &mtmp, sizeof(mtmp)); 8695 KERNEL_UNLOCK(); 8696 if (rv != 0) 8697 return (EIO); 8698 8699 memset(kstat_kv_istr(&ktmp->ktmp_name), 0, 8700 sizeof(kstat_kv_istr(&ktmp->ktmp_name))); 8701 memcpy(kstat_kv_istr(&ktmp->ktmp_name), 8702 mtmp.mtmp_sensor_name, sizeof(mtmp.mtmp_sensor_name)); 8703 kstat_kv_temp(&ktmp->ktmp_temperature) = 8704 mcx_tmp_to_uK(&mtmp.mtmp_temperature); 8705 kstat_kv_temp(&ktmp->ktmp_threshold_lo) = 8706 mcx_tmp_to_uK(&mtmp.mtmp_temperature_threshold_lo); 8707 kstat_kv_temp(&ktmp->ktmp_threshold_hi) = 8708 mcx_tmp_to_uK(&mtmp.mtmp_temperature_threshold_hi); 8709 8710 TIMEVAL_TO_TIMESPEC(&updated, &ks->ks_updated); 8711 8712 return (0); 8713 } 8714 8715 struct mcx_queuestat { 8716 char name[KSTAT_KV_NAMELEN]; 8717 enum kstat_kv_type type; 8718 }; 8719 8720 static const struct mcx_queuestat mcx_queue_kstat_tpl[] = { 8721 { "RQ SW prod", KSTAT_KV_T_COUNTER64 }, 8722 { "RQ HW prod", KSTAT_KV_T_COUNTER64 }, 8723 { "RQ HW cons", KSTAT_KV_T_COUNTER64 }, 8724 { "RQ HW state", KSTAT_KV_T_ISTR }, 8725 8726 { "SQ SW prod", KSTAT_KV_T_COUNTER64 }, 8727 { "SQ SW cons", KSTAT_KV_T_COUNTER64 }, 8728 { "SQ HW prod", KSTAT_KV_T_COUNTER64 }, 8729 { "SQ HW cons", KSTAT_KV_T_COUNTER64 }, 8730 { "SQ HW state", KSTAT_KV_T_ISTR }, 8731 8732 { "CQ SW cons", 
KSTAT_KV_T_COUNTER64 }, 8733 { "CQ HW prod", KSTAT_KV_T_COUNTER64 }, 8734 { "CQ HW cons", KSTAT_KV_T_COUNTER64 }, 8735 { "CQ HW notify", KSTAT_KV_T_COUNTER64 }, 8736 { "CQ HW solicit", KSTAT_KV_T_COUNTER64 }, 8737 { "CQ HW status", KSTAT_KV_T_ISTR }, 8738 { "CQ HW state", KSTAT_KV_T_ISTR }, 8739 8740 { "EQ SW cons", KSTAT_KV_T_COUNTER64 }, 8741 { "EQ HW prod", KSTAT_KV_T_COUNTER64 }, 8742 { "EQ HW cons", KSTAT_KV_T_COUNTER64 }, 8743 { "EQ HW status", KSTAT_KV_T_ISTR }, 8744 { "EQ HW state", KSTAT_KV_T_ISTR }, 8745 }; 8746 8747 static int mcx_kstat_queue_read(struct kstat *); 8748 8749 static void 8750 mcx_kstat_attach_queues(struct mcx_softc *sc) 8751 { 8752 struct kstat *ks; 8753 struct kstat_kv *kvs; 8754 int q, i; 8755 8756 for (q = 0; q < sc->sc_nqueues; q++) { 8757 ks = kstat_create(DEVNAME(sc), 0, "mcx-queues", q, 8758 KSTAT_T_KV, 0); 8759 if (ks == NULL) { 8760 /* unable to attach queue stats %u, q */ 8761 continue; 8762 } 8763 8764 kvs = mallocarray(nitems(mcx_queue_kstat_tpl), 8765 sizeof(*kvs), M_DEVBUF, M_WAITOK); 8766 8767 for (i = 0; i < nitems(mcx_queue_kstat_tpl); i++) { 8768 const struct mcx_queuestat *tpl = 8769 &mcx_queue_kstat_tpl[i]; 8770 8771 kstat_kv_init(&kvs[i], tpl->name, tpl->type); 8772 } 8773 8774 ks->ks_softc = &sc->sc_queues[q]; 8775 ks->ks_data = kvs; 8776 ks->ks_datalen = nitems(mcx_queue_kstat_tpl) * sizeof(*kvs); 8777 ks->ks_read = mcx_kstat_queue_read; 8778 8779 sc->sc_queues[q].q_kstat = ks; 8780 kstat_install(ks); 8781 } 8782 } 8783 8784 static int 8785 mcx_kstat_queue_read(struct kstat *ks) 8786 { 8787 struct mcx_queues *q = ks->ks_softc; 8788 struct mcx_softc *sc = q->q_sc; 8789 struct kstat_kv *kvs = ks->ks_data; 8790 union { 8791 struct mcx_rq_ctx rq; 8792 struct mcx_sq_ctx sq; 8793 struct mcx_cq_ctx cq; 8794 struct mcx_eq_ctx eq; 8795 } u; 8796 const char *text; 8797 int error = 0; 8798 8799 KERNEL_LOCK(); 8800 8801 if (mcx_query_rq(sc, &q->q_rx, &u.rq) != 0) { 8802 error = EIO; 8803 goto out; 8804 } 8805 8806 
kstat_kv_u64(kvs++) = q->q_rx.rx_prod; 8807 kstat_kv_u64(kvs++) = bemtoh32(&u.rq.rq_wq.wq_sw_counter); 8808 kstat_kv_u64(kvs++) = bemtoh32(&u.rq.rq_wq.wq_hw_counter); 8809 switch ((bemtoh32(&u.rq.rq_flags) & MCX_RQ_CTX_STATE_MASK) >> 8810 MCX_RQ_CTX_STATE_SHIFT) { 8811 case MCX_RQ_CTX_STATE_RST: 8812 text = "RST"; 8813 break; 8814 case MCX_RQ_CTX_STATE_RDY: 8815 text = "RDY"; 8816 break; 8817 case MCX_RQ_CTX_STATE_ERR: 8818 text = "ERR"; 8819 break; 8820 default: 8821 text = "unknown"; 8822 break; 8823 } 8824 strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs))); 8825 kvs++; 8826 8827 if (mcx_query_sq(sc, &q->q_tx, &u.sq) != 0) { 8828 error = EIO; 8829 goto out; 8830 } 8831 8832 kstat_kv_u64(kvs++) = q->q_tx.tx_prod; 8833 kstat_kv_u64(kvs++) = q->q_tx.tx_cons; 8834 kstat_kv_u64(kvs++) = bemtoh32(&u.sq.sq_wq.wq_sw_counter); 8835 kstat_kv_u64(kvs++) = bemtoh32(&u.sq.sq_wq.wq_hw_counter); 8836 switch ((bemtoh32(&u.sq.sq_flags) & MCX_SQ_CTX_STATE_MASK) >> 8837 MCX_SQ_CTX_STATE_SHIFT) { 8838 case MCX_SQ_CTX_STATE_RST: 8839 text = "RST"; 8840 break; 8841 case MCX_SQ_CTX_STATE_RDY: 8842 text = "RDY"; 8843 break; 8844 case MCX_SQ_CTX_STATE_ERR: 8845 text = "ERR"; 8846 break; 8847 default: 8848 text = "unknown"; 8849 break; 8850 } 8851 strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs))); 8852 kvs++; 8853 8854 if (mcx_query_cq(sc, &q->q_cq, &u.cq) != 0) { 8855 error = EIO; 8856 goto out; 8857 } 8858 8859 kstat_kv_u64(kvs++) = q->q_cq.cq_cons; 8860 kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_producer_counter); 8861 kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_consumer_counter); 8862 kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_last_notified); 8863 kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_last_solicit); 8864 8865 switch ((bemtoh32(&u.cq.cq_status) & MCX_CQ_CTX_STATUS_MASK) >> 8866 MCX_CQ_CTX_STATUS_SHIFT) { 8867 case MCX_CQ_CTX_STATUS_OK: 8868 text = "OK"; 8869 break; 8870 case MCX_CQ_CTX_STATUS_OVERFLOW: 8871 text = "overflow"; 8872 break; 8873 case 
MCX_CQ_CTX_STATUS_WRITE_FAIL: 8874 text = "write fail"; 8875 break; 8876 default: 8877 text = "unknown"; 8878 break; 8879 } 8880 strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs))); 8881 kvs++; 8882 8883 switch ((bemtoh32(&u.cq.cq_status) & MCX_CQ_CTX_STATE_MASK) >> 8884 MCX_CQ_CTX_STATE_SHIFT) { 8885 case MCX_CQ_CTX_STATE_SOLICITED: 8886 text = "solicited"; 8887 break; 8888 case MCX_CQ_CTX_STATE_ARMED: 8889 text = "armed"; 8890 break; 8891 case MCX_CQ_CTX_STATE_FIRED: 8892 text = "fired"; 8893 break; 8894 default: 8895 text = "unknown"; 8896 break; 8897 } 8898 strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs))); 8899 kvs++; 8900 8901 if (mcx_query_eq(sc, &q->q_eq, &u.eq) != 0) { 8902 error = EIO; 8903 goto out; 8904 } 8905 8906 kstat_kv_u64(kvs++) = q->q_eq.eq_cons; 8907 kstat_kv_u64(kvs++) = bemtoh32(&u.eq.eq_producer_counter); 8908 kstat_kv_u64(kvs++) = bemtoh32(&u.eq.eq_consumer_counter); 8909 8910 switch ((bemtoh32(&u.eq.eq_status) & MCX_EQ_CTX_STATUS_MASK) >> 8911 MCX_EQ_CTX_STATUS_SHIFT) { 8912 case MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE: 8913 text = "write fail"; 8914 break; 8915 case MCX_EQ_CTX_STATUS_OK: 8916 text = "OK"; 8917 break; 8918 default: 8919 text = "unknown"; 8920 break; 8921 } 8922 strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs))); 8923 kvs++; 8924 8925 switch ((bemtoh32(&u.eq.eq_status) & MCX_EQ_CTX_STATE_MASK) >> 8926 MCX_EQ_CTX_STATE_SHIFT) { 8927 case MCX_EQ_CTX_STATE_ARMED: 8928 text = "armed"; 8929 break; 8930 case MCX_EQ_CTX_STATE_FIRED: 8931 text = "fired"; 8932 break; 8933 default: 8934 text = "unknown"; 8935 break; 8936 } 8937 strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs))); 8938 kvs++; 8939 8940 nanouptime(&ks->ks_updated); 8941 out: 8942 KERNEL_UNLOCK(); 8943 return (error); 8944 } 8945 8946 #endif /* NKSTAT > 0 */ 8947 8948 static unsigned int 8949 mcx_timecounter_read(struct timecounter *tc) 8950 { 8951 struct mcx_softc *sc = tc->tc_priv; 8952 8953 return (mcx_rd(sc, 
MCX_INTERNAL_TIMER_L)); 8954 } 8955 8956 static void 8957 mcx_timecounter_attach(struct mcx_softc *sc) 8958 { 8959 struct timecounter *tc = &sc->sc_timecounter; 8960 8961 tc->tc_get_timecount = mcx_timecounter_read; 8962 tc->tc_counter_mask = ~0U; 8963 tc->tc_frequency = sc->sc_khz * 1000; 8964 tc->tc_name = device_xname(sc->sc_dev); 8965 tc->tc_quality = -100; 8966 tc->tc_priv = sc; 8967 8968 tc_init(tc); 8969 } 8970