/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_PRM_H_
#define RTE_PMD_MLX5_PRM_H_

#include <unistd.h>

#include <rte_vect.h>
#include <rte_byteorder.h>

#include <mlx5_glue.h>
#include "mlx5_autoconf.h"

/* RSS hash key size. */
#define MLX5_RSS_HASH_KEY_LEN 40

/* Get CQE owner bit. */
#define MLX5_CQE_OWNER(op_own) ((op_own) & MLX5_CQE_OWNER_MASK)

/* Get CQE format. */
#define MLX5_CQE_FORMAT(op_own) (((op_own) & MLX5E_CQE_FORMAT_MASK) >> 2)

/* Get CQE opcode. */
#define MLX5_CQE_OPCODE(op_own) (((op_own) & 0xf0) >> 4)

/* Get CQE solicited event. */
#define MLX5_CQE_SE(op_own) (((op_own) >> 1) & 1)

/* Invalidate a CQE. */
#define MLX5_CQE_INVALIDATE (MLX5_CQE_INVALID << 4)

/* Hardware index widths. */
#define MLX5_CQ_INDEX_WIDTH 24
#define MLX5_WQ_INDEX_WIDTH 16

/* WQE Segment sizes in bytes. */
#define MLX5_WSEG_SIZE 16u
#define MLX5_WQE_CSEG_SIZE sizeof(struct mlx5_wqe_cseg)
#define MLX5_WQE_DSEG_SIZE sizeof(struct mlx5_wqe_dseg)
#define MLX5_WQE_ESEG_SIZE sizeof(struct mlx5_wqe_eseg)

/* WQE/WQEBB size in bytes. */
#define MLX5_WQE_SIZE sizeof(struct mlx5_wqe)

/*
 * Max size of a WQE session.
 * Absolute maximum size is 63 (MLX5_DSEG_MAX) segments;
 * the WQE size field in the Control Segment is 6 bits wide.
 */
#define MLX5_WQE_SIZE_MAX (60 * MLX5_WSEG_SIZE)

/*
 * Default minimum number of Tx queues for inlining packets.
 * If there are fewer queues than specified, we assume there are
 * not enough CPU resources (cycles) to perform inlining,
 * PCIe throughput is not expected to be the bottleneck, and
 * inlining is disabled.
 */
#define MLX5_INLINE_MAX_TXQS 8u
#define MLX5_INLINE_MAX_TXQS_BLUEFIELD 16u

/*
 * Default packet length threshold to be inlined with
 * enhanced MPW. If the packet length exceeds the threshold
 * the data are not inlined. Should be aligned to the WQEBB
 * boundary, accounting for the title Control and Ethernet
 * segments.
 */
#define MLX5_EMPW_DEF_INLINE_LEN (4u * MLX5_WQE_SIZE + \
				  MLX5_DSEG_MIN_INLINE_SIZE)
/*
 * Maximal inline data length sent with enhanced MPW.
 * Is based on the maximal WQE size.
 */
#define MLX5_EMPW_MAX_INLINE_LEN (MLX5_WQE_SIZE_MAX - \
				  MLX5_WQE_CSEG_SIZE - \
				  MLX5_WQE_ESEG_SIZE - \
				  MLX5_WQE_DSEG_SIZE + \
				  MLX5_DSEG_MIN_INLINE_SIZE)
/*
 * Minimal amount of packets to be sent with EMPW.
 * This limits the minimal required size of a sent EMPW.
 * If there are not enough resources to build a minimal
 * EMPW, the sending loop exits.
 */
#define MLX5_EMPW_MIN_PACKETS (2u + 3u * 4u)
/*
 * Maximal amount of packets to be sent with EMPW.
 * This value is not recommended to exceed MLX5_TX_COMP_THRESH,
 * otherwise there might be up to MLX5_EMPW_MAX_PACKETS mbufs
 * without a CQE generation request; multiplied by
 * MLX5_TX_COMP_MAX_CQE this may cause significant latency
 * in the tx burst routine when freeing multiple mbufs.
 */
#define MLX5_EMPW_MAX_PACKETS MLX5_TX_COMP_THRESH
#define MLX5_MPW_MAX_PACKETS 6
#define MLX5_MPW_INLINE_MAX_PACKETS 6
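
/*
 * Illustrative sketch, not part of the PRM definitions: how the MLX5_CQE_*
 * accessor macros above can be used to decode the op_own byte of a CQE.
 * The helper and structure names and the MLX5_PRM_EXAMPLES guard are
 * hypothetical and added here only for illustration; the underlying masks
 * (MLX5_CQE_OWNER_MASK, MLX5E_CQE_FORMAT_MASK) are expected from the
 * headers pulled in via mlx5_glue.h.
 */
#ifdef MLX5_PRM_EXAMPLES
struct mlx5_prm_example_op_own {
	uint8_t owner;  /* Ownership (toggle) bit. */
	uint8_t format; /* CQE format. */
	uint8_t opcode; /* Completion opcode. */
	uint8_t se;     /* Solicited event bit. */
};

static inline struct mlx5_prm_example_op_own
mlx5_prm_example_parse_op_own(uint8_t op_own)
{
	struct mlx5_prm_example_op_own f;

	f.owner = MLX5_CQE_OWNER(op_own);
	f.format = MLX5_CQE_FORMAT(op_own);
	f.opcode = MLX5_CQE_OPCODE(op_own);
	f.se = MLX5_CQE_SE(op_own);
	return f;
}
#endif /* MLX5_PRM_EXAMPLES */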

/*
 * Default packet length threshold to be inlined with
 * ordinary SEND. Inlining saves the MR key search
 * and an extra PCIe data fetch transaction, but costs
 * CPU cycles.
 */
#define MLX5_SEND_DEF_INLINE_LEN (5U * MLX5_WQE_SIZE + \
				  MLX5_ESEG_MIN_INLINE_SIZE - \
				  MLX5_WQE_CSEG_SIZE - \
				  MLX5_WQE_ESEG_SIZE - \
				  MLX5_WQE_DSEG_SIZE)
/*
 * Maximal inline data length sent with ordinary SEND.
 * Is based on the maximal WQE size.
 */
#define MLX5_SEND_MAX_INLINE_LEN (MLX5_WQE_SIZE_MAX - \
				  MLX5_WQE_CSEG_SIZE - \
				  MLX5_WQE_ESEG_SIZE - \
				  MLX5_WQE_DSEG_SIZE + \
				  MLX5_ESEG_MIN_INLINE_SIZE)

/* Missing in mlx5dv.h, should be defined here. */
#ifndef HAVE_MLX5_OPCODE_ENHANCED_MPSW
#define MLX5_OPCODE_ENHANCED_MPSW 0x29u
#endif

#ifndef HAVE_MLX5_OPCODE_SEND_EN
#define MLX5_OPCODE_SEND_EN 0x17u
#endif

#ifndef HAVE_MLX5_OPCODE_WAIT
#define MLX5_OPCODE_WAIT 0x0fu
#endif

/* CQE value to inform that VLAN is stripped. */
#define MLX5_CQE_VLAN_STRIPPED (1u << 0)

/* IPv4 options. */
#define MLX5_CQE_RX_IP_EXT_OPTS_PACKET (1u << 1)

/* IPv6 packet. */
#define MLX5_CQE_RX_IPV6_PACKET (1u << 2)

/* IPv4 packet. */
#define MLX5_CQE_RX_IPV4_PACKET (1u << 3)

/* TCP packet. */
#define MLX5_CQE_RX_TCP_PACKET (1u << 4)

/* UDP packet. */
#define MLX5_CQE_RX_UDP_PACKET (1u << 5)

/* IP is fragmented. */
#define MLX5_CQE_RX_IP_FRAG_PACKET (1u << 7)

/* L2 header is valid. */
#define MLX5_CQE_RX_L2_HDR_VALID (1u << 8)

/* L3 header is valid. */
#define MLX5_CQE_RX_L3_HDR_VALID (1u << 9)

/* L4 header is valid. */
#define MLX5_CQE_RX_L4_HDR_VALID (1u << 10)

/* Outer packet, 0 IPv4, 1 IPv6. */
#define MLX5_CQE_RX_OUTER_PACKET (1u << 1)

/* Tunnel packet bit in the CQE. */
#define MLX5_CQE_RX_TUNNEL_PACKET (1u << 0)

/* Mask for LRO push flag in the CQE lro_tcppsh_abort_dupack field. */
#define MLX5_CQE_LRO_PUSH_MASK 0x40

/* Mask for L4 type in the CQE hdr_type_etc field. */
#define MLX5_CQE_L4_TYPE_MASK 0x70

/* The bit index of L4 type in the CQE hdr_type_etc field. */
#define MLX5_CQE_L4_TYPE_SHIFT 0x4

/* L4 type to indicate TCP packet without acknowledgment. */
#define MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK 0x3

/* L4 type to indicate TCP packet with acknowledgment. */
#define MLX5_L4_HDR_TYPE_TCP_WITH_ACL 0x4

/* Inner L3 checksum offload (Tunneled packets only). */
#define MLX5_ETH_WQE_L3_INNER_CSUM (1u << 4)

/* Inner L4 checksum offload (Tunneled packets only). */
#define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5)

/* Outer L4 type is TCP. */
#define MLX5_ETH_WQE_L4_OUTER_TCP (0u << 5)

/* Outer L4 type is UDP. */
#define MLX5_ETH_WQE_L4_OUTER_UDP (1u << 5)

/* Outer L3 type is IPV4. */
#define MLX5_ETH_WQE_L3_OUTER_IPV4 (0u << 4)

/* Outer L3 type is IPV6. */
#define MLX5_ETH_WQE_L3_OUTER_IPV6 (1u << 4)

/* Inner L4 type is TCP. */
#define MLX5_ETH_WQE_L4_INNER_TCP (0u << 1)

/* Inner L4 type is UDP. */
#define MLX5_ETH_WQE_L4_INNER_UDP (1u << 1)

/* Inner L3 type is IPV4. */
#define MLX5_ETH_WQE_L3_INNER_IPV4 (0u << 0)

/* Inner L3 type is IPV6. */
#define MLX5_ETH_WQE_L3_INNER_IPV6 (1u << 0)

/* VLAN insertion flag. */
#define MLX5_ETH_WQE_VLAN_INSERT (1u << 31)

/* Data inline segment flag. */
#define MLX5_ETH_WQE_DATA_INLINE (1u << 31)
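
/*
 * Illustrative sketch, not part of the PRM definitions: checking the Rx
 * validity and VLAN-stripped bits defined above. The helper names and the
 * MLX5_PRM_EXAMPLES guard are hypothetical; both helpers assume they are
 * given the CPU-order value of the CQE field that carries these bits
 * (hdr_type_etc, by our reading of this file), which is an assumption to
 * verify against the PRM.
 */
#ifdef MLX5_PRM_EXAMPLES
static inline int
mlx5_prm_example_l3_l4_ok(uint16_t hdr_flags)
{
	const uint16_t mask = MLX5_CQE_RX_L3_HDR_VALID |
			      MLX5_CQE_RX_L4_HDR_VALID;

	/* Both L3 and L4 checksums were validated by the NIC. */
	return (hdr_flags & mask) == mask;
}

static inline int
mlx5_prm_example_vlan_stripped(uint16_t hdr_flags)
{
	/* The NIC stripped the VLAN tag and reported it in the CQE. */
	return !!(hdr_flags & MLX5_CQE_VLAN_STRIPPED);
}
#endif /* MLX5_PRM_EXAMPLES */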

/* Check whether a flow mark is valid. */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00)
#else
#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff)
#endif

/* INVALID is used by packets matching no flow rules. */
#define MLX5_FLOW_MARK_INVALID 0

/* Maximum allowed value to mark a packet. */
#define MLX5_FLOW_MARK_MAX 0xfffff0

/* Default mark value used when none is provided. */
#define MLX5_FLOW_MARK_DEFAULT 0xffffff

/* Default mark mask for metadata legacy mode. */
#define MLX5_FLOW_MARK_MASK 0xffffff

/* Maximum number of DS in a WQE, limited by the 6-bit field. */
#define MLX5_DSEG_MAX 63

/* The completion mode offset in the WQE control segment line 2. */
#define MLX5_COMP_MODE_OFFSET 2

/* Amount of data bytes in the minimal inline data segment. */
#define MLX5_DSEG_MIN_INLINE_SIZE 12u

/* Amount of data bytes in the minimal inline eth segment. */
#define MLX5_ESEG_MIN_INLINE_SIZE 18u

/* Amount of data bytes after the eth data segment. */
#define MLX5_ESEG_EXTRA_DATA_SIZE 32u

/* The maximum log value of segments per RQ WQE. */
#define MLX5_MAX_LOG_RQ_SEGS 5u

/* The alignment needed for the WQ buffer. */
#define MLX5_WQE_BUF_ALIGNMENT rte_mem_page_size()

/* The alignment needed for the CQ buffer. */
#define MLX5_CQE_BUF_ALIGNMENT rte_mem_page_size()

/* Completion mode. */
enum mlx5_completion_mode {
	MLX5_COMP_ONLY_ERR = 0x0,
	MLX5_COMP_ONLY_FIRST_ERR = 0x1,
	MLX5_COMP_ALWAYS = 0x2,
	MLX5_COMP_CQE_AND_EQE = 0x3,
};

/* MPW mode. */
enum mlx5_mpw_mode {
	MLX5_MPW_DISABLED,
	MLX5_MPW,
	MLX5_MPW_ENHANCED, /* Enhanced Multi-Packet Send WQE, a.k.a. MPWv2. */
};

/* WQE Control segment. */
struct mlx5_wqe_cseg {
	uint32_t opcode;
	uint32_t sq_ds;
	uint32_t flags;
	uint32_t misc;
} __rte_packed __rte_aligned(MLX5_WSEG_SIZE);

/* Header of a data segment; minimal-size Data Segment. */
struct mlx5_wqe_dseg {
	uint32_t bcount;
	union {
		uint8_t inline_data[MLX5_DSEG_MIN_INLINE_SIZE];
		struct {
			uint32_t lkey;
			uint64_t pbuf;
		} __rte_packed;
	};
} __rte_packed;

/* Subset of the WQE Ethernet Segment. */
struct mlx5_wqe_eseg {
	union {
		struct {
			uint32_t swp_offs;
			uint8_t cs_flags;
			uint8_t swp_flags;
			uint16_t mss;
			uint32_t metadata;
			uint16_t inline_hdr_sz;
			union {
				uint16_t inline_data;
				uint16_t vlan_tag;
			};
		} __rte_packed;
		struct {
			uint32_t offsets;
			uint32_t flags;
			uint32_t flow_metadata;
			uint32_t inline_hdr;
		} __rte_packed;
	};
} __rte_packed;

struct mlx5_wqe_qseg {
	uint32_t reserved0;
	uint32_t reserved1;
	uint32_t max_index;
	uint32_t qpn_cqn;
} __rte_packed;

/* The title WQEBB, the header of a WQE. */
struct mlx5_wqe {
	union {
		struct mlx5_wqe_cseg cseg;
		uint32_t ctrl[4];
	};
	struct mlx5_wqe_eseg eseg;
	union {
		struct mlx5_wqe_dseg dseg[2];
		uint8_t data[MLX5_ESEG_EXTRA_DATA_SIZE];
	};
} __rte_packed;
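
/*
 * Illustrative sketch, not part of the PRM definitions: populating the
 * Control Segment of a title WQEBB for a SEND. The field encodings used
 * here (opcode in the low byte and the producer index in bits 8..23 of
 * 'opcode'; the SQ number shifted left by 8 plus the DS count, i.e. the
 * WQE size in MLX5_WSEG_SIZE units, in 'sq_ds'; the completion mode
 * shifted by MLX5_COMP_MODE_OFFSET in 'flags') are assumptions to verify
 * against the PRM. MLX5_OPCODE_SEND is expected from the rdma-core
 * headers; the helper name and the MLX5_PRM_EXAMPLES guard are
 * hypothetical.
 */
#ifdef MLX5_PRM_EXAMPLES
static inline void
mlx5_prm_example_fill_cseg(struct mlx5_wqe *wqe, uint16_t pi,
			   uint32_t sq_num, uint8_t ds,
			   enum mlx5_completion_mode comp)
{
	wqe->cseg.opcode = rte_cpu_to_be_32(((uint32_t)pi << 8) |
					    MLX5_OPCODE_SEND);
	wqe->cseg.sq_ds = rte_cpu_to_be_32((sq_num << 8) | ds);
	wqe->cseg.flags = rte_cpu_to_be_32((uint32_t)comp <<
					   MLX5_COMP_MODE_OFFSET);
	wqe->cseg.misc = 0;
}
#endif /* MLX5_PRM_EXAMPLES */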

/* WQE for Multi-Packet RQ. */
struct mlx5_wqe_mprq {
	struct mlx5_wqe_srq_next_seg next_seg;
	struct mlx5_wqe_data_seg dseg;
};

#define MLX5_MPRQ_LEN_MASK 0x000ffff
#define MLX5_MPRQ_LEN_SHIFT 0
#define MLX5_MPRQ_STRIDE_NUM_MASK 0x3fff0000
#define MLX5_MPRQ_STRIDE_NUM_SHIFT 16
#define MLX5_MPRQ_FILLER_MASK 0x80000000
#define MLX5_MPRQ_FILLER_SHIFT 31

#define MLX5_MPRQ_STRIDE_SHIFT_BYTE 2

/* CQ element structure - should be equal to the cache line size. */
struct mlx5_cqe {
#if (RTE_CACHE_LINE_SIZE == 128)
	uint8_t padding[64];
#endif
	uint8_t pkt_info;
	uint8_t rsvd0;
	uint16_t wqe_id;
	uint8_t lro_tcppsh_abort_dupack;
	uint8_t lro_min_ttl;
	uint16_t lro_tcp_win;
	uint32_t lro_ack_seq_num;
	uint32_t rx_hash_res;
	uint8_t rx_hash_type;
	uint8_t rsvd1[3];
	uint16_t csum;
	uint8_t rsvd2[6];
	uint16_t hdr_type_etc;
	uint16_t vlan_info;
	uint8_t lro_num_seg;
	uint8_t rsvd3[3];
	uint32_t flow_table_metadata;
	uint8_t rsvd4[4];
	uint32_t byte_cnt;
	uint64_t timestamp;
	uint32_t sop_drop_qpn;
	uint16_t wqe_counter;
	uint8_t rsvd5;
	uint8_t op_own;
};

struct mlx5_cqe_ts {
	uint64_t timestamp;
	uint32_t sop_drop_qpn;
	uint16_t wqe_counter;
	uint8_t rsvd5;
	uint8_t op_own;
};

/* MMO metadata segment. */

#define MLX5_OPCODE_MMO 0x2f
#define MLX5_OPC_MOD_MMO_REGEX 0x4

struct mlx5_wqe_metadata_seg {
	uint32_t mmo_control_31_0; /* mmo_control_63_32 is in ctrl_seg.imm */
	uint32_t lkey;
	uint64_t addr;
};

struct mlx5_ifc_regexp_mmo_control_bits {
	uint8_t reserved_at_31[0x2];
	uint8_t le[0x1];
	uint8_t reserved_at_28[0x1];
	uint8_t subset_id_0[0xc];
	uint8_t reserved_at_16[0x4];
	uint8_t subset_id_1[0xc];
	uint8_t ctrl[0x4];
	uint8_t subset_id_2[0xc];
	uint8_t reserved_at_16_1[0x4];
	uint8_t subset_id_3[0xc];
};

struct mlx5_ifc_regexp_metadata_bits {
	uint8_t rof_version[0x10];
	uint8_t latency_count[0x10];
	uint8_t instruction_count[0x10];
	uint8_t primary_thread_count[0x10];
	uint8_t match_count[0x8];
	uint8_t detected_match_count[0x8];
	uint8_t status[0x10];
	uint8_t job_id[0x20];
	uint8_t reserved[0x80];
};

struct mlx5_ifc_regexp_match_tuple_bits {
	uint8_t length[0x10];
	uint8_t start_ptr[0x10];
	uint8_t rule_id[0x20];
};

/* Adding direct verbs to data-path. */

/* CQ sequence number mask. */
#define MLX5_CQ_SQN_MASK 0x3

/* CQ sequence number index. */
#define MLX5_CQ_SQN_OFFSET 28

/* CQ doorbell index mask. */
#define MLX5_CI_MASK 0xffffff

/* CQ doorbell offset. */
#define MLX5_CQ_ARM_DB 1

/* CQ doorbell offset. */
#define MLX5_CQ_DOORBELL 0x20

/* CQE format value. */
#define MLX5_COMPRESSED 0x3

/* CQ doorbell cmd types. */
#define MLX5_CQ_DBR_CMD_SOL_ONLY (1 << 24)
#define MLX5_CQ_DBR_CMD_ALL (0 << 24)

/* Action type of header modification. */
enum {
	MLX5_MODIFICATION_TYPE_SET = 0x1,
	MLX5_MODIFICATION_TYPE_ADD = 0x2,
	MLX5_MODIFICATION_TYPE_COPY = 0x3,
};
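
/*
 * Illustrative sketch, not part of the PRM definitions: applying the
 * MLX5_MPRQ_* masks above to the 32-bit byte count word of a Multi-Packet
 * RQ completion. The helper name and the MLX5_PRM_EXAMPLES guard are
 * hypothetical; the caller is assumed to pass the CPU-order value, e.g.
 * rte_be_to_cpu_32(cqe->byte_cnt).
 */
#ifdef MLX5_PRM_EXAMPLES
static inline int
mlx5_prm_example_mprq_decode(uint32_t byte_cnt, uint32_t *len,
			     uint32_t *strd_cnt)
{
	/* Packet byte count within the strides. */
	*len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
	/* Number of strides consumed by this completion. */
	*strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
		    MLX5_MPRQ_STRIDE_NUM_SHIFT;
	/* A filler CQE consumes strides but carries no packet data. */
	return !!(byte_cnt & MLX5_MPRQ_FILLER_MASK);
}
#endif /* MLX5_PRM_EXAMPLES */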

/* The field of the packet to be modified. */
enum mlx5_modification_field {
	MLX5_MODI_OUT_NONE = -1,
	MLX5_MODI_OUT_SMAC_47_16 = 1,
	MLX5_MODI_OUT_SMAC_15_0,
	MLX5_MODI_OUT_ETHERTYPE,
	MLX5_MODI_OUT_DMAC_47_16,
	MLX5_MODI_OUT_DMAC_15_0,
	MLX5_MODI_OUT_IP_DSCP,
	MLX5_MODI_OUT_TCP_FLAGS,
	MLX5_MODI_OUT_TCP_SPORT,
	MLX5_MODI_OUT_TCP_DPORT,
	MLX5_MODI_OUT_IPV4_TTL,
	MLX5_MODI_OUT_UDP_SPORT,
	MLX5_MODI_OUT_UDP_DPORT,
	MLX5_MODI_OUT_SIPV6_127_96,
	MLX5_MODI_OUT_SIPV6_95_64,
	MLX5_MODI_OUT_SIPV6_63_32,
	MLX5_MODI_OUT_SIPV6_31_0,
	MLX5_MODI_OUT_DIPV6_127_96,
	MLX5_MODI_OUT_DIPV6_95_64,
	MLX5_MODI_OUT_DIPV6_63_32,
	MLX5_MODI_OUT_DIPV6_31_0,
	MLX5_MODI_OUT_SIPV4,
	MLX5_MODI_OUT_DIPV4,
	MLX5_MODI_OUT_FIRST_VID,
	MLX5_MODI_IN_SMAC_47_16 = 0x31,
	MLX5_MODI_IN_SMAC_15_0,
	MLX5_MODI_IN_ETHERTYPE,
	MLX5_MODI_IN_DMAC_47_16,
	MLX5_MODI_IN_DMAC_15_0,
	MLX5_MODI_IN_IP_DSCP,
	MLX5_MODI_IN_TCP_FLAGS,
	MLX5_MODI_IN_TCP_SPORT,
	MLX5_MODI_IN_TCP_DPORT,
	MLX5_MODI_IN_IPV4_TTL,
	MLX5_MODI_IN_UDP_SPORT,
	MLX5_MODI_IN_UDP_DPORT,
	MLX5_MODI_IN_SIPV6_127_96,
	MLX5_MODI_IN_SIPV6_95_64,
	MLX5_MODI_IN_SIPV6_63_32,
	MLX5_MODI_IN_SIPV6_31_0,
	MLX5_MODI_IN_DIPV6_127_96,
	MLX5_MODI_IN_DIPV6_95_64,
	MLX5_MODI_IN_DIPV6_63_32,
	MLX5_MODI_IN_DIPV6_31_0,
	MLX5_MODI_IN_SIPV4,
	MLX5_MODI_IN_DIPV4,
	MLX5_MODI_OUT_IPV6_HOPLIMIT,
	MLX5_MODI_IN_IPV6_HOPLIMIT,
	MLX5_MODI_META_DATA_REG_A,
	MLX5_MODI_META_DATA_REG_B = 0x50,
	MLX5_MODI_META_REG_C_0,
	MLX5_MODI_META_REG_C_1,
	MLX5_MODI_META_REG_C_2,
	MLX5_MODI_META_REG_C_3,
	MLX5_MODI_META_REG_C_4,
	MLX5_MODI_META_REG_C_5,
	MLX5_MODI_META_REG_C_6,
	MLX5_MODI_META_REG_C_7,
	MLX5_MODI_OUT_TCP_SEQ_NUM,
	MLX5_MODI_IN_TCP_SEQ_NUM,
	MLX5_MODI_OUT_TCP_ACK_NUM,
	MLX5_MODI_IN_TCP_ACK_NUM = 0x5C,
};

/* Total number of metadata reg_c's. */
#define MLX5_MREG_C_NUM (MLX5_MODI_META_REG_C_7 - MLX5_MODI_META_REG_C_0 + 1)

enum modify_reg {
	REG_NONE = 0,
	REG_A,
	REG_B,
	REG_C_0,
	REG_C_1,
	REG_C_2,
	REG_C_3,
	REG_C_4,
	REG_C_5,
	REG_C_6,
	REG_C_7,
};

/* Modification sub command. */
struct mlx5_modification_cmd {
	union {
		uint32_t data0;
		struct {
			unsigned int length:5;
			unsigned int rsvd0:3;
			unsigned int offset:5;
			unsigned int rsvd1:3;
			unsigned int field:12;
			unsigned int action_type:4;
		};
	};
	union {
		uint32_t data1;
		uint8_t data[4];
		struct {
			unsigned int rsvd2:8;
			unsigned int dst_offset:5;
			unsigned int rsvd3:3;
			unsigned int dst_field:12;
			unsigned int rsvd4:4;
		};
	};
};

typedef uint32_t u32;
typedef uint16_t u16;
typedef uint8_t u8;

#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
#define __mlx5_bit_off(typ, fld) ((unsigned int)(unsigned long) \
				  (&(__mlx5_nullp(typ)->fld)))
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - \
				     (__mlx5_bit_off(typ, fld) & 0x1f))
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << \
				  __mlx5_dw_bit_off(typ, fld))
#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - \
				     (__mlx5_bit_off(typ, fld) & 0xf))
#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << \
				  __mlx5_16_bit_off(typ, fld))
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))

/* Insert a value into a struct. */
#define MLX5_SET(typ, p, fld, v) \
	do { \
		u32 _v = v; \
		*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
		rte_cpu_to_be_32((rte_be_to_cpu_32(*((u32 *)(p) + \
				  __mlx5_dw_off(typ, fld))) & \
				  (~__mlx5_dw_mask(typ, fld))) | \
				 (((_v) & __mlx5_mask(typ, fld)) << \
				   __mlx5_dw_bit_off(typ, fld))); \
	} while (0)

#define MLX5_SET64(typ, p, fld, v) \
	do { \
		MLX5_ASSERT(__mlx5_bit_sz(typ, fld) == 64); \
		*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = \
			rte_cpu_to_be_64(v); \
	} while (0)

#define MLX5_SET16(typ, p, fld, v) \
	do { \
		u16 _v = v; \
		*((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \
		rte_cpu_to_be_16((rte_be_to_cpu_16(*((__be16 *)(p) + \
				  __mlx5_16_off(typ, fld))) & \
				  (~__mlx5_16_mask(typ, fld))) | \
				 (((_v) & __mlx5_mask16(typ, fld)) << \
				  __mlx5_16_bit_off(typ, fld))); \
	} while (0)

#define MLX5_GET_VOLATILE(typ, p, fld) \
	((rte_be_to_cpu_32(*((volatile __be32 *)(p) + \
	__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
	__mlx5_mask(typ, fld))
#define MLX5_GET(typ, p, fld) \
	((rte_be_to_cpu_32(*((__be32 *)(p) + \
	__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
	__mlx5_mask(typ, fld))
#define MLX5_GET16(typ, p, fld) \
	((rte_be_to_cpu_16(*((__be16 *)(p) + \
	__mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
	__mlx5_mask16(typ, fld))
#define MLX5_GET64(typ, p, fld) rte_be_to_cpu_64(*((__be64 *)(p) + \
						   __mlx5_64_off(typ, fld)))
#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
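
/*
 * Illustrative sketch, not part of the PRM definitions: how the
 * MLX5_SET()/MLX5_GET() accessors above operate on a big-endian "_bits"
 * layout. The mlx5_ifc_prm_example_bits structure, the helper name and
 * the MLX5_PRM_EXAMPLES guard are hypothetical and exist only to
 * demonstrate the macros; real callers use the mlx5_ifc_*_bits layouts
 * defined in this file.
 */
#ifdef MLX5_PRM_EXAMPLES
struct mlx5_ifc_prm_example_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 object_id[0x20];
};

static inline uint32_t
mlx5_prm_example_set_get(void)
{
	/* Command buffer sized from the layout (in 32-bit words). */
	uint32_t out[MLX5_ST_SZ_DW(prm_example)] = { 0 };

	/* Fields are written and read in their big-endian positions. */
	MLX5_SET(prm_example, out, opcode, 0x100);
	MLX5_SET(prm_example, out, object_id, 0xdeadbeef);
	return MLX5_GET(prm_example, out, object_id);
}
#endif /* MLX5_PRM_EXAMPLES */
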
struct mlx5_ifc_fte_match_set_misc_bits { 654 u8 gre_c_present[0x1]; 655 u8 reserved_at_1[0x1]; 656 u8 gre_k_present[0x1]; 657 u8 gre_s_present[0x1]; 658 u8 source_vhci_port[0x4]; 659 u8 source_sqn[0x18]; 660 u8 reserved_at_20[0x10]; 661 u8 source_port[0x10]; 662 u8 outer_second_prio[0x3]; 663 u8 outer_second_cfi[0x1]; 664 u8 outer_second_vid[0xc]; 665 u8 inner_second_prio[0x3]; 666 u8 inner_second_cfi[0x1]; 667 u8 inner_second_vid[0xc]; 668 u8 outer_second_cvlan_tag[0x1]; 669 u8 inner_second_cvlan_tag[0x1]; 670 u8 outer_second_svlan_tag[0x1]; 671 u8 inner_second_svlan_tag[0x1]; 672 u8 reserved_at_64[0xc]; 673 u8 gre_protocol[0x10]; 674 u8 gre_key_h[0x18]; 675 u8 gre_key_l[0x8]; 676 u8 vxlan_vni[0x18]; 677 u8 reserved_at_b8[0x8]; 678 u8 geneve_vni[0x18]; 679 u8 reserved_at_e4[0x7]; 680 u8 geneve_oam[0x1]; 681 u8 reserved_at_e0[0xc]; 682 u8 outer_ipv6_flow_label[0x14]; 683 u8 reserved_at_100[0xc]; 684 u8 inner_ipv6_flow_label[0x14]; 685 u8 reserved_at_120[0xa]; 686 u8 geneve_opt_len[0x6]; 687 u8 geneve_protocol_type[0x10]; 688 u8 reserved_at_140[0xc0]; 689 }; 690 691 struct mlx5_ifc_ipv4_layout_bits { 692 u8 reserved_at_0[0x60]; 693 u8 ipv4[0x20]; 694 }; 695 696 struct mlx5_ifc_ipv6_layout_bits { 697 u8 ipv6[16][0x8]; 698 }; 699 700 union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits { 701 struct mlx5_ifc_ipv6_layout_bits ipv6_layout; 702 struct mlx5_ifc_ipv4_layout_bits ipv4_layout; 703 u8 reserved_at_0[0x80]; 704 }; 705 706 struct mlx5_ifc_fte_match_set_lyr_2_4_bits { 707 u8 smac_47_16[0x20]; 708 u8 smac_15_0[0x10]; 709 u8 ethertype[0x10]; 710 u8 dmac_47_16[0x20]; 711 u8 dmac_15_0[0x10]; 712 u8 first_prio[0x3]; 713 u8 first_cfi[0x1]; 714 u8 first_vid[0xc]; 715 u8 ip_protocol[0x8]; 716 u8 ip_dscp[0x6]; 717 u8 ip_ecn[0x2]; 718 u8 cvlan_tag[0x1]; 719 u8 svlan_tag[0x1]; 720 u8 frag[0x1]; 721 u8 ip_version[0x4]; 722 u8 tcp_flags[0x9]; 723 u8 tcp_sport[0x10]; 724 u8 tcp_dport[0x10]; 725 u8 reserved_at_c0[0x18]; 726 u8 ip_ttl_hoplimit[0x8]; 727 u8 udp_sport[0x10]; 728 u8 udp_dport[0x10]; 729 union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6; 730 union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6; 731 }; 732 733 struct mlx5_ifc_fte_match_mpls_bits { 734 u8 mpls_label[0x14]; 735 u8 mpls_exp[0x3]; 736 u8 mpls_s_bos[0x1]; 737 u8 mpls_ttl[0x8]; 738 }; 739 740 struct mlx5_ifc_fte_match_set_misc2_bits { 741 struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls; 742 struct mlx5_ifc_fte_match_mpls_bits inner_first_mpls; 743 struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_gre; 744 struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp; 745 u8 metadata_reg_c_7[0x20]; 746 u8 metadata_reg_c_6[0x20]; 747 u8 metadata_reg_c_5[0x20]; 748 u8 metadata_reg_c_4[0x20]; 749 u8 metadata_reg_c_3[0x20]; 750 u8 metadata_reg_c_2[0x20]; 751 u8 metadata_reg_c_1[0x20]; 752 u8 metadata_reg_c_0[0x20]; 753 u8 metadata_reg_a[0x20]; 754 u8 metadata_reg_b[0x20]; 755 u8 reserved_at_1c0[0x40]; 756 }; 757 758 struct mlx5_ifc_fte_match_set_misc3_bits { 759 u8 inner_tcp_seq_num[0x20]; 760 u8 outer_tcp_seq_num[0x20]; 761 u8 inner_tcp_ack_num[0x20]; 762 u8 outer_tcp_ack_num[0x20]; 763 u8 reserved_at_auto1[0x8]; 764 u8 outer_vxlan_gpe_vni[0x18]; 765 u8 outer_vxlan_gpe_next_protocol[0x8]; 766 u8 outer_vxlan_gpe_flags[0x8]; 767 u8 reserved_at_a8[0x10]; 768 u8 icmp_header_data[0x20]; 769 u8 icmpv6_header_data[0x20]; 770 u8 icmp_type[0x8]; 771 u8 icmp_code[0x8]; 772 u8 icmpv6_type[0x8]; 773 u8 icmpv6_code[0x8]; 774 u8 reserved_at_120[0x20]; 775 u8 gtpu_teid[0x20]; 776 u8 gtpu_msg_type[0x08]; 777 u8 
gtpu_msg_flags[0x08]; 778 u8 reserved_at_170[0x90]; 779 }; 780 781 struct mlx5_ifc_fte_match_set_misc4_bits { 782 u8 prog_sample_field_value_0[0x20]; 783 u8 prog_sample_field_id_0[0x20]; 784 u8 prog_sample_field_value_1[0x20]; 785 u8 prog_sample_field_id_1[0x20]; 786 u8 prog_sample_field_value_2[0x20]; 787 u8 prog_sample_field_id_2[0x20]; 788 u8 prog_sample_field_value_3[0x20]; 789 u8 prog_sample_field_id_3[0x20]; 790 u8 reserved_at_100[0x100]; 791 }; 792 793 /* Flow matcher. */ 794 struct mlx5_ifc_fte_match_param_bits { 795 struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers; 796 struct mlx5_ifc_fte_match_set_misc_bits misc_parameters; 797 struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers; 798 struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2; 799 struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3; 800 struct mlx5_ifc_fte_match_set_misc4_bits misc_parameters_4; 801 }; 802 803 enum { 804 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 805 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT, 806 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT, 807 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT, 808 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT, 809 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT, 810 }; 811 812 enum { 813 MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, 814 MLX5_CMD_OP_CREATE_MKEY = 0x200, 815 MLX5_CMD_OP_CREATE_CQ = 0x400, 816 MLX5_CMD_OP_CREATE_QP = 0x500, 817 MLX5_CMD_OP_RST2INIT_QP = 0x502, 818 MLX5_CMD_OP_INIT2RTR_QP = 0x503, 819 MLX5_CMD_OP_RTR2RTS_QP = 0x504, 820 MLX5_CMD_OP_RTS2RTS_QP = 0x505, 821 MLX5_CMD_OP_SQERR2RTS_QP = 0x506, 822 MLX5_CMD_OP_QP_2ERR = 0x507, 823 MLX5_CMD_OP_QP_2RST = 0x50A, 824 MLX5_CMD_OP_QUERY_QP = 0x50B, 825 MLX5_CMD_OP_SQD2RTS_QP = 0x50C, 826 MLX5_CMD_OP_INIT2INIT_QP = 0x50E, 827 MLX5_CMD_OP_SUSPEND_QP = 0x50F, 828 MLX5_CMD_OP_RESUME_QP = 0x510, 829 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754, 830 MLX5_CMD_OP_ACCESS_REGISTER = 0x805, 831 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816, 832 MLX5_CMD_OP_CREATE_TIR = 0x900, 833 MLX5_CMD_OP_CREATE_SQ = 0X904, 834 MLX5_CMD_OP_MODIFY_SQ = 0X905, 835 MLX5_CMD_OP_CREATE_RQ = 0x908, 836 MLX5_CMD_OP_MODIFY_RQ = 0x909, 837 MLX5_CMD_OP_CREATE_TIS = 0x912, 838 MLX5_CMD_OP_QUERY_TIS = 0x915, 839 MLX5_CMD_OP_CREATE_RQT = 0x916, 840 MLX5_CMD_OP_MODIFY_RQT = 0x917, 841 MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939, 842 MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, 843 MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00, 844 MLX5_CMD_OP_MODIFY_GENERAL_OBJECT = 0xa01, 845 MLX5_CMD_OP_QUERY_GENERAL_OBJECT = 0xa02, 846 MLX5_CMD_SET_REGEX_PARAMS = 0xb04, 847 MLX5_CMD_QUERY_REGEX_PARAMS = 0xb05, 848 MLX5_CMD_SET_REGEX_REGISTERS = 0xb06, 849 MLX5_CMD_QUERY_REGEX_REGISTERS = 0xb07, 850 MLX5_CMD_OP_ACCESS_REGISTER_USER = 0xb0c, 851 }; 852 853 enum { 854 MLX5_MKC_ACCESS_MODE_MTT = 0x1, 855 MLX5_MKC_ACCESS_MODE_KLM = 0x2, 856 MLX5_MKC_ACCESS_MODE_KLM_FBS = 0x3, 857 }; 858 859 #define MLX5_ADAPTER_PAGE_SHIFT 12 860 #define MLX5_LOG_RQ_STRIDE_SHIFT 4 861 /** 862 * The batch counter dcs id starts from 0x800000 and none batch counter 863 * starts from 0. As currently, the counter is changed to be indexed by 864 * pool index and the offset of the counter in the pool counters_raw array. 865 * It means now the counter index is same for batch and none batch counter. 866 * Add the 0x800000 batch counter offset to the batch counter index helps 867 * indicate the counter index is from batch or none batch container pool. 868 */ 869 #define MLX5_CNT_BATCH_OFFSET 0x800000 870 871 /* The counter batch query requires ID align with 4. */ 872 #define MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT 4 873 874 /* Flow counters. 
*/ 875 struct mlx5_ifc_alloc_flow_counter_out_bits { 876 u8 status[0x8]; 877 u8 reserved_at_8[0x18]; 878 u8 syndrome[0x20]; 879 u8 flow_counter_id[0x20]; 880 u8 reserved_at_60[0x20]; 881 }; 882 883 struct mlx5_ifc_alloc_flow_counter_in_bits { 884 u8 opcode[0x10]; 885 u8 reserved_at_10[0x10]; 886 u8 reserved_at_20[0x10]; 887 u8 op_mod[0x10]; 888 u8 flow_counter_id[0x20]; 889 u8 reserved_at_40[0x18]; 890 u8 flow_counter_bulk[0x8]; 891 }; 892 893 struct mlx5_ifc_dealloc_flow_counter_out_bits { 894 u8 status[0x8]; 895 u8 reserved_at_8[0x18]; 896 u8 syndrome[0x20]; 897 u8 reserved_at_40[0x40]; 898 }; 899 900 struct mlx5_ifc_dealloc_flow_counter_in_bits { 901 u8 opcode[0x10]; 902 u8 reserved_at_10[0x10]; 903 u8 reserved_at_20[0x10]; 904 u8 op_mod[0x10]; 905 u8 flow_counter_id[0x20]; 906 u8 reserved_at_60[0x20]; 907 }; 908 909 struct mlx5_ifc_traffic_counter_bits { 910 u8 packets[0x40]; 911 u8 octets[0x40]; 912 }; 913 914 struct mlx5_ifc_query_flow_counter_out_bits { 915 u8 status[0x8]; 916 u8 reserved_at_8[0x18]; 917 u8 syndrome[0x20]; 918 u8 reserved_at_40[0x40]; 919 struct mlx5_ifc_traffic_counter_bits flow_statistics[]; 920 }; 921 922 struct mlx5_ifc_query_flow_counter_in_bits { 923 u8 opcode[0x10]; 924 u8 reserved_at_10[0x10]; 925 u8 reserved_at_20[0x10]; 926 u8 op_mod[0x10]; 927 u8 reserved_at_40[0x20]; 928 u8 mkey[0x20]; 929 u8 address[0x40]; 930 u8 clear[0x1]; 931 u8 dump_to_memory[0x1]; 932 u8 num_of_counters[0x1e]; 933 u8 flow_counter_id[0x20]; 934 }; 935 936 #define MLX5_MAX_KLM_BYTE_COUNT 0x80000000u 937 #define MLX5_MIN_KLM_FIXED_BUFFER_SIZE 0x1000u 938 939 940 struct mlx5_ifc_klm_bits { 941 u8 byte_count[0x20]; 942 u8 mkey[0x20]; 943 u8 address[0x40]; 944 }; 945 946 struct mlx5_ifc_mkc_bits { 947 u8 reserved_at_0[0x1]; 948 u8 free[0x1]; 949 u8 reserved_at_2[0x1]; 950 u8 access_mode_4_2[0x3]; 951 u8 reserved_at_6[0x7]; 952 u8 relaxed_ordering_write[0x1]; 953 u8 reserved_at_e[0x1]; 954 u8 small_fence_on_rdma_read_response[0x1]; 955 u8 umr_en[0x1]; 956 u8 a[0x1]; 957 u8 rw[0x1]; 958 u8 rr[0x1]; 959 u8 lw[0x1]; 960 u8 lr[0x1]; 961 u8 access_mode_1_0[0x2]; 962 u8 reserved_at_18[0x8]; 963 964 u8 qpn[0x18]; 965 u8 mkey_7_0[0x8]; 966 967 u8 reserved_at_40[0x20]; 968 969 u8 length64[0x1]; 970 u8 bsf_en[0x1]; 971 u8 sync_umr[0x1]; 972 u8 reserved_at_63[0x2]; 973 u8 expected_sigerr_count[0x1]; 974 u8 reserved_at_66[0x1]; 975 u8 en_rinval[0x1]; 976 u8 pd[0x18]; 977 978 u8 start_addr[0x40]; 979 980 u8 len[0x40]; 981 982 u8 bsf_octword_size[0x20]; 983 984 u8 reserved_at_120[0x80]; 985 986 u8 translations_octword_size[0x20]; 987 988 u8 reserved_at_1c0[0x19]; 989 u8 relaxed_ordering_read[0x1]; 990 u8 reserved_at_1da[0x1]; 991 u8 log_page_size[0x5]; 992 993 u8 reserved_at_1e0[0x20]; 994 }; 995 996 struct mlx5_ifc_create_mkey_out_bits { 997 u8 status[0x8]; 998 u8 reserved_at_8[0x18]; 999 1000 u8 syndrome[0x20]; 1001 1002 u8 reserved_at_40[0x8]; 1003 u8 mkey_index[0x18]; 1004 1005 u8 reserved_at_60[0x20]; 1006 }; 1007 1008 struct mlx5_ifc_create_mkey_in_bits { 1009 u8 opcode[0x10]; 1010 u8 reserved_at_10[0x10]; 1011 1012 u8 reserved_at_20[0x10]; 1013 u8 op_mod[0x10]; 1014 1015 u8 reserved_at_40[0x20]; 1016 1017 u8 pg_access[0x1]; 1018 u8 reserved_at_61[0x1f]; 1019 1020 struct mlx5_ifc_mkc_bits memory_key_mkey_entry; 1021 1022 u8 reserved_at_280[0x80]; 1023 1024 u8 translations_octword_actual_size[0x20]; 1025 1026 u8 mkey_umem_id[0x20]; 1027 1028 u8 mkey_umem_offset[0x40]; 1029 1030 u8 reserved_at_380[0x500]; 1031 1032 u8 klm_pas_mtt[][0x20]; 1033 }; 1034 1035 enum { 1036 
MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0 << 1, 1037 MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS = 0x1 << 1, 1038 MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP = 0xc << 1, 1039 MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION = 0x13 << 1, 1040 }; 1041 1042 #define MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q (1ULL << 0xd) 1043 #define MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_Q_COUNTERS (1ULL << 0x1c) 1044 #define MLX5_GENERAL_OBJ_TYPES_CAP_PARSE_GRAPH_FLEX_NODE (1ULL << 0x22) 1045 1046 enum { 1047 MLX5_HCA_CAP_OPMOD_GET_MAX = 0, 1048 MLX5_HCA_CAP_OPMOD_GET_CUR = 1, 1049 }; 1050 1051 enum { 1052 MLX5_CAP_INLINE_MODE_L2, 1053 MLX5_CAP_INLINE_MODE_VPORT_CONTEXT, 1054 MLX5_CAP_INLINE_MODE_NOT_REQUIRED, 1055 }; 1056 1057 enum { 1058 MLX5_INLINE_MODE_NONE, 1059 MLX5_INLINE_MODE_L2, 1060 MLX5_INLINE_MODE_IP, 1061 MLX5_INLINE_MODE_TCP_UDP, 1062 MLX5_INLINE_MODE_RESERVED4, 1063 MLX5_INLINE_MODE_INNER_L2, 1064 MLX5_INLINE_MODE_INNER_IP, 1065 MLX5_INLINE_MODE_INNER_TCP_UDP, 1066 }; 1067 1068 /* HCA bit masks indicating which Flex parser protocols are already enabled. */ 1069 #define MLX5_HCA_FLEX_IPV4_OVER_VXLAN_ENABLED (1UL << 0) 1070 #define MLX5_HCA_FLEX_IPV6_OVER_VXLAN_ENABLED (1UL << 1) 1071 #define MLX5_HCA_FLEX_IPV6_OVER_IP_ENABLED (1UL << 2) 1072 #define MLX5_HCA_FLEX_GENEVE_ENABLED (1UL << 3) 1073 #define MLX5_HCA_FLEX_CW_MPLS_OVER_GRE_ENABLED (1UL << 4) 1074 #define MLX5_HCA_FLEX_CW_MPLS_OVER_UDP_ENABLED (1UL << 5) 1075 #define MLX5_HCA_FLEX_P_BIT_VXLAN_GPE_ENABLED (1UL << 6) 1076 #define MLX5_HCA_FLEX_VXLAN_GPE_ENABLED (1UL << 7) 1077 #define MLX5_HCA_FLEX_ICMP_ENABLED (1UL << 8) 1078 #define MLX5_HCA_FLEX_ICMPV6_ENABLED (1UL << 9) 1079 1080 struct mlx5_ifc_cmd_hca_cap_bits { 1081 u8 reserved_at_0[0x30]; 1082 u8 vhca_id[0x10]; 1083 u8 reserved_at_40[0x40]; 1084 u8 log_max_srq_sz[0x8]; 1085 u8 log_max_qp_sz[0x8]; 1086 u8 reserved_at_90[0x9]; 1087 u8 wqe_index_ignore_cap[0x1]; 1088 u8 dynamic_qp_allocation[0x1]; 1089 u8 log_max_qp[0x5]; 1090 u8 regexp[0x1]; 1091 u8 reserved_at_a1[0x3]; 1092 u8 regexp_num_of_engines[0x4]; 1093 u8 reserved_at_a8[0x3]; 1094 u8 log_max_srq[0x5]; 1095 u8 reserved_at_b0[0x3]; 1096 u8 regexp_log_crspace_size[0x5]; 1097 u8 reserved_at_b8[0x3]; 1098 u8 scatter_fcs_w_decap_disable[0x1]; 1099 u8 reserved_at_bc[0x4]; 1100 u8 reserved_at_c0[0x8]; 1101 u8 log_max_cq_sz[0x8]; 1102 u8 reserved_at_d0[0xb]; 1103 u8 log_max_cq[0x5]; 1104 u8 log_max_eq_sz[0x8]; 1105 u8 relaxed_ordering_write[0x1]; 1106 u8 relaxed_ordering_read[0x1]; 1107 u8 access_register_user[0x1]; 1108 u8 log_max_mkey[0x5]; 1109 u8 reserved_at_f0[0x8]; 1110 u8 dump_fill_mkey[0x1]; 1111 u8 reserved_at_f9[0x3]; 1112 u8 log_max_eq[0x4]; 1113 u8 max_indirection[0x8]; 1114 u8 fixed_buffer_size[0x1]; 1115 u8 log_max_mrw_sz[0x7]; 1116 u8 force_teardown[0x1]; 1117 u8 reserved_at_111[0x1]; 1118 u8 log_max_bsf_list_size[0x6]; 1119 u8 umr_extended_translation_offset[0x1]; 1120 u8 null_mkey[0x1]; 1121 u8 log_max_klm_list_size[0x6]; 1122 u8 non_wire_sq[0x1]; 1123 u8 reserved_at_121[0x9]; 1124 u8 log_max_ra_req_dc[0x6]; 1125 u8 reserved_at_130[0x3]; 1126 u8 log_max_static_sq_wq[0x5]; 1127 u8 reserved_at_138[0x2]; 1128 u8 log_max_ra_res_dc[0x6]; 1129 u8 reserved_at_140[0xa]; 1130 u8 log_max_ra_req_qp[0x6]; 1131 u8 reserved_at_150[0xa]; 1132 u8 log_max_ra_res_qp[0x6]; 1133 u8 end_pad[0x1]; 1134 u8 cc_query_allowed[0x1]; 1135 u8 cc_modify_allowed[0x1]; 1136 u8 start_pad[0x1]; 1137 u8 cache_line_128byte[0x1]; 1138 u8 reserved_at_165[0xa]; 1139 u8 qcam_reg[0x1]; 1140 u8 gid_table_size[0x10]; 1141 u8 out_of_seq_cnt[0x1]; 1142 u8 vport_counters[0x1]; 1143 u8 
retransmission_q_counters[0x1]; 1144 u8 debug[0x1]; 1145 u8 modify_rq_counter_set_id[0x1]; 1146 u8 rq_delay_drop[0x1]; 1147 u8 max_qp_cnt[0xa]; 1148 u8 pkey_table_size[0x10]; 1149 u8 vport_group_manager[0x1]; 1150 u8 vhca_group_manager[0x1]; 1151 u8 ib_virt[0x1]; 1152 u8 eth_virt[0x1]; 1153 u8 vnic_env_queue_counters[0x1]; 1154 u8 ets[0x1]; 1155 u8 nic_flow_table[0x1]; 1156 u8 eswitch_manager[0x1]; 1157 u8 device_memory[0x1]; 1158 u8 mcam_reg[0x1]; 1159 u8 pcam_reg[0x1]; 1160 u8 local_ca_ack_delay[0x5]; 1161 u8 port_module_event[0x1]; 1162 u8 enhanced_error_q_counters[0x1]; 1163 u8 ports_check[0x1]; 1164 u8 reserved_at_1b3[0x1]; 1165 u8 disable_link_up[0x1]; 1166 u8 beacon_led[0x1]; 1167 u8 port_type[0x2]; 1168 u8 num_ports[0x8]; 1169 u8 reserved_at_1c0[0x1]; 1170 u8 pps[0x1]; 1171 u8 pps_modify[0x1]; 1172 u8 log_max_msg[0x5]; 1173 u8 reserved_at_1c8[0x4]; 1174 u8 max_tc[0x4]; 1175 u8 temp_warn_event[0x1]; 1176 u8 dcbx[0x1]; 1177 u8 general_notification_event[0x1]; 1178 u8 reserved_at_1d3[0x2]; 1179 u8 fpga[0x1]; 1180 u8 rol_s[0x1]; 1181 u8 rol_g[0x1]; 1182 u8 reserved_at_1d8[0x1]; 1183 u8 wol_s[0x1]; 1184 u8 wol_g[0x1]; 1185 u8 wol_a[0x1]; 1186 u8 wol_b[0x1]; 1187 u8 wol_m[0x1]; 1188 u8 wol_u[0x1]; 1189 u8 wol_p[0x1]; 1190 u8 stat_rate_support[0x10]; 1191 u8 reserved_at_1f0[0xc]; 1192 u8 cqe_version[0x4]; 1193 u8 compact_address_vector[0x1]; 1194 u8 striding_rq[0x1]; 1195 u8 reserved_at_202[0x1]; 1196 u8 ipoib_enhanced_offloads[0x1]; 1197 u8 ipoib_basic_offloads[0x1]; 1198 u8 reserved_at_205[0x1]; 1199 u8 repeated_block_disabled[0x1]; 1200 u8 umr_modify_entity_size_disabled[0x1]; 1201 u8 umr_modify_atomic_disabled[0x1]; 1202 u8 umr_indirect_mkey_disabled[0x1]; 1203 u8 umr_fence[0x2]; 1204 u8 reserved_at_20c[0x3]; 1205 u8 drain_sigerr[0x1]; 1206 u8 cmdif_checksum[0x2]; 1207 u8 sigerr_cqe[0x1]; 1208 u8 reserved_at_213[0x1]; 1209 u8 wq_signature[0x1]; 1210 u8 sctr_data_cqe[0x1]; 1211 u8 reserved_at_216[0x1]; 1212 u8 sho[0x1]; 1213 u8 tph[0x1]; 1214 u8 rf[0x1]; 1215 u8 dct[0x1]; 1216 u8 qos[0x1]; 1217 u8 eth_net_offloads[0x1]; 1218 u8 roce[0x1]; 1219 u8 atomic[0x1]; 1220 u8 reserved_at_21f[0x1]; 1221 u8 cq_oi[0x1]; 1222 u8 cq_resize[0x1]; 1223 u8 cq_moderation[0x1]; 1224 u8 reserved_at_223[0x3]; 1225 u8 cq_eq_remap[0x1]; 1226 u8 pg[0x1]; 1227 u8 block_lb_mc[0x1]; 1228 u8 reserved_at_229[0x1]; 1229 u8 scqe_break_moderation[0x1]; 1230 u8 cq_period_start_from_cqe[0x1]; 1231 u8 cd[0x1]; 1232 u8 reserved_at_22d[0x1]; 1233 u8 apm[0x1]; 1234 u8 vector_calc[0x1]; 1235 u8 umr_ptr_rlky[0x1]; 1236 u8 imaicl[0x1]; 1237 u8 reserved_at_232[0x4]; 1238 u8 qkv[0x1]; 1239 u8 pkv[0x1]; 1240 u8 set_deth_sqpn[0x1]; 1241 u8 reserved_at_239[0x3]; 1242 u8 xrc[0x1]; 1243 u8 ud[0x1]; 1244 u8 uc[0x1]; 1245 u8 rc[0x1]; 1246 u8 uar_4k[0x1]; 1247 u8 reserved_at_241[0x9]; 1248 u8 uar_sz[0x6]; 1249 u8 reserved_at_250[0x8]; 1250 u8 log_pg_sz[0x8]; 1251 u8 bf[0x1]; 1252 u8 driver_version[0x1]; 1253 u8 pad_tx_eth_packet[0x1]; 1254 u8 reserved_at_263[0x8]; 1255 u8 log_bf_reg_size[0x5]; 1256 u8 reserved_at_270[0xb]; 1257 u8 lag_master[0x1]; 1258 u8 num_lag_ports[0x4]; 1259 u8 reserved_at_280[0x10]; 1260 u8 max_wqe_sz_sq[0x10]; 1261 u8 reserved_at_2a0[0x10]; 1262 u8 max_wqe_sz_rq[0x10]; 1263 u8 max_flow_counter_31_16[0x10]; 1264 u8 max_wqe_sz_sq_dc[0x10]; 1265 u8 reserved_at_2e0[0x7]; 1266 u8 max_qp_mcg[0x19]; 1267 u8 reserved_at_300[0x10]; 1268 u8 flow_counter_bulk_alloc[0x08]; 1269 u8 log_max_mcg[0x8]; 1270 u8 reserved_at_320[0x3]; 1271 u8 log_max_transport_domain[0x5]; 1272 u8 reserved_at_328[0x3]; 1273 u8 log_max_pd[0x5]; 
1274 u8 reserved_at_330[0xb]; 1275 u8 log_max_xrcd[0x5]; 1276 u8 nic_receive_steering_discard[0x1]; 1277 u8 receive_discard_vport_down[0x1]; 1278 u8 transmit_discard_vport_down[0x1]; 1279 u8 reserved_at_343[0x5]; 1280 u8 log_max_flow_counter_bulk[0x8]; 1281 u8 max_flow_counter_15_0[0x10]; 1282 u8 modify_tis[0x1]; 1283 u8 flow_counters_dump[0x1]; 1284 u8 reserved_at_360[0x1]; 1285 u8 log_max_rq[0x5]; 1286 u8 reserved_at_368[0x3]; 1287 u8 log_max_sq[0x5]; 1288 u8 reserved_at_370[0x3]; 1289 u8 log_max_tir[0x5]; 1290 u8 reserved_at_378[0x3]; 1291 u8 log_max_tis[0x5]; 1292 u8 basic_cyclic_rcv_wqe[0x1]; 1293 u8 reserved_at_381[0x2]; 1294 u8 log_max_rmp[0x5]; 1295 u8 reserved_at_388[0x3]; 1296 u8 log_max_rqt[0x5]; 1297 u8 reserved_at_390[0x3]; 1298 u8 log_max_rqt_size[0x5]; 1299 u8 reserved_at_398[0x3]; 1300 u8 log_max_tis_per_sq[0x5]; 1301 u8 ext_stride_num_range[0x1]; 1302 u8 reserved_at_3a1[0x2]; 1303 u8 log_max_stride_sz_rq[0x5]; 1304 u8 reserved_at_3a8[0x3]; 1305 u8 log_min_stride_sz_rq[0x5]; 1306 u8 reserved_at_3b0[0x3]; 1307 u8 log_max_stride_sz_sq[0x5]; 1308 u8 reserved_at_3b8[0x3]; 1309 u8 log_min_stride_sz_sq[0x5]; 1310 u8 hairpin[0x1]; 1311 u8 reserved_at_3c1[0x2]; 1312 u8 log_max_hairpin_queues[0x5]; 1313 u8 reserved_at_3c8[0x3]; 1314 u8 log_max_hairpin_wq_data_sz[0x5]; 1315 u8 reserved_at_3d0[0x3]; 1316 u8 log_max_hairpin_num_packets[0x5]; 1317 u8 reserved_at_3d8[0x3]; 1318 u8 log_max_wq_sz[0x5]; 1319 u8 nic_vport_change_event[0x1]; 1320 u8 disable_local_lb_uc[0x1]; 1321 u8 disable_local_lb_mc[0x1]; 1322 u8 log_min_hairpin_wq_data_sz[0x5]; 1323 u8 reserved_at_3e8[0x3]; 1324 u8 log_max_vlan_list[0x5]; 1325 u8 reserved_at_3f0[0x3]; 1326 u8 log_max_current_mc_list[0x5]; 1327 u8 reserved_at_3f8[0x3]; 1328 u8 log_max_current_uc_list[0x5]; 1329 u8 general_obj_types[0x40]; 1330 u8 reserved_at_440[0x20]; 1331 u8 reserved_at_460[0x10]; 1332 u8 max_num_eqs[0x10]; 1333 u8 reserved_at_480[0x3]; 1334 u8 log_max_l2_table[0x5]; 1335 u8 reserved_at_488[0x8]; 1336 u8 log_uar_page_sz[0x10]; 1337 u8 reserved_at_4a0[0x20]; 1338 u8 device_frequency_mhz[0x20]; 1339 u8 device_frequency_khz[0x20]; 1340 u8 reserved_at_500[0x20]; 1341 u8 num_of_uars_per_page[0x20]; 1342 u8 flex_parser_protocols[0x20]; 1343 u8 reserved_at_560[0x20]; 1344 u8 reserved_at_580[0x3c]; 1345 u8 mini_cqe_resp_stride_index[0x1]; 1346 u8 cqe_128_always[0x1]; 1347 u8 cqe_compression_128[0x1]; 1348 u8 cqe_compression[0x1]; 1349 u8 cqe_compression_timeout[0x10]; 1350 u8 cqe_compression_max_num[0x10]; 1351 u8 reserved_at_5e0[0x10]; 1352 u8 tag_matching[0x1]; 1353 u8 rndv_offload_rc[0x1]; 1354 u8 rndv_offload_dc[0x1]; 1355 u8 log_tag_matching_list_sz[0x5]; 1356 u8 reserved_at_5f8[0x3]; 1357 u8 log_max_xrq[0x5]; 1358 u8 affiliate_nic_vport_criteria[0x8]; 1359 u8 native_port_num[0x8]; 1360 u8 num_vhca_ports[0x8]; 1361 u8 reserved_at_618[0x6]; 1362 u8 sw_owner_id[0x1]; 1363 u8 reserved_at_61f[0x1e1]; 1364 }; 1365 1366 struct mlx5_ifc_qos_cap_bits { 1367 u8 packet_pacing[0x1]; 1368 u8 esw_scheduling[0x1]; 1369 u8 esw_bw_share[0x1]; 1370 u8 esw_rate_limit[0x1]; 1371 u8 reserved_at_4[0x1]; 1372 u8 packet_pacing_burst_bound[0x1]; 1373 u8 packet_pacing_typical_size[0x1]; 1374 u8 flow_meter_srtcm[0x1]; 1375 u8 reserved_at_8[0x8]; 1376 u8 log_max_flow_meter[0x8]; 1377 u8 flow_meter_reg_id[0x8]; 1378 u8 wqe_rate_pp[0x1]; 1379 u8 reserved_at_25[0x7]; 1380 u8 flow_meter_reg_share[0x1]; 1381 u8 reserved_at_2e[0x17]; 1382 u8 packet_pacing_max_rate[0x20]; 1383 u8 packet_pacing_min_rate[0x20]; 1384 u8 reserved_at_80[0x10]; 1385 u8 
packet_pacing_rate_table_size[0x10]; 1386 u8 esw_element_type[0x10]; 1387 u8 esw_tsar_type[0x10]; 1388 u8 reserved_at_c0[0x10]; 1389 u8 max_qos_para_vport[0x10]; 1390 u8 max_tsar_bw_share[0x20]; 1391 u8 reserved_at_100[0x6e8]; 1392 }; 1393 1394 struct mlx5_ifc_per_protocol_networking_offload_caps_bits { 1395 u8 csum_cap[0x1]; 1396 u8 vlan_cap[0x1]; 1397 u8 lro_cap[0x1]; 1398 u8 lro_psh_flag[0x1]; 1399 u8 lro_time_stamp[0x1]; 1400 u8 lro_max_msg_sz_mode[0x2]; 1401 u8 wqe_vlan_insert[0x1]; 1402 u8 self_lb_en_modifiable[0x1]; 1403 u8 self_lb_mc[0x1]; 1404 u8 self_lb_uc[0x1]; 1405 u8 max_lso_cap[0x5]; 1406 u8 multi_pkt_send_wqe[0x2]; 1407 u8 wqe_inline_mode[0x2]; 1408 u8 rss_ind_tbl_cap[0x4]; 1409 u8 reg_umr_sq[0x1]; 1410 u8 scatter_fcs[0x1]; 1411 u8 enhanced_multi_pkt_send_wqe[0x1]; 1412 u8 tunnel_lso_const_out_ip_id[0x1]; 1413 u8 tunnel_lro_gre[0x1]; 1414 u8 tunnel_lro_vxlan[0x1]; 1415 u8 tunnel_stateless_gre[0x1]; 1416 u8 tunnel_stateless_vxlan[0x1]; 1417 u8 swp[0x1]; 1418 u8 swp_csum[0x1]; 1419 u8 swp_lso[0x1]; 1420 u8 reserved_at_23[0x8]; 1421 u8 tunnel_stateless_gtp[0x1]; 1422 u8 reserved_at_25[0x4]; 1423 u8 max_vxlan_udp_ports[0x8]; 1424 u8 reserved_at_38[0x6]; 1425 u8 max_geneve_opt_len[0x1]; 1426 u8 tunnel_stateless_geneve_rx[0x1]; 1427 u8 reserved_at_40[0x10]; 1428 u8 lro_min_mss_size[0x10]; 1429 u8 reserved_at_60[0x120]; 1430 u8 lro_timer_supported_periods[4][0x20]; 1431 u8 reserved_at_200[0x600]; 1432 }; 1433 1434 enum { 1435 MLX5_VIRTQ_TYPE_SPLIT = 0, 1436 MLX5_VIRTQ_TYPE_PACKED = 1, 1437 }; 1438 1439 enum { 1440 MLX5_VIRTQ_EVENT_MODE_NO_MSIX = 0, 1441 MLX5_VIRTQ_EVENT_MODE_QP = 1, 1442 MLX5_VIRTQ_EVENT_MODE_MSIX = 2, 1443 }; 1444 1445 struct mlx5_ifc_virtio_emulation_cap_bits { 1446 u8 desc_tunnel_offload_type[0x1]; 1447 u8 eth_frame_offload_type[0x1]; 1448 u8 virtio_version_1_0[0x1]; 1449 u8 tso_ipv4[0x1]; 1450 u8 tso_ipv6[0x1]; 1451 u8 tx_csum[0x1]; 1452 u8 rx_csum[0x1]; 1453 u8 reserved_at_7[0x1][0x9]; 1454 u8 event_mode[0x8]; 1455 u8 virtio_queue_type[0x8]; 1456 u8 reserved_at_20[0x13]; 1457 u8 log_doorbell_stride[0x5]; 1458 u8 reserved_at_3b[0x3]; 1459 u8 log_doorbell_bar_size[0x5]; 1460 u8 doorbell_bar_offset[0x40]; 1461 u8 reserved_at_80[0x8]; 1462 u8 max_num_virtio_queues[0x18]; 1463 u8 reserved_at_a0[0x60]; 1464 u8 umem_1_buffer_param_a[0x20]; 1465 u8 umem_1_buffer_param_b[0x20]; 1466 u8 umem_2_buffer_param_a[0x20]; 1467 u8 umem_2_buffer_param_b[0x20]; 1468 u8 umem_3_buffer_param_a[0x20]; 1469 u8 umem_3_buffer_param_b[0x20]; 1470 u8 reserved_at_1c0[0x620]; 1471 }; 1472 1473 union mlx5_ifc_hca_cap_union_bits { 1474 struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap; 1475 struct mlx5_ifc_per_protocol_networking_offload_caps_bits 1476 per_protocol_networking_offload_caps; 1477 struct mlx5_ifc_qos_cap_bits qos_cap; 1478 struct mlx5_ifc_virtio_emulation_cap_bits vdpa_caps; 1479 u8 reserved_at_0[0x8000]; 1480 }; 1481 1482 struct mlx5_ifc_query_hca_cap_out_bits { 1483 u8 status[0x8]; 1484 u8 reserved_at_8[0x18]; 1485 u8 syndrome[0x20]; 1486 u8 reserved_at_40[0x40]; 1487 union mlx5_ifc_hca_cap_union_bits capability; 1488 }; 1489 1490 struct mlx5_ifc_query_hca_cap_in_bits { 1491 u8 opcode[0x10]; 1492 u8 reserved_at_10[0x10]; 1493 u8 reserved_at_20[0x10]; 1494 u8 op_mod[0x10]; 1495 u8 reserved_at_40[0x40]; 1496 }; 1497 1498 struct mlx5_ifc_mac_address_layout_bits { 1499 u8 reserved_at_0[0x10]; 1500 u8 mac_addr_47_32[0x10]; 1501 u8 mac_addr_31_0[0x20]; 1502 }; 1503 1504 struct mlx5_ifc_nic_vport_context_bits { 1505 u8 reserved_at_0[0x5]; 1506 u8 min_wqe_inline_mode[0x3]; 1507 u8 
reserved_at_8[0x15]; 1508 u8 disable_mc_local_lb[0x1]; 1509 u8 disable_uc_local_lb[0x1]; 1510 u8 roce_en[0x1]; 1511 u8 arm_change_event[0x1]; 1512 u8 reserved_at_21[0x1a]; 1513 u8 event_on_mtu[0x1]; 1514 u8 event_on_promisc_change[0x1]; 1515 u8 event_on_vlan_change[0x1]; 1516 u8 event_on_mc_address_change[0x1]; 1517 u8 event_on_uc_address_change[0x1]; 1518 u8 reserved_at_40[0xc]; 1519 u8 affiliation_criteria[0x4]; 1520 u8 affiliated_vhca_id[0x10]; 1521 u8 reserved_at_60[0xd0]; 1522 u8 mtu[0x10]; 1523 u8 system_image_guid[0x40]; 1524 u8 port_guid[0x40]; 1525 u8 node_guid[0x40]; 1526 u8 reserved_at_200[0x140]; 1527 u8 qkey_violation_counter[0x10]; 1528 u8 reserved_at_350[0x430]; 1529 u8 promisc_uc[0x1]; 1530 u8 promisc_mc[0x1]; 1531 u8 promisc_all[0x1]; 1532 u8 reserved_at_783[0x2]; 1533 u8 allowed_list_type[0x3]; 1534 u8 reserved_at_788[0xc]; 1535 u8 allowed_list_size[0xc]; 1536 struct mlx5_ifc_mac_address_layout_bits permanent_address; 1537 u8 reserved_at_7e0[0x20]; 1538 }; 1539 1540 struct mlx5_ifc_query_nic_vport_context_out_bits { 1541 u8 status[0x8]; 1542 u8 reserved_at_8[0x18]; 1543 u8 syndrome[0x20]; 1544 u8 reserved_at_40[0x40]; 1545 struct mlx5_ifc_nic_vport_context_bits nic_vport_context; 1546 }; 1547 1548 struct mlx5_ifc_query_nic_vport_context_in_bits { 1549 u8 opcode[0x10]; 1550 u8 reserved_at_10[0x10]; 1551 u8 reserved_at_20[0x10]; 1552 u8 op_mod[0x10]; 1553 u8 other_vport[0x1]; 1554 u8 reserved_at_41[0xf]; 1555 u8 vport_number[0x10]; 1556 u8 reserved_at_60[0x5]; 1557 u8 allowed_list_type[0x3]; 1558 u8 reserved_at_68[0x18]; 1559 }; 1560 1561 struct mlx5_ifc_tisc_bits { 1562 u8 strict_lag_tx_port_affinity[0x1]; 1563 u8 reserved_at_1[0x3]; 1564 u8 lag_tx_port_affinity[0x04]; 1565 u8 reserved_at_8[0x4]; 1566 u8 prio[0x4]; 1567 u8 reserved_at_10[0x10]; 1568 u8 reserved_at_20[0x100]; 1569 u8 reserved_at_120[0x8]; 1570 u8 transport_domain[0x18]; 1571 u8 reserved_at_140[0x8]; 1572 u8 underlay_qpn[0x18]; 1573 u8 reserved_at_160[0x3a0]; 1574 }; 1575 1576 struct mlx5_ifc_query_tis_out_bits { 1577 u8 status[0x8]; 1578 u8 reserved_at_8[0x18]; 1579 u8 syndrome[0x20]; 1580 u8 reserved_at_40[0x40]; 1581 struct mlx5_ifc_tisc_bits tis_context; 1582 }; 1583 1584 struct mlx5_ifc_query_tis_in_bits { 1585 u8 opcode[0x10]; 1586 u8 reserved_at_10[0x10]; 1587 u8 reserved_at_20[0x10]; 1588 u8 op_mod[0x10]; 1589 u8 reserved_at_40[0x8]; 1590 u8 tisn[0x18]; 1591 u8 reserved_at_60[0x20]; 1592 }; 1593 1594 struct mlx5_ifc_alloc_transport_domain_out_bits { 1595 u8 status[0x8]; 1596 u8 reserved_at_8[0x18]; 1597 u8 syndrome[0x20]; 1598 u8 reserved_at_40[0x8]; 1599 u8 transport_domain[0x18]; 1600 u8 reserved_at_60[0x20]; 1601 }; 1602 1603 struct mlx5_ifc_alloc_transport_domain_in_bits { 1604 u8 opcode[0x10]; 1605 u8 reserved_at_10[0x10]; 1606 u8 reserved_at_20[0x10]; 1607 u8 op_mod[0x10]; 1608 u8 reserved_at_40[0x40]; 1609 }; 1610 1611 enum { 1612 MLX5_WQ_TYPE_LINKED_LIST = 0x0, 1613 MLX5_WQ_TYPE_CYCLIC = 0x1, 1614 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ = 0x2, 1615 MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ = 0x3, 1616 }; 1617 1618 enum { 1619 MLX5_WQ_END_PAD_MODE_NONE = 0x0, 1620 MLX5_WQ_END_PAD_MODE_ALIGN = 0x1, 1621 }; 1622 1623 struct mlx5_ifc_wq_bits { 1624 u8 wq_type[0x4]; 1625 u8 wq_signature[0x1]; 1626 u8 end_padding_mode[0x2]; 1627 u8 cd_slave[0x1]; 1628 u8 reserved_at_8[0x18]; 1629 u8 hds_skip_first_sge[0x1]; 1630 u8 log2_hds_buf_size[0x3]; 1631 u8 reserved_at_24[0x7]; 1632 u8 page_offset[0x5]; 1633 u8 lwm[0x10]; 1634 u8 reserved_at_40[0x8]; 1635 u8 pd[0x18]; 1636 u8 reserved_at_60[0x8]; 1637 u8 uar_page[0x18]; 
1638 u8 dbr_addr[0x40]; 1639 u8 hw_counter[0x20]; 1640 u8 sw_counter[0x20]; 1641 u8 reserved_at_100[0xc]; 1642 u8 log_wq_stride[0x4]; 1643 u8 reserved_at_110[0x3]; 1644 u8 log_wq_pg_sz[0x5]; 1645 u8 reserved_at_118[0x3]; 1646 u8 log_wq_sz[0x5]; 1647 u8 dbr_umem_valid[0x1]; 1648 u8 wq_umem_valid[0x1]; 1649 u8 reserved_at_122[0x1]; 1650 u8 log_hairpin_num_packets[0x5]; 1651 u8 reserved_at_128[0x3]; 1652 u8 log_hairpin_data_sz[0x5]; 1653 u8 reserved_at_130[0x4]; 1654 u8 single_wqe_log_num_of_strides[0x4]; 1655 u8 two_byte_shift_en[0x1]; 1656 u8 reserved_at_139[0x4]; 1657 u8 single_stride_log_num_of_bytes[0x3]; 1658 u8 dbr_umem_id[0x20]; 1659 u8 wq_umem_id[0x20]; 1660 u8 wq_umem_offset[0x40]; 1661 u8 reserved_at_1c0[0x440]; 1662 }; 1663 1664 enum { 1665 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE = 0x0, 1666 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP = 0x1, 1667 }; 1668 1669 enum { 1670 MLX5_RQC_STATE_RST = 0x0, 1671 MLX5_RQC_STATE_RDY = 0x1, 1672 MLX5_RQC_STATE_ERR = 0x3, 1673 }; 1674 1675 struct mlx5_ifc_rqc_bits { 1676 u8 rlky[0x1]; 1677 u8 delay_drop_en[0x1]; 1678 u8 scatter_fcs[0x1]; 1679 u8 vsd[0x1]; 1680 u8 mem_rq_type[0x4]; 1681 u8 state[0x4]; 1682 u8 reserved_at_c[0x1]; 1683 u8 flush_in_error_en[0x1]; 1684 u8 hairpin[0x1]; 1685 u8 reserved_at_f[0x11]; 1686 u8 reserved_at_20[0x8]; 1687 u8 user_index[0x18]; 1688 u8 reserved_at_40[0x8]; 1689 u8 cqn[0x18]; 1690 u8 counter_set_id[0x8]; 1691 u8 reserved_at_68[0x18]; 1692 u8 reserved_at_80[0x8]; 1693 u8 rmpn[0x18]; 1694 u8 reserved_at_a0[0x8]; 1695 u8 hairpin_peer_sq[0x18]; 1696 u8 reserved_at_c0[0x10]; 1697 u8 hairpin_peer_vhca[0x10]; 1698 u8 reserved_at_e0[0xa0]; 1699 struct mlx5_ifc_wq_bits wq; /* Not used in LRO RQ. */ 1700 }; 1701 1702 struct mlx5_ifc_create_rq_out_bits { 1703 u8 status[0x8]; 1704 u8 reserved_at_8[0x18]; 1705 u8 syndrome[0x20]; 1706 u8 reserved_at_40[0x8]; 1707 u8 rqn[0x18]; 1708 u8 reserved_at_60[0x20]; 1709 }; 1710 1711 struct mlx5_ifc_create_rq_in_bits { 1712 u8 opcode[0x10]; 1713 u8 uid[0x10]; 1714 u8 reserved_at_20[0x10]; 1715 u8 op_mod[0x10]; 1716 u8 reserved_at_40[0xc0]; 1717 struct mlx5_ifc_rqc_bits ctx; 1718 }; 1719 1720 struct mlx5_ifc_modify_rq_out_bits { 1721 u8 status[0x8]; 1722 u8 reserved_at_8[0x18]; 1723 u8 syndrome[0x20]; 1724 u8 reserved_at_40[0x40]; 1725 }; 1726 1727 struct mlx5_ifc_create_tis_out_bits { 1728 u8 status[0x8]; 1729 u8 reserved_at_8[0x18]; 1730 u8 syndrome[0x20]; 1731 u8 reserved_at_40[0x8]; 1732 u8 tisn[0x18]; 1733 u8 reserved_at_60[0x20]; 1734 }; 1735 1736 struct mlx5_ifc_create_tis_in_bits { 1737 u8 opcode[0x10]; 1738 u8 uid[0x10]; 1739 u8 reserved_at_20[0x10]; 1740 u8 op_mod[0x10]; 1741 u8 reserved_at_40[0xc0]; 1742 struct mlx5_ifc_tisc_bits ctx; 1743 }; 1744 1745 enum { 1746 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM = 1ULL << 0, 1747 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1, 1748 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS = 1ULL << 2, 1749 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID = 1ULL << 3, 1750 }; 1751 1752 struct mlx5_ifc_modify_rq_in_bits { 1753 u8 opcode[0x10]; 1754 u8 uid[0x10]; 1755 u8 reserved_at_20[0x10]; 1756 u8 op_mod[0x10]; 1757 u8 rq_state[0x4]; 1758 u8 reserved_at_44[0x4]; 1759 u8 rqn[0x18]; 1760 u8 reserved_at_60[0x20]; 1761 u8 modify_bitmask[0x40]; 1762 u8 reserved_at_c0[0x40]; 1763 struct mlx5_ifc_rqc_bits ctx; 1764 }; 1765 1766 enum { 1767 MLX5_L3_PROT_TYPE_IPV4 = 0, 1768 MLX5_L3_PROT_TYPE_IPV6 = 1, 1769 }; 1770 1771 enum { 1772 MLX5_L4_PROT_TYPE_TCP = 0, 1773 MLX5_L4_PROT_TYPE_UDP = 1, 1774 }; 1775 1776 enum { 1777 
MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP = 0x0, 1778 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP = 0x1, 1779 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT = 0x2, 1780 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT = 0x3, 1781 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI = 0x4, 1782 }; 1783 1784 struct mlx5_ifc_rx_hash_field_select_bits { 1785 u8 l3_prot_type[0x1]; 1786 u8 l4_prot_type[0x1]; 1787 u8 selected_fields[0x1e]; 1788 }; 1789 1790 enum { 1791 MLX5_TIRC_DISP_TYPE_DIRECT = 0x0, 1792 MLX5_TIRC_DISP_TYPE_INDIRECT = 0x1, 1793 }; 1794 1795 enum { 1796 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1, 1797 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2, 1798 }; 1799 1800 enum { 1801 MLX5_RX_HASH_FN_NONE = 0x0, 1802 MLX5_RX_HASH_FN_INVERTED_XOR8 = 0x1, 1803 MLX5_RX_HASH_FN_TOEPLITZ = 0x2, 1804 }; 1805 1806 enum { 1807 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST = 0x1, 1808 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST = 0x2, 1809 }; 1810 1811 enum { 1812 MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 = 0x0, 1813 MLX5_LRO_MAX_MSG_SIZE_START_FROM_L2 = 0x1, 1814 }; 1815 1816 struct mlx5_ifc_tirc_bits { 1817 u8 reserved_at_0[0x20]; 1818 u8 disp_type[0x4]; 1819 u8 reserved_at_24[0x1c]; 1820 u8 reserved_at_40[0x40]; 1821 u8 reserved_at_80[0x4]; 1822 u8 lro_timeout_period_usecs[0x10]; 1823 u8 lro_enable_mask[0x4]; 1824 u8 lro_max_msg_sz[0x8]; 1825 u8 reserved_at_a0[0x40]; 1826 u8 reserved_at_e0[0x8]; 1827 u8 inline_rqn[0x18]; 1828 u8 rx_hash_symmetric[0x1]; 1829 u8 reserved_at_101[0x1]; 1830 u8 tunneled_offload_en[0x1]; 1831 u8 reserved_at_103[0x5]; 1832 u8 indirect_table[0x18]; 1833 u8 rx_hash_fn[0x4]; 1834 u8 reserved_at_124[0x2]; 1835 u8 self_lb_block[0x2]; 1836 u8 transport_domain[0x18]; 1837 u8 rx_hash_toeplitz_key[10][0x20]; 1838 struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer; 1839 struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner; 1840 u8 reserved_at_2c0[0x4c0]; 1841 }; 1842 1843 struct mlx5_ifc_create_tir_out_bits { 1844 u8 status[0x8]; 1845 u8 reserved_at_8[0x18]; 1846 u8 syndrome[0x20]; 1847 u8 reserved_at_40[0x8]; 1848 u8 tirn[0x18]; 1849 u8 reserved_at_60[0x20]; 1850 }; 1851 1852 struct mlx5_ifc_create_tir_in_bits { 1853 u8 opcode[0x10]; 1854 u8 uid[0x10]; 1855 u8 reserved_at_20[0x10]; 1856 u8 op_mod[0x10]; 1857 u8 reserved_at_40[0xc0]; 1858 struct mlx5_ifc_tirc_bits ctx; 1859 }; 1860 1861 enum { 1862 MLX5_INLINE_Q_TYPE_RQ = 0x0, 1863 MLX5_INLINE_Q_TYPE_VIRTQ = 0x1, 1864 }; 1865 1866 struct mlx5_ifc_rq_num_bits { 1867 u8 reserved_at_0[0x8]; 1868 u8 rq_num[0x18]; 1869 }; 1870 1871 struct mlx5_ifc_rqtc_bits { 1872 u8 reserved_at_0[0xa5]; 1873 u8 list_q_type[0x3]; 1874 u8 reserved_at_a8[0x8]; 1875 u8 rqt_max_size[0x10]; 1876 u8 reserved_at_c0[0x10]; 1877 u8 rqt_actual_size[0x10]; 1878 u8 reserved_at_e0[0x6a0]; 1879 struct mlx5_ifc_rq_num_bits rq_num[]; 1880 }; 1881 1882 struct mlx5_ifc_create_rqt_out_bits { 1883 u8 status[0x8]; 1884 u8 reserved_at_8[0x18]; 1885 u8 syndrome[0x20]; 1886 u8 reserved_at_40[0x8]; 1887 u8 rqtn[0x18]; 1888 u8 reserved_at_60[0x20]; 1889 }; 1890 1891 #ifdef PEDANTIC 1892 #pragma GCC diagnostic ignored "-Wpedantic" 1893 #endif 1894 struct mlx5_ifc_create_rqt_in_bits { 1895 u8 opcode[0x10]; 1896 u8 uid[0x10]; 1897 u8 reserved_at_20[0x10]; 1898 u8 op_mod[0x10]; 1899 u8 reserved_at_40[0xc0]; 1900 struct mlx5_ifc_rqtc_bits rqt_context; 1901 }; 1902 1903 struct mlx5_ifc_modify_rqt_in_bits { 1904 u8 opcode[0x10]; 1905 u8 uid[0x10]; 1906 u8 reserved_at_20[0x10]; 1907 u8 op_mod[0x10]; 1908 u8 reserved_at_40[0x8]; 1909 u8 rqtn[0x18]; 
1910 u8 reserved_at_60[0x20]; 1911 u8 modify_bitmask[0x40]; 1912 u8 reserved_at_c0[0x40]; 1913 struct mlx5_ifc_rqtc_bits rqt_context; 1914 }; 1915 #ifdef PEDANTIC 1916 #pragma GCC diagnostic error "-Wpedantic" 1917 #endif 1918 1919 struct mlx5_ifc_modify_rqt_out_bits { 1920 u8 status[0x8]; 1921 u8 reserved_at_8[0x18]; 1922 u8 syndrome[0x20]; 1923 u8 reserved_at_40[0x40]; 1924 }; 1925 1926 enum { 1927 MLX5_SQC_STATE_RST = 0x0, 1928 MLX5_SQC_STATE_RDY = 0x1, 1929 MLX5_SQC_STATE_ERR = 0x3, 1930 }; 1931 1932 struct mlx5_ifc_sqc_bits { 1933 u8 rlky[0x1]; 1934 u8 cd_master[0x1]; 1935 u8 fre[0x1]; 1936 u8 flush_in_error_en[0x1]; 1937 u8 allow_multi_pkt_send_wqe[0x1]; 1938 u8 min_wqe_inline_mode[0x3]; 1939 u8 state[0x4]; 1940 u8 reg_umr[0x1]; 1941 u8 allow_swp[0x1]; 1942 u8 hairpin[0x1]; 1943 u8 non_wire[0x1]; 1944 u8 static_sq_wq[0x1]; 1945 u8 reserved_at_11[0xf]; 1946 u8 reserved_at_20[0x8]; 1947 u8 user_index[0x18]; 1948 u8 reserved_at_40[0x8]; 1949 u8 cqn[0x18]; 1950 u8 reserved_at_60[0x8]; 1951 u8 hairpin_peer_rq[0x18]; 1952 u8 reserved_at_80[0x10]; 1953 u8 hairpin_peer_vhca[0x10]; 1954 u8 reserved_at_a0[0x50]; 1955 u8 packet_pacing_rate_limit_index[0x10]; 1956 u8 tis_lst_sz[0x10]; 1957 u8 reserved_at_110[0x10]; 1958 u8 reserved_at_120[0x40]; 1959 u8 reserved_at_160[0x8]; 1960 u8 tis_num_0[0x18]; 1961 struct mlx5_ifc_wq_bits wq; 1962 }; 1963 1964 struct mlx5_ifc_query_sq_in_bits { 1965 u8 opcode[0x10]; 1966 u8 reserved_at_10[0x10]; 1967 u8 reserved_at_20[0x10]; 1968 u8 op_mod[0x10]; 1969 u8 reserved_at_40[0x8]; 1970 u8 sqn[0x18]; 1971 u8 reserved_at_60[0x20]; 1972 }; 1973 1974 struct mlx5_ifc_modify_sq_out_bits { 1975 u8 status[0x8]; 1976 u8 reserved_at_8[0x18]; 1977 u8 syndrome[0x20]; 1978 u8 reserved_at_40[0x40]; 1979 }; 1980 1981 struct mlx5_ifc_modify_sq_in_bits { 1982 u8 opcode[0x10]; 1983 u8 uid[0x10]; 1984 u8 reserved_at_20[0x10]; 1985 u8 op_mod[0x10]; 1986 u8 sq_state[0x4]; 1987 u8 reserved_at_44[0x4]; 1988 u8 sqn[0x18]; 1989 u8 reserved_at_60[0x20]; 1990 u8 modify_bitmask[0x40]; 1991 u8 reserved_at_c0[0x40]; 1992 struct mlx5_ifc_sqc_bits ctx; 1993 }; 1994 1995 struct mlx5_ifc_create_sq_out_bits { 1996 u8 status[0x8]; 1997 u8 reserved_at_8[0x18]; 1998 u8 syndrome[0x20]; 1999 u8 reserved_at_40[0x8]; 2000 u8 sqn[0x18]; 2001 u8 reserved_at_60[0x20]; 2002 }; 2003 2004 struct mlx5_ifc_create_sq_in_bits { 2005 u8 opcode[0x10]; 2006 u8 uid[0x10]; 2007 u8 reserved_at_20[0x10]; 2008 u8 op_mod[0x10]; 2009 u8 reserved_at_40[0xc0]; 2010 struct mlx5_ifc_sqc_bits ctx; 2011 }; 2012 2013 enum { 2014 MLX5_FLOW_METER_OBJ_MODIFY_FIELD_ACTIVE = (1ULL << 0), 2015 MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CBS = (1ULL << 1), 2016 MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CIR = (1ULL << 2), 2017 MLX5_FLOW_METER_OBJ_MODIFY_FIELD_EBS = (1ULL << 3), 2018 MLX5_FLOW_METER_OBJ_MODIFY_FIELD_EIR = (1ULL << 4), 2019 }; 2020 2021 struct mlx5_ifc_flow_meter_parameters_bits { 2022 u8 valid[0x1]; // 00h 2023 u8 bucket_overflow[0x1]; 2024 u8 start_color[0x2]; 2025 u8 both_buckets_on_green[0x1]; 2026 u8 meter_mode[0x2]; 2027 u8 reserved_at_1[0x19]; 2028 u8 reserved_at_2[0x20]; //04h 2029 u8 reserved_at_3[0x3]; 2030 u8 cbs_exponent[0x5]; // 08h 2031 u8 cbs_mantissa[0x8]; 2032 u8 reserved_at_4[0x3]; 2033 u8 cir_exponent[0x5]; 2034 u8 cir_mantissa[0x8]; 2035 u8 reserved_at_5[0x20]; // 0Ch 2036 u8 reserved_at_6[0x3]; 2037 u8 ebs_exponent[0x5]; // 10h 2038 u8 ebs_mantissa[0x8]; 2039 u8 reserved_at_7[0x3]; 2040 u8 eir_exponent[0x5]; 2041 u8 eir_mantissa[0x8]; 2042 u8 reserved_at_8[0x60]; // 14h-1Ch 2043 }; 2044 2045 enum { 2046 MLX5_CQE_SIZE_64B 
enum {
	MLX5_CQE_SIZE_64B = 0x0,
	MLX5_CQE_SIZE_128B = 0x1,
};

struct mlx5_ifc_cqc_bits {
	u8 status[0x4];
	u8 as_notify[0x1];
	u8 initiator_src_dct[0x1];
	u8 dbr_umem_valid[0x1];
	u8 reserved_at_7[0x1];
	u8 cqe_sz[0x3];
	u8 cc[0x1];
	u8 reserved_at_c[0x1];
	u8 scqe_break_moderation_en[0x1];
	u8 oi[0x1];
	u8 cq_period_mode[0x2];
	u8 cqe_comp_en[0x1];
	u8 mini_cqe_res_format[0x2];
	u8 st[0x4];
	u8 reserved_at_18[0x8];
	u8 dbr_umem_id[0x20];
	u8 reserved_at_40[0x14];
	u8 page_offset[0x6];
	u8 reserved_at_5a[0x6];
	u8 reserved_at_60[0x3];
	u8 log_cq_size[0x5];
	u8 uar_page[0x18];
	u8 reserved_at_80[0x4];
	u8 cq_period[0xc];
	u8 cq_max_count[0x10];
	u8 reserved_at_a0[0x18];
	u8 c_eqn[0x8];
	u8 reserved_at_c0[0x3];
	u8 log_page_size[0x5];
	u8 reserved_at_c8[0x18];
	u8 reserved_at_e0[0x20];
	u8 reserved_at_100[0x8];
	u8 last_notified_index[0x18];
	u8 reserved_at_120[0x8];
	u8 last_solicit_index[0x18];
	u8 reserved_at_140[0x8];
	u8 consumer_counter[0x18];
	u8 reserved_at_160[0x8];
	u8 producer_counter[0x18];
	u8 local_partition_id[0xc];
	u8 process_id[0x14];
	u8 reserved_at_1A0[0x20];
	u8 dbr_addr[0x40];
};

struct mlx5_ifc_create_cq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x8];
	u8 cqn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_create_cq_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x40];
	struct mlx5_ifc_cqc_bits cq_context;
	u8 cq_umem_offset[0x40];
	u8 cq_umem_id[0x20];
	u8 cq_umem_valid[0x1];
	u8 reserved_at_2e1[0x1f];
	u8 reserved_at_300[0x580];
	u8 pas[];
};

enum {
	MLX5_GENERAL_OBJ_TYPE_VIRTQ = 0x000d,
	MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS = 0x001c,
	MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH = 0x0022,
};

struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x20];
	u8 obj_type[0x10];
	u8 obj_id[0x20];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 obj_id[0x20];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_virtio_q_counters_bits {
	u8 modify_field_select[0x40];
	u8 reserved_at_40[0x40];
	u8 received_desc[0x40];
	u8 completed_desc[0x40];
	u8 error_cqes[0x20];
	u8 bad_desc_errors[0x20];
	u8 exceed_max_chain[0x20];
	u8 invalid_buffer[0x20];
	u8 reserved_at_180[0x50];
};

struct mlx5_ifc_create_virtio_q_counters_in_bits {
	struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
	struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
};

struct mlx5_ifc_query_virtio_q_counters_out_bits {
	struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
	struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
};
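/*
 * Illustrative sketch only: creating a virtio Q counters object through the
 * general object command header above. MLX5_SET(), MLX5_ST_SZ_DW() and
 * MLX5_CMD_OP_CREATE_GENERAL_OBJECT are assumed to be defined earlier in
 * this file, and mlx5_glue->devx_obj_create() to come from mlx5_glue.h.
 * Since the command header sits at offset 0 of the input layout, it can be
 * written directly into the command buffer; the counters are later read
 * back with the query_virtio_q_counters_out layout.
 */
static inline void *
mlx5_virtio_q_counters_create_example(void *ctx)
{
	uint32_t in[MLX5_ST_SZ_DW(create_virtio_q_counters_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS);
	return mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					  out, sizeof(out));
}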
enum {
	MLX5_VIRTQ_STATE_INIT = 0,
	MLX5_VIRTQ_STATE_RDY = 1,
	MLX5_VIRTQ_STATE_SUSPEND = 2,
	MLX5_VIRTQ_STATE_ERROR = 3,
};

enum {
	MLX5_VIRTQ_MODIFY_TYPE_STATE = (1UL << 0),
	MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS = (1UL << 3),
	MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE = (1UL << 4),
};

struct mlx5_ifc_virtio_q_bits {
	u8 virtio_q_type[0x8];
	u8 reserved_at_8[0x5];
	u8 event_mode[0x3];
	u8 queue_index[0x10];
	u8 full_emulation[0x1];
	u8 virtio_version_1_0[0x1];
	u8 reserved_at_22[0x2];
	u8 offload_type[0x4];
	u8 event_qpn_or_msix[0x18];
	u8 doorbell_stride_idx[0x10];
	u8 queue_size[0x10];
	u8 device_emulation_id[0x20];
	u8 desc_addr[0x40];
	u8 used_addr[0x40];
	u8 available_addr[0x40];
	u8 virtio_q_mkey[0x20];
	u8 reserved_at_160[0x20];
	u8 umem_1_id[0x20];
	u8 umem_1_size[0x20];
	u8 umem_1_offset[0x40];
	u8 umem_2_id[0x20];
	u8 umem_2_size[0x20];
	u8 umem_2_offset[0x40];
	u8 umem_3_id[0x20];
	u8 umem_3_size[0x20];
	u8 umem_3_offset[0x40];
	u8 counter_set_id[0x20];
	u8 reserved_at_320[0x8];
	u8 pd[0x18];
	u8 reserved_at_340[0xc0];
};

struct mlx5_ifc_virtio_net_q_bits {
	u8 modify_field_select[0x40];
	u8 reserved_at_40[0x40];
	u8 tso_ipv4[0x1];
	u8 tso_ipv6[0x1];
	u8 tx_csum[0x1];
	u8 rx_csum[0x1];
	u8 reserved_at_84[0x6];
	u8 dirty_bitmap_dump_enable[0x1];
	u8 vhost_log_page[0x5];
	u8 reserved_at_90[0xc];
	u8 state[0x4];
	u8 error_type[0x8];
	u8 tisn_or_qpn[0x18];
	u8 dirty_bitmap_mkey[0x20];
	u8 dirty_bitmap_size[0x20];
	u8 dirty_bitmap_addr[0x40];
	u8 hw_available_index[0x10];
	u8 hw_used_index[0x10];
	u8 reserved_at_160[0xa0];
	struct mlx5_ifc_virtio_q_bits virtio_q_context;
};

struct mlx5_ifc_create_virtq_in_bits {
	struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
	struct mlx5_ifc_virtio_net_q_bits virtq;
};

struct mlx5_ifc_query_virtq_out_bits {
	struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
	struct mlx5_ifc_virtio_net_q_bits virtq;
};
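/*
 * Illustrative sketch only: moving an existing virtq object to the RDY
 * state. Only the fields flagged in modify_field_select are applied by the
 * device. MLX5_SET(), MLX5_SET64() and MLX5_CMD_OP_MODIFY_GENERAL_OBJECT
 * are assumed to be defined earlier in this file. In the _bits layouts one
 * u8 array element stands for one bit, hence the division by 8 to locate
 * the virtq context right after the command header.
 */
static inline void
mlx5_virtq_modify_state_example(uint32_t *in, uint32_t virtq_id)
{
	void *virtq = (uint8_t *)in +
		sizeof(struct mlx5_ifc_general_obj_in_cmd_hdr_bits) / 8;

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, virtq_id);
	MLX5_SET64(virtio_net_q, virtq, modify_field_select,
		   MLX5_VIRTQ_MODIFY_TYPE_STATE);
	MLX5_SET(virtio_net_q, virtq, state, MLX5_VIRTQ_STATE_RDY);
}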
enum {
	MLX5_QP_ST_RC = 0x0,
};

enum {
	MLX5_QP_PM_MIGRATED = 0x3,
};

enum {
	MLX5_NON_ZERO_RQ = 0x0,
	MLX5_SRQ_RQ = 0x1,
	MLX5_CRQ_RQ = 0x2,
	MLX5_ZERO_LEN_RQ = 0x3,
};

struct mlx5_ifc_ads_bits {
	u8 fl[0x1];
	u8 free_ar[0x1];
	u8 reserved_at_2[0xe];
	u8 pkey_index[0x10];
	u8 reserved_at_20[0x8];
	u8 grh[0x1];
	u8 mlid[0x7];
	u8 rlid[0x10];
	u8 ack_timeout[0x5];
	u8 reserved_at_45[0x3];
	u8 src_addr_index[0x8];
	u8 reserved_at_50[0x4];
	u8 stat_rate[0x4];
	u8 hop_limit[0x8];
	u8 reserved_at_60[0x4];
	u8 tclass[0x8];
	u8 flow_label[0x14];
	u8 rgid_rip[16][0x8];
	u8 reserved_at_100[0x4];
	u8 f_dscp[0x1];
	u8 f_ecn[0x1];
	u8 reserved_at_106[0x1];
	u8 f_eth_prio[0x1];
	u8 ecn[0x2];
	u8 dscp[0x6];
	u8 udp_sport[0x10];
	u8 dei_cfi[0x1];
	u8 eth_prio[0x3];
	u8 sl[0x4];
	u8 vhca_port_num[0x8];
	u8 rmac_47_32[0x10];
	u8 rmac_31_0[0x20];
};

struct mlx5_ifc_qpc_bits {
	u8 state[0x4];
	u8 lag_tx_port_affinity[0x4];
	u8 st[0x8];
	u8 reserved_at_10[0x3];
	u8 pm_state[0x2];
	u8 reserved_at_15[0x1];
	u8 req_e2e_credit_mode[0x2];
	u8 offload_type[0x4];
	u8 end_padding_mode[0x2];
	u8 reserved_at_1e[0x2];
	u8 wq_signature[0x1];
	u8 block_lb_mc[0x1];
	u8 atomic_like_write_en[0x1];
	u8 latency_sensitive[0x1];
	u8 reserved_at_24[0x1];
	u8 drain_sigerr[0x1];
	u8 reserved_at_26[0x2];
	u8 pd[0x18];
	u8 mtu[0x3];
	u8 log_msg_max[0x5];
	u8 reserved_at_48[0x1];
	u8 log_rq_size[0x4];
	u8 log_rq_stride[0x3];
	u8 no_sq[0x1];
	u8 log_sq_size[0x4];
	u8 reserved_at_55[0x6];
	u8 rlky[0x1];
	u8 ulp_stateless_offload_mode[0x4];
	u8 counter_set_id[0x8];
	u8 uar_page[0x18];
	u8 reserved_at_80[0x8];
	u8 user_index[0x18];
	u8 reserved_at_a0[0x3];
	u8 log_page_size[0x5];
	u8 remote_qpn[0x18];
	struct mlx5_ifc_ads_bits primary_address_path;
	struct mlx5_ifc_ads_bits secondary_address_path;
	u8 log_ack_req_freq[0x4];
	u8 reserved_at_384[0x4];
	u8 log_sra_max[0x3];
	u8 reserved_at_38b[0x2];
	u8 retry_count[0x3];
	u8 rnr_retry[0x3];
	u8 reserved_at_393[0x1];
	u8 fre[0x1];
	u8 cur_rnr_retry[0x3];
	u8 cur_retry_count[0x3];
	u8 reserved_at_39b[0x5];
	u8 reserved_at_3a0[0x20];
	u8 reserved_at_3c0[0x8];
	u8 next_send_psn[0x18];
	u8 reserved_at_3e0[0x8];
	u8 cqn_snd[0x18];
	u8 reserved_at_400[0x8];
	u8 deth_sqpn[0x18];
	u8 reserved_at_420[0x20];
	u8 reserved_at_440[0x8];
	u8 last_acked_psn[0x18];
	u8 reserved_at_460[0x8];
	u8 ssn[0x18];
	u8 reserved_at_480[0x8];
	u8 log_rra_max[0x3];
	u8 reserved_at_48b[0x1];
	u8 atomic_mode[0x4];
	u8 rre[0x1];
	u8 rwe[0x1];
	u8 rae[0x1];
	u8 reserved_at_493[0x1];
	u8 page_offset[0x6];
	u8 reserved_at_49a[0x3];
	u8 cd_slave_receive[0x1];
	u8 cd_slave_send[0x1];
	u8 cd_master[0x1];
	u8 reserved_at_4a0[0x3];
	u8 min_rnr_nak[0x5];
	u8 next_rcv_psn[0x18];
	u8 reserved_at_4c0[0x8];
	u8 xrcd[0x18];
	u8 reserved_at_4e0[0x8];
	u8 cqn_rcv[0x18];
	u8 dbr_addr[0x40];
	u8 q_key[0x20];
	u8 reserved_at_560[0x5];
	u8 rq_type[0x3];
	u8 srqn_rmpn_xrqn[0x18];
	u8 reserved_at_580[0x8];
	u8 rmsn[0x18];
	u8 hw_sq_wqebb_counter[0x10];
	u8 sw_sq_wqebb_counter[0x10];
	u8 hw_rq_counter[0x20];
	u8 sw_rq_counter[0x20];
	u8 reserved_at_600[0x20];
	u8 reserved_at_620[0xf];
	u8 cgs[0x1];
	u8 cs_req[0x8];
	u8 cs_res[0x8];
	u8 dc_access_key[0x40];
	u8 reserved_at_680[0x3];
	u8 dbr_umem_valid[0x1];
	u8 reserved_at_684[0x9c];
	u8 dbr_umem_id[0x20];
};

struct mlx5_ifc_create_qp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
};

#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
struct mlx5_ifc_create_qp_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x40];
	u8 opt_param_mask[0x20];
	u8 reserved_at_a0[0x20];
	struct mlx5_ifc_qpc_bits qpc;
	u8 wq_umem_offset[0x40];
	u8 wq_umem_id[0x20];
	u8 wq_umem_valid[0x1];
	u8 reserved_at_861[0x1f];
	u8 pas[0][0x40];
};
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

struct mlx5_ifc_sqerr2rts_qp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_sqerr2rts_qp_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
	u8 opt_param_mask[0x20];
	u8 reserved_at_a0[0x20];
	struct mlx5_ifc_qpc_bits qpc;
	u8 reserved_at_800[0x80];
};
struct mlx5_ifc_sqd2rts_qp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_sqd2rts_qp_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
	u8 opt_param_mask[0x20];
	u8 reserved_at_a0[0x20];
	struct mlx5_ifc_qpc_bits qpc;
	u8 reserved_at_800[0x80];
};

struct mlx5_ifc_rts2rts_qp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_rts2rts_qp_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
	u8 opt_param_mask[0x20];
	u8 reserved_at_a0[0x20];
	struct mlx5_ifc_qpc_bits qpc;
	u8 reserved_at_800[0x80];
};

struct mlx5_ifc_rtr2rts_qp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_rtr2rts_qp_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
	u8 opt_param_mask[0x20];
	u8 reserved_at_a0[0x20];
	struct mlx5_ifc_qpc_bits qpc;
	u8 reserved_at_800[0x80];
};

struct mlx5_ifc_rst2init_qp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_rst2init_qp_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
	u8 opt_param_mask[0x20];
	u8 reserved_at_a0[0x20];
	struct mlx5_ifc_qpc_bits qpc;
	u8 reserved_at_800[0x80];
};

struct mlx5_ifc_init2rtr_qp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_init2rtr_qp_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
	u8 opt_param_mask[0x20];
	u8 reserved_at_a0[0x20];
	struct mlx5_ifc_qpc_bits qpc;
	u8 reserved_at_800[0x80];
};

struct mlx5_ifc_init2init_qp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_init2init_qp_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
	u8 opt_param_mask[0x20];
	u8 reserved_at_a0[0x20];
	struct mlx5_ifc_qpc_bits qpc;
	u8 reserved_at_800[0x80];
};
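/*
 * Illustrative sketch only: a DevX-created QP is typically brought up with
 * the command sequence RST2INIT -> INIT2RTR -> RTR2RTS, all of which share
 * the layout above (opcode, qpn, opt_param_mask and an embedded qpc).
 * MLX5_SET() and MLX5_CMD_OP_RST2INIT_QP are assumed to be defined earlier
 * in this file.
 */
static inline void
mlx5_qp_rst2init_example(uint32_t *in, uint32_t qpn)
{
	MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
	MLX5_SET(rst2init_qp_in, in, qpn, qpn);
	/* Remote read/write enable bits live in the embedded qpc context. */
	MLX5_SET(rst2init_qp_in, in, qpc.rre, 1);
	MLX5_SET(rst2init_qp_in, in, qpc.rwe, 1);
}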
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
struct mlx5_ifc_query_qp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	u8 opt_param_mask[0x20];
	u8 reserved_at_a0[0x20];
	struct mlx5_ifc_qpc_bits qpc;
	u8 reserved_at_800[0x80];
	u8 pas[0][0x40];
};
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

struct mlx5_ifc_query_qp_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
};

enum {
	MLX5_DATA_RATE = 0x0,
	MLX5_WQE_RATE = 0x1,
};

struct mlx5_ifc_set_pp_rate_limit_context_bits {
	u8 rate_limit[0x20];
	u8 burst_upper_bound[0x20];
	u8 reserved_at_40[0xC];
	u8 rate_mode[0x4];
	u8 typical_packet_size[0x10];
	u8 reserved_at_60[0x120];
};

#define MLX5_ACCESS_REGISTER_DATA_DWORD_MAX 8u

#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
struct mlx5_ifc_access_register_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	u8 register_data[0][0x20];
};

struct mlx5_ifc_access_register_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x10];
	u8 register_id[0x10];
	u8 argument[0x20];
	u8 register_data[0][0x20];
};
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

enum {
	MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE = 0x0,
	MLX5_ACCESS_REGISTER_IN_OP_MOD_READ = 0x1,
};

enum {
	MLX5_REGISTER_ID_MTUTC = 0x9055,
};

struct mlx5_ifc_register_mtutc_bits {
	u8 time_stamp_mode[0x2];
	u8 time_stamp_state[0x2];
	u8 reserved_at_4[0x18];
	u8 operation[0x4];
	u8 freq_adjustment[0x20];
	u8 reserved_at_40[0x40];
	u8 utc_sec[0x20];
	u8 utc_nsec[0x20];
	u8 time_adjustment[0x20];
};

#define MLX5_MTUTC_TIMESTAMP_MODE_INTERNAL_TIMER 0
#define MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME 1

struct mlx5_ifc_parse_graph_arc_bits {
	u8 start_inner_tunnel[0x1];
	u8 reserved_at_1[0x7];
	u8 arc_parse_graph_node[0x8];
	u8 compare_condition_value[0x10];
	u8 parse_graph_node_handle[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_parse_graph_flow_match_sample_bits {
	u8 flow_match_sample_en[0x1];
	u8 reserved_at_1[0x3];
	u8 flow_match_sample_offset_mode[0x4];
	u8 reserved_at_5[0x8];
	u8 flow_match_sample_field_offset[0x10];
	u8 reserved_at_32[0x4];
	u8 flow_match_sample_field_offset_shift[0x4];
	u8 flow_match_sample_field_base_offset[0x8];
	u8 reserved_at_48[0xd];
	u8 flow_match_sample_tunnel_mode[0x3];
	u8 flow_match_sample_field_offset_mask[0x20];
	u8 flow_match_sample_field_id[0x20];
};

struct mlx5_ifc_parse_graph_flex_bits {
	u8 modify_field_select[0x40];
	u8 reserved_at_64[0x20];
	u8 header_length_base_value[0x10];
	u8 reserved_at_112[0x4];
	u8 header_length_field_shift[0x4];
	u8 reserved_at_120[0x4];
	u8 header_length_mode[0x4];
	u8 header_length_field_offset[0x10];
	u8 next_header_field_offset[0x10];
	u8 reserved_at_160[0x1b];
	u8 next_header_field_size[0x5];
	u8 header_length_field_mask[0x20];
	u8 reserved_at_224[0x20];
	struct mlx5_ifc_parse_graph_flow_match_sample_bits sample_table[0x8];
	struct mlx5_ifc_parse_graph_arc_bits input_arc[0x8];
	struct mlx5_ifc_parse_graph_arc_bits output_arc[0x8];
};

struct mlx5_ifc_create_flex_parser_in_bits {
	struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
	struct mlx5_ifc_parse_graph_flex_bits flex;
};
struct mlx5_ifc_create_flex_parser_out_bits {
	struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
	struct mlx5_ifc_parse_graph_flex_bits flex;
};

struct mlx5_ifc_parse_graph_flex_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	struct mlx5_ifc_parse_graph_flex_bits capability;
};

struct regexp_params_field_select_bits {
	u8 reserved_at_0[0x1e];
	u8 stop_engine[0x1];
	u8 db_umem_id[0x1];
};

struct mlx5_ifc_regexp_params_bits {
	u8 reserved_at_0[0x1f];
	u8 stop_engine[0x1];
	u8 db_umem_id[0x20];
	u8 db_umem_offset[0x40];
	u8 reserved_at_80[0x100];
};

struct mlx5_ifc_set_regexp_params_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x18];
	u8 engine_id[0x8];
	struct regexp_params_field_select_bits field_select;
	struct mlx5_ifc_regexp_params_bits regexp_params;
};

struct mlx5_ifc_set_regexp_params_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_18[0x40];
};

struct mlx5_ifc_query_regexp_params_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x18];
	u8 engine_id[0x8];
	u8 reserved[0x20];
};

struct mlx5_ifc_query_regexp_params_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved[0x40];
	struct mlx5_ifc_regexp_params_bits regexp_params;
};

struct mlx5_ifc_set_regexp_register_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x18];
	u8 engine_id[0x8];
	u8 register_address[0x20];
	u8 register_data[0x20];
	u8 reserved[0x60];
};

struct mlx5_ifc_set_regexp_register_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved[0x40];
};

struct mlx5_ifc_query_regexp_register_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x18];
	u8 engine_id[0x8];
	u8 register_address[0x20];
};

struct mlx5_ifc_query_regexp_register_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved[0x20];
	u8 register_data[0x20];
};

/* CQE format mask. */
#define MLX5E_CQE_FORMAT_MASK 0xc

/* MPW opcode. */
#define MLX5_OPC_MOD_MPW 0x01

/* Compressed Rx CQE structure. */
struct mlx5_mini_cqe8 {
	union {
		uint32_t rx_hash_result;
		struct {
			uint16_t checksum;
			uint16_t stride_idx;
		};
		struct {
			uint16_t wqe_counter;
			uint8_t s_wqe_opcode;
			uint8_t reserved;
		} s_wqe_info;
	};
	uint32_t byte_cnt;
};

/* srTCM PRM flow meter parameters. */
enum {
	MLX5_FLOW_COLOR_RED = 0,
	MLX5_FLOW_COLOR_YELLOW,
	MLX5_FLOW_COLOR_GREEN,
	MLX5_FLOW_COLOR_UNDEFINED,
};

/* Maximum value of srTCM metering parameters. */
#define MLX5_SRTCM_CBS_MAX (0xFF * (1ULL << 0x1F))
#define MLX5_SRTCM_CIR_MAX (8 * (1ULL << 30) * 0xFF)
#define MLX5_SRTCM_EBS_MAX 0

/* Number of bits used for the meter color. */
#define MLX5_MTR_COLOR_BITS 8
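/*
 * Illustrative helper, not part of the PRM definitions: the srTCM committed
 * burst size is programmed as an 8-bit mantissa plus a 5-bit exponent (see
 * cbs_mantissa/cbs_exponent above), i.e. CBS = cbs_mantissa * 2^cbs_exponent
 * bytes, which is exactly where MLX5_SRTCM_CBS_MAX (0xFF * 2^31) comes from.
 */
static inline uint64_t
mlx5_srtcm_cbs_bytes(uint8_t mantissa, uint8_t exponent)
{
	uint64_t cbs = (uint64_t)mantissa << exponent;

	/* Values beyond the 8-bit/5-bit encoding cannot be programmed. */
	return cbs > MLX5_SRTCM_CBS_MAX ? MLX5_SRTCM_CBS_MAX : cbs;
}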
/* Length mode of dynamic flex parser graph node. */
enum mlx5_parse_graph_node_len_mode {
	MLX5_GRAPH_NODE_LEN_FIXED = 0x0,
	MLX5_GRAPH_NODE_LEN_FIELD = 0x1,
	MLX5_GRAPH_NODE_LEN_BITMASK = 0x2,
};

/* Offset mode of the samples of flex parser. */
enum mlx5_parse_graph_flow_match_sample_offset_mode {
	MLX5_GRAPH_SAMPLE_OFFSET_FIXED = 0x0,
	MLX5_GRAPH_SAMPLE_OFFSET_FIELD = 0x1,
	MLX5_GRAPH_SAMPLE_OFFSET_BITMASK = 0x2,
};

/* Node index for an input / output arc of the flex parser graph. */
enum mlx5_parse_graph_arc_node_index {
	MLX5_GRAPH_ARC_NODE_NULL = 0x0,
	MLX5_GRAPH_ARC_NODE_HEAD = 0x1,
	MLX5_GRAPH_ARC_NODE_MAC = 0x2,
	MLX5_GRAPH_ARC_NODE_IP = 0x3,
	MLX5_GRAPH_ARC_NODE_GRE = 0x4,
	MLX5_GRAPH_ARC_NODE_UDP = 0x5,
	MLX5_GRAPH_ARC_NODE_MPLS = 0x6,
	MLX5_GRAPH_ARC_NODE_TCP = 0x7,
	MLX5_GRAPH_ARC_NODE_VXLAN_GPE = 0x8,
	MLX5_GRAPH_ARC_NODE_GENEVE = 0x9,
	MLX5_GRAPH_ARC_NODE_IPSEC_ESP = 0xa,
	MLX5_GRAPH_ARC_NODE_PROGRAMMABLE = 0x1f,
};

/**
 * Convert a user mark to flow mark.
 *
 * @param val
 *   Mark value to convert.
 *
 * @return
 *   Converted mark value.
 */
static inline uint32_t
mlx5_flow_mark_set(uint32_t val)
{
	uint32_t ret;

	/*
	 * Add one to the user value to differentiate un-marked flows from
	 * marked flows; if the ID is equal to MLX5_FLOW_MARK_DEFAULT it
	 * remains untouched.
	 */
	if (val != MLX5_FLOW_MARK_DEFAULT)
		++val;
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
	/*
	 * Mark is 24 bits (minus reserved values) but is stored on a 32 bit
	 * word, byte-swapped by the kernel on little-endian systems. In this
	 * case, left-shifting the resulting big-endian value ensures the
	 * least significant 24 bits are retained when converting it back.
	 */
	ret = rte_cpu_to_be_32(val) >> 8;
#else
	ret = val;
#endif
	return ret;
}

/**
 * Convert a mark to user mark.
 *
 * @param val
 *   Mark value to convert.
 *
 * @return
 *   Converted mark value.
 */
static inline uint32_t
mlx5_flow_mark_get(uint32_t val)
{
	/*
	 * Subtract one from the retrieved value. It was added by
	 * mlx5_flow_mark_set() to distinguish unmarked flows.
	 */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
	return (val >> 8) - 1;
#else
	return val - 1;
#endif
}

#endif /* RTE_PMD_MLX5_PRM_H_ */