1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2018 Mellanox Technologies, Ltd 3 */ 4 5 #ifndef RTE_PMD_MLX5_FLOW_H_ 6 #define RTE_PMD_MLX5_FLOW_H_ 7 8 #include <stdalign.h> 9 #include <stdint.h> 10 #include <string.h> 11 #include <sys/queue.h> 12 13 #include <rte_alarm.h> 14 #include <rte_mtr.h> 15 16 #include <mlx5_glue.h> 17 #include <mlx5_prm.h> 18 19 #include "mlx5.h" 20 #include "hws/mlx5dr.h" 21 22 /* E-Switch Manager port, used for rte_flow_item_port_id. */ 23 #define MLX5_PORT_ESW_MGR UINT32_MAX 24 25 /* E-Switch Manager port, used for rte_flow_item_ethdev. */ 26 #define MLX5_REPRESENTED_PORT_ESW_MGR UINT16_MAX 27 28 /* Private rte flow items. */ 29 enum mlx5_rte_flow_item_type { 30 MLX5_RTE_FLOW_ITEM_TYPE_END = INT_MIN, 31 MLX5_RTE_FLOW_ITEM_TYPE_TAG, 32 MLX5_RTE_FLOW_ITEM_TYPE_SQ, 33 MLX5_RTE_FLOW_ITEM_TYPE_VLAN, 34 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL, 35 }; 36 37 /* Private (internal) rte flow actions. */ 38 enum mlx5_rte_flow_action_type { 39 MLX5_RTE_FLOW_ACTION_TYPE_END = INT_MIN, 40 MLX5_RTE_FLOW_ACTION_TYPE_TAG, 41 MLX5_RTE_FLOW_ACTION_TYPE_MARK, 42 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 43 MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS, 44 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET, 45 MLX5_RTE_FLOW_ACTION_TYPE_AGE, 46 MLX5_RTE_FLOW_ACTION_TYPE_COUNT, 47 MLX5_RTE_FLOW_ACTION_TYPE_JUMP, 48 MLX5_RTE_FLOW_ACTION_TYPE_RSS, 49 MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK, 50 }; 51 52 /* Private (internal) Field IDs for MODIFY_FIELD action. 
*/ 53 enum mlx5_rte_flow_field_id { 54 MLX5_RTE_FLOW_FIELD_END = INT_MIN, 55 MLX5_RTE_FLOW_FIELD_META_REG, 56 }; 57 58 #define MLX5_INDIRECT_ACTION_TYPE_OFFSET 29 59 60 #define MLX5_INDIRECT_ACTION_TYPE_GET(handle) \ 61 (((uint32_t)(uintptr_t)(handle)) >> MLX5_INDIRECT_ACTION_TYPE_OFFSET) 62 63 #define MLX5_INDIRECT_ACTION_IDX_GET(handle) \ 64 (((uint32_t)(uintptr_t)(handle)) & \ 65 ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1)) 66 67 enum { 68 MLX5_INDIRECT_ACTION_TYPE_RSS, 69 MLX5_INDIRECT_ACTION_TYPE_AGE, 70 MLX5_INDIRECT_ACTION_TYPE_COUNT, 71 MLX5_INDIRECT_ACTION_TYPE_CT, 72 MLX5_INDIRECT_ACTION_TYPE_METER_MARK, 73 }; 74 75 /* Now, the maximal ports will be supported is 16, action number is 32M. */ 76 #define MLX5_INDIRECT_ACT_CT_MAX_PORT 0x10 77 78 #define MLX5_INDIRECT_ACT_CT_OWNER_SHIFT 22 79 #define MLX5_INDIRECT_ACT_CT_OWNER_MASK (MLX5_INDIRECT_ACT_CT_MAX_PORT - 1) 80 81 /* 29-31: type, 25-28: owner port, 0-24: index */ 82 #define MLX5_INDIRECT_ACT_CT_GEN_IDX(owner, index) \ 83 ((MLX5_INDIRECT_ACTION_TYPE_CT << MLX5_INDIRECT_ACTION_TYPE_OFFSET) | \ 84 (((owner) & MLX5_INDIRECT_ACT_CT_OWNER_MASK) << \ 85 MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) | (index)) 86 87 #define MLX5_INDIRECT_ACT_CT_GET_OWNER(index) \ 88 (((index) >> MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) & \ 89 MLX5_INDIRECT_ACT_CT_OWNER_MASK) 90 91 #define MLX5_INDIRECT_ACT_CT_GET_IDX(index) \ 92 ((index) & ((1 << MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) - 1)) 93 94 #define MLX5_ACTION_CTX_CT_GET_IDX MLX5_INDIRECT_ACT_CT_GET_IDX 95 #define MLX5_ACTION_CTX_CT_GET_OWNER MLX5_INDIRECT_ACT_CT_GET_OWNER 96 #define MLX5_ACTION_CTX_CT_GEN_IDX MLX5_INDIRECT_ACT_CT_GEN_IDX 97 98 /* Matches on selected register. */ 99 struct mlx5_rte_flow_item_tag { 100 enum modify_reg id; 101 uint32_t data; 102 }; 103 104 /* Modify selected register. 
*/ 105 struct mlx5_rte_flow_action_set_tag { 106 enum modify_reg id; 107 uint8_t offset; 108 uint8_t length; 109 uint32_t data; 110 }; 111 112 struct mlx5_flow_action_copy_mreg { 113 enum modify_reg dst; 114 enum modify_reg src; 115 }; 116 117 /* Matches on source queue. */ 118 struct mlx5_rte_flow_item_sq { 119 uint32_t queue; /* DevX SQ number */ 120 }; 121 122 /* Feature name to allocate metadata register. */ 123 enum mlx5_feature_name { 124 MLX5_HAIRPIN_RX, 125 MLX5_HAIRPIN_TX, 126 MLX5_METADATA_RX, 127 MLX5_METADATA_TX, 128 MLX5_METADATA_FDB, 129 MLX5_FLOW_MARK, 130 MLX5_APP_TAG, 131 MLX5_COPY_MARK, 132 MLX5_MTR_COLOR, 133 MLX5_MTR_ID, 134 MLX5_ASO_FLOW_HIT, 135 MLX5_ASO_CONNTRACK, 136 MLX5_SAMPLE_ID, 137 }; 138 139 /* Default queue number. */ 140 #define MLX5_RSSQ_DEFAULT_NUM 16 141 142 #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0) 143 #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1) 144 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2) 145 #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3) 146 #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4) 147 #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5) 148 149 /* Pattern inner Layer bits. */ 150 #define MLX5_FLOW_LAYER_INNER_L2 (1u << 6) 151 #define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7) 152 #define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8) 153 #define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9) 154 #define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10) 155 #define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11) 156 157 /* Pattern tunnel Layer bits. */ 158 #define MLX5_FLOW_LAYER_VXLAN (1u << 12) 159 #define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13) 160 #define MLX5_FLOW_LAYER_GRE (1u << 14) 161 #define MLX5_FLOW_LAYER_MPLS (1u << 15) 162 /* List of tunnel Layer bits continued below. */ 163 164 /* General pattern items bits. */ 165 #define MLX5_FLOW_ITEM_METADATA (1u << 16) 166 #define MLX5_FLOW_ITEM_PORT_ID (1u << 17) 167 #define MLX5_FLOW_ITEM_TAG (1u << 18) 168 #define MLX5_FLOW_ITEM_MARK (1u << 19) 169 170 /* Pattern MISC bits. 
*/ 171 #define MLX5_FLOW_LAYER_ICMP (1u << 20) 172 #define MLX5_FLOW_LAYER_ICMP6 (1u << 21) 173 #define MLX5_FLOW_LAYER_GRE_KEY (1u << 22) 174 175 /* Pattern tunnel Layer bits (continued). */ 176 #define MLX5_FLOW_LAYER_IPIP (1u << 23) 177 #define MLX5_FLOW_LAYER_IPV6_ENCAP (1u << 24) 178 #define MLX5_FLOW_LAYER_NVGRE (1u << 25) 179 #define MLX5_FLOW_LAYER_GENEVE (1u << 26) 180 181 /* Queue items. */ 182 #define MLX5_FLOW_ITEM_SQ (1u << 27) 183 184 /* Pattern tunnel Layer bits (continued). */ 185 #define MLX5_FLOW_LAYER_GTP (1u << 28) 186 187 /* Pattern eCPRI Layer bit. */ 188 #define MLX5_FLOW_LAYER_ECPRI (UINT64_C(1) << 29) 189 190 /* IPv6 Fragment Extension Header bit. */ 191 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT (1u << 30) 192 #define MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT (1u << 31) 193 194 /* Pattern tunnel Layer bits (continued). */ 195 #define MLX5_FLOW_LAYER_GENEVE_OPT (UINT64_C(1) << 32) 196 #define MLX5_FLOW_LAYER_GTP_PSC (UINT64_C(1) << 33) 197 198 /* INTEGRITY item bits */ 199 #define MLX5_FLOW_ITEM_OUTER_INTEGRITY (UINT64_C(1) << 34) 200 #define MLX5_FLOW_ITEM_INNER_INTEGRITY (UINT64_C(1) << 35) 201 #define MLX5_FLOW_ITEM_INTEGRITY \ 202 (MLX5_FLOW_ITEM_OUTER_INTEGRITY | MLX5_FLOW_ITEM_INNER_INTEGRITY) 203 204 /* Conntrack item. 
*/ 205 #define MLX5_FLOW_LAYER_ASO_CT (UINT64_C(1) << 36) 206 207 /* Flex item */ 208 #define MLX5_FLOW_ITEM_OUTER_FLEX (UINT64_C(1) << 37) 209 #define MLX5_FLOW_ITEM_INNER_FLEX (UINT64_C(1) << 38) 210 #define MLX5_FLOW_ITEM_FLEX_TUNNEL (UINT64_C(1) << 39) 211 212 /* ESP item */ 213 #define MLX5_FLOW_ITEM_ESP (UINT64_C(1) << 40) 214 215 /* Port Representor/Represented Port item */ 216 #define MLX5_FLOW_ITEM_PORT_REPRESENTOR (UINT64_C(1) << 41) 217 #define MLX5_FLOW_ITEM_REPRESENTED_PORT (UINT64_C(1) << 42) 218 219 /* Meter color item */ 220 #define MLX5_FLOW_ITEM_METER_COLOR (UINT64_C(1) << 44) 221 222 /* IPv6 routing extension item */ 223 #define MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT (UINT64_C(1) << 45) 224 #define MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT (UINT64_C(1) << 46) 225 226 /* Outer Masks. */ 227 #define MLX5_FLOW_LAYER_OUTER_L3 \ 228 (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6) 229 #define MLX5_FLOW_LAYER_OUTER_L4 \ 230 (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP) 231 #define MLX5_FLOW_LAYER_OUTER \ 232 (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \ 233 MLX5_FLOW_LAYER_OUTER_L4) 234 235 /* Tunnel Masks. */ 236 #define MLX5_FLOW_LAYER_TUNNEL \ 237 (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \ 238 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \ 239 MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \ 240 MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \ 241 MLX5_FLOW_ITEM_FLEX_TUNNEL) 242 243 /* Inner Masks. */ 244 #define MLX5_FLOW_LAYER_INNER_L3 \ 245 (MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6) 246 #define MLX5_FLOW_LAYER_INNER_L4 \ 247 (MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP) 248 #define MLX5_FLOW_LAYER_INNER \ 249 (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \ 250 MLX5_FLOW_LAYER_INNER_L4) 251 252 /* Layer Masks. 
*/ 253 #define MLX5_FLOW_LAYER_L2 \ 254 (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_INNER_L2) 255 #define MLX5_FLOW_LAYER_L3_IPV4 \ 256 (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV4) 257 #define MLX5_FLOW_LAYER_L3_IPV6 \ 258 (MLX5_FLOW_LAYER_OUTER_L3_IPV6 | MLX5_FLOW_LAYER_INNER_L3_IPV6) 259 #define MLX5_FLOW_LAYER_L3 \ 260 (MLX5_FLOW_LAYER_L3_IPV4 | MLX5_FLOW_LAYER_L3_IPV6) 261 #define MLX5_FLOW_LAYER_L4 \ 262 (MLX5_FLOW_LAYER_OUTER_L4 | MLX5_FLOW_LAYER_INNER_L4) 263 264 /* Actions */ 265 #define MLX5_FLOW_ACTION_DROP (1ull << 0) 266 #define MLX5_FLOW_ACTION_QUEUE (1ull << 1) 267 #define MLX5_FLOW_ACTION_RSS (1ull << 2) 268 #define MLX5_FLOW_ACTION_FLAG (1ull << 3) 269 #define MLX5_FLOW_ACTION_MARK (1ull << 4) 270 #define MLX5_FLOW_ACTION_COUNT (1ull << 5) 271 #define MLX5_FLOW_ACTION_PORT_ID (1ull << 6) 272 #define MLX5_FLOW_ACTION_OF_POP_VLAN (1ull << 7) 273 #define MLX5_FLOW_ACTION_OF_PUSH_VLAN (1ull << 8) 274 #define MLX5_FLOW_ACTION_OF_SET_VLAN_VID (1ull << 9) 275 #define MLX5_FLOW_ACTION_OF_SET_VLAN_PCP (1ull << 10) 276 #define MLX5_FLOW_ACTION_SET_IPV4_SRC (1ull << 11) 277 #define MLX5_FLOW_ACTION_SET_IPV4_DST (1ull << 12) 278 #define MLX5_FLOW_ACTION_SET_IPV6_SRC (1ull << 13) 279 #define MLX5_FLOW_ACTION_SET_IPV6_DST (1ull << 14) 280 #define MLX5_FLOW_ACTION_SET_TP_SRC (1ull << 15) 281 #define MLX5_FLOW_ACTION_SET_TP_DST (1ull << 16) 282 #define MLX5_FLOW_ACTION_JUMP (1ull << 17) 283 #define MLX5_FLOW_ACTION_SET_TTL (1ull << 18) 284 #define MLX5_FLOW_ACTION_DEC_TTL (1ull << 19) 285 #define MLX5_FLOW_ACTION_SET_MAC_SRC (1ull << 20) 286 #define MLX5_FLOW_ACTION_SET_MAC_DST (1ull << 21) 287 #define MLX5_FLOW_ACTION_ENCAP (1ull << 22) 288 #define MLX5_FLOW_ACTION_DECAP (1ull << 23) 289 #define MLX5_FLOW_ACTION_INC_TCP_SEQ (1ull << 24) 290 #define MLX5_FLOW_ACTION_DEC_TCP_SEQ (1ull << 25) 291 #define MLX5_FLOW_ACTION_INC_TCP_ACK (1ull << 26) 292 #define MLX5_FLOW_ACTION_DEC_TCP_ACK (1ull << 27) 293 #define MLX5_FLOW_ACTION_SET_TAG (1ull << 28) 
294 #define MLX5_FLOW_ACTION_MARK_EXT (1ull << 29) 295 #define MLX5_FLOW_ACTION_SET_META (1ull << 30) 296 #define MLX5_FLOW_ACTION_METER (1ull << 31) 297 #define MLX5_FLOW_ACTION_SET_IPV4_DSCP (1ull << 32) 298 #define MLX5_FLOW_ACTION_SET_IPV6_DSCP (1ull << 33) 299 #define MLX5_FLOW_ACTION_AGE (1ull << 34) 300 #define MLX5_FLOW_ACTION_DEFAULT_MISS (1ull << 35) 301 #define MLX5_FLOW_ACTION_SAMPLE (1ull << 36) 302 #define MLX5_FLOW_ACTION_TUNNEL_SET (1ull << 37) 303 #define MLX5_FLOW_ACTION_TUNNEL_MATCH (1ull << 38) 304 #define MLX5_FLOW_ACTION_MODIFY_FIELD (1ull << 39) 305 #define MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY (1ull << 40) 306 #define MLX5_FLOW_ACTION_CT (1ull << 41) 307 #define MLX5_FLOW_ACTION_SEND_TO_KERNEL (1ull << 42) 308 #define MLX5_FLOW_ACTION_INDIRECT_COUNT (1ull << 43) 309 #define MLX5_FLOW_ACTION_INDIRECT_AGE (1ull << 44) 310 311 #define MLX5_FLOW_DROP_INCLUSIVE_ACTIONS \ 312 (MLX5_FLOW_ACTION_COUNT | MLX5_FLOW_ACTION_SAMPLE | MLX5_FLOW_ACTION_AGE) 313 314 #define MLX5_FLOW_FATE_ACTIONS \ 315 (MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \ 316 MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_JUMP | \ 317 MLX5_FLOW_ACTION_DEFAULT_MISS | \ 318 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY | \ 319 MLX5_FLOW_ACTION_SEND_TO_KERNEL) 320 321 #define MLX5_FLOW_FATE_ESWITCH_ACTIONS \ 322 (MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \ 323 MLX5_FLOW_ACTION_JUMP | MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) 324 325 #define MLX5_FLOW_MODIFY_HDR_ACTIONS (MLX5_FLOW_ACTION_SET_IPV4_SRC | \ 326 MLX5_FLOW_ACTION_SET_IPV4_DST | \ 327 MLX5_FLOW_ACTION_SET_IPV6_SRC | \ 328 MLX5_FLOW_ACTION_SET_IPV6_DST | \ 329 MLX5_FLOW_ACTION_SET_TP_SRC | \ 330 MLX5_FLOW_ACTION_SET_TP_DST | \ 331 MLX5_FLOW_ACTION_SET_TTL | \ 332 MLX5_FLOW_ACTION_DEC_TTL | \ 333 MLX5_FLOW_ACTION_SET_MAC_SRC | \ 334 MLX5_FLOW_ACTION_SET_MAC_DST | \ 335 MLX5_FLOW_ACTION_INC_TCP_SEQ | \ 336 MLX5_FLOW_ACTION_DEC_TCP_SEQ | \ 337 MLX5_FLOW_ACTION_INC_TCP_ACK | \ 338 MLX5_FLOW_ACTION_DEC_TCP_ACK 
| \ 339 MLX5_FLOW_ACTION_OF_SET_VLAN_VID | \ 340 MLX5_FLOW_ACTION_SET_TAG | \ 341 MLX5_FLOW_ACTION_MARK_EXT | \ 342 MLX5_FLOW_ACTION_SET_META | \ 343 MLX5_FLOW_ACTION_SET_IPV4_DSCP | \ 344 MLX5_FLOW_ACTION_SET_IPV6_DSCP | \ 345 MLX5_FLOW_ACTION_MODIFY_FIELD) 346 347 #define MLX5_FLOW_VLAN_ACTIONS (MLX5_FLOW_ACTION_OF_POP_VLAN | \ 348 MLX5_FLOW_ACTION_OF_PUSH_VLAN) 349 350 #define MLX5_FLOW_XCAP_ACTIONS (MLX5_FLOW_ACTION_ENCAP | MLX5_FLOW_ACTION_DECAP) 351 352 #ifndef IPPROTO_MPLS 353 #define IPPROTO_MPLS 137 354 #endif 355 356 /* UDP port number for MPLS */ 357 #define MLX5_UDP_PORT_MPLS 6635 358 359 /* UDP port numbers for VxLAN. */ 360 #define MLX5_UDP_PORT_VXLAN 4789 361 #define MLX5_UDP_PORT_VXLAN_GPE 4790 362 363 /* UDP port numbers for GENEVE. */ 364 #define MLX5_UDP_PORT_GENEVE 6081 365 366 /* Lowest priority indicator. */ 367 #define MLX5_FLOW_LOWEST_PRIO_INDICATOR ((uint32_t)-1) 368 369 /* 370 * Max priority for ingress\egress flow groups 371 * greater than 0 and for any transfer flow group. 372 * From user configation: 0 - 21843. 373 */ 374 #define MLX5_NON_ROOT_FLOW_MAX_PRIO (21843 + 1) 375 376 /* 377 * Number of sub priorities. 378 * For each kind of pattern matching i.e. L2, L3, L4 to have a correct 379 * matching on the NIC (firmware dependent) L4 most have the higher priority 380 * followed by L3 and ending with L2. 381 */ 382 #define MLX5_PRIORITY_MAP_L2 2 383 #define MLX5_PRIORITY_MAP_L3 1 384 #define MLX5_PRIORITY_MAP_L4 0 385 #define MLX5_PRIORITY_MAP_MAX 3 386 387 /* Valid layer type for IPV4 RSS. */ 388 #define MLX5_IPV4_LAYER_TYPES \ 389 (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \ 390 RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \ 391 RTE_ETH_RSS_NONFRAG_IPV4_OTHER) 392 393 /* IBV hash source bits for IPV4. */ 394 #define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4) 395 396 /* Valid layer type for IPV6 RSS. 
*/ 397 #define MLX5_IPV6_LAYER_TYPES \ 398 (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \ 399 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_IPV6_TCP_EX | \ 400 RTE_ETH_RSS_IPV6_UDP_EX | RTE_ETH_RSS_NONFRAG_IPV6_OTHER) 401 402 /* IBV hash source bits for IPV6. */ 403 #define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6) 404 405 /* IBV hash bits for L3 SRC. */ 406 #define MLX5_L3_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_SRC_IPV6) 407 408 /* IBV hash bits for L3 DST. */ 409 #define MLX5_L3_DST_IBV_RX_HASH (IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_DST_IPV6) 410 411 /* IBV hash bits for TCP. */ 412 #define MLX5_TCP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \ 413 IBV_RX_HASH_DST_PORT_TCP) 414 415 /* IBV hash bits for UDP. */ 416 #define MLX5_UDP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_UDP | \ 417 IBV_RX_HASH_DST_PORT_UDP) 418 419 /* IBV hash bits for L4 SRC. */ 420 #define MLX5_L4_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \ 421 IBV_RX_HASH_SRC_PORT_UDP) 422 423 /* IBV hash bits for L4 DST. 
*/ 424 #define MLX5_L4_DST_IBV_RX_HASH (IBV_RX_HASH_DST_PORT_TCP | \ 425 IBV_RX_HASH_DST_PORT_UDP) 426 427 /* Geneve header first 16Bit */ 428 #define MLX5_GENEVE_VER_MASK 0x3 429 #define MLX5_GENEVE_VER_SHIFT 14 430 #define MLX5_GENEVE_VER_VAL(a) \ 431 (((a) >> (MLX5_GENEVE_VER_SHIFT)) & (MLX5_GENEVE_VER_MASK)) 432 #define MLX5_GENEVE_OPTLEN_MASK 0x3F 433 #define MLX5_GENEVE_OPTLEN_SHIFT 8 434 #define MLX5_GENEVE_OPTLEN_VAL(a) \ 435 (((a) >> (MLX5_GENEVE_OPTLEN_SHIFT)) & (MLX5_GENEVE_OPTLEN_MASK)) 436 #define MLX5_GENEVE_OAMF_MASK 0x1 437 #define MLX5_GENEVE_OAMF_SHIFT 7 438 #define MLX5_GENEVE_OAMF_VAL(a) \ 439 (((a) >> (MLX5_GENEVE_OAMF_SHIFT)) & (MLX5_GENEVE_OAMF_MASK)) 440 #define MLX5_GENEVE_CRITO_MASK 0x1 441 #define MLX5_GENEVE_CRITO_SHIFT 6 442 #define MLX5_GENEVE_CRITO_VAL(a) \ 443 (((a) >> (MLX5_GENEVE_CRITO_SHIFT)) & (MLX5_GENEVE_CRITO_MASK)) 444 #define MLX5_GENEVE_RSVD_MASK 0x3F 445 #define MLX5_GENEVE_RSVD_VAL(a) ((a) & (MLX5_GENEVE_RSVD_MASK)) 446 /* 447 * The length of the Geneve options fields, expressed in four byte multiples, 448 * not including the eight byte fixed tunnel. 449 */ 450 #define MLX5_GENEVE_OPT_LEN_0 14 451 #define MLX5_GENEVE_OPT_LEN_1 63 452 453 #define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_ether_hdr) + \ 454 sizeof(struct rte_ipv4_hdr)) 455 /* GTP extension header flag. */ 456 #define MLX5_GTP_EXT_HEADER_FLAG 4 457 458 /* GTP extension header PDU type shift. */ 459 #define MLX5_GTP_PDU_TYPE_SHIFT(a) ((a) << 4) 460 461 /* IPv4 fragment_offset field contains relevant data in bits 2 to 15. */ 462 #define MLX5_IPV4_FRAG_OFFSET_MASK \ 463 (RTE_IPV4_HDR_OFFSET_MASK | RTE_IPV4_HDR_MF_FLAG) 464 465 /* Specific item's fields can accept a range of values (using spec and last). */ 466 #define MLX5_ITEM_RANGE_NOT_ACCEPTED false 467 #define MLX5_ITEM_RANGE_ACCEPTED true 468 469 /* Software header modify action numbers of a flow. 
*/ 470 #define MLX5_ACT_NUM_MDF_IPV4 1 471 #define MLX5_ACT_NUM_MDF_IPV6 4 472 #define MLX5_ACT_NUM_MDF_MAC 2 473 #define MLX5_ACT_NUM_MDF_VID 1 474 #define MLX5_ACT_NUM_MDF_PORT 1 475 #define MLX5_ACT_NUM_MDF_TTL 1 476 #define MLX5_ACT_NUM_DEC_TTL MLX5_ACT_NUM_MDF_TTL 477 #define MLX5_ACT_NUM_MDF_TCPSEQ 1 478 #define MLX5_ACT_NUM_MDF_TCPACK 1 479 #define MLX5_ACT_NUM_SET_REG 1 480 #define MLX5_ACT_NUM_SET_TAG 1 481 #define MLX5_ACT_NUM_CPY_MREG MLX5_ACT_NUM_SET_TAG 482 #define MLX5_ACT_NUM_SET_MARK MLX5_ACT_NUM_SET_TAG 483 #define MLX5_ACT_NUM_SET_META MLX5_ACT_NUM_SET_TAG 484 #define MLX5_ACT_NUM_SET_DSCP 1 485 486 /* Maximum number of fields to modify in MODIFY_FIELD */ 487 #define MLX5_ACT_MAX_MOD_FIELDS 5 488 489 /* Syndrome bits definition for connection tracking. */ 490 #define MLX5_CT_SYNDROME_VALID (0x0 << 6) 491 #define MLX5_CT_SYNDROME_INVALID (0x1 << 6) 492 #define MLX5_CT_SYNDROME_TRAP (0x2 << 6) 493 #define MLX5_CT_SYNDROME_STATE_CHANGE (0x1 << 1) 494 #define MLX5_CT_SYNDROME_BAD_PACKET (0x1 << 0) 495 496 enum mlx5_flow_drv_type { 497 MLX5_FLOW_TYPE_MIN, 498 MLX5_FLOW_TYPE_DV, 499 MLX5_FLOW_TYPE_VERBS, 500 MLX5_FLOW_TYPE_HW, 501 MLX5_FLOW_TYPE_MAX, 502 }; 503 504 /* Fate action type. */ 505 enum mlx5_flow_fate_type { 506 MLX5_FLOW_FATE_NONE, /* Egress flow. */ 507 MLX5_FLOW_FATE_QUEUE, 508 MLX5_FLOW_FATE_JUMP, 509 MLX5_FLOW_FATE_PORT_ID, 510 MLX5_FLOW_FATE_DROP, 511 MLX5_FLOW_FATE_DEFAULT_MISS, 512 MLX5_FLOW_FATE_SHARED_RSS, 513 MLX5_FLOW_FATE_MTR, 514 MLX5_FLOW_FATE_SEND_TO_KERNEL, 515 MLX5_FLOW_FATE_MAX, 516 }; 517 518 /* Matcher PRM representation */ 519 struct mlx5_flow_dv_match_params { 520 size_t size; 521 /**< Size of match value. Do NOT split size and key! */ 522 uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)]; 523 /**< Matcher value. This value is used as the mask or as a key. */ 524 }; 525 526 /* Matcher structure. */ 527 struct mlx5_flow_dv_matcher { 528 struct mlx5_list_entry entry; /**< Pointer to the next element. 
*/ 529 struct mlx5_flow_tbl_resource *tbl; 530 /**< Pointer to the table(group) the matcher associated with. */ 531 void *matcher_object; /**< Pointer to DV matcher */ 532 uint16_t crc; /**< CRC of key. */ 533 uint16_t priority; /**< Priority of matcher. */ 534 struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */ 535 }; 536 537 #define MLX5_ENCAP_MAX_LEN 132 538 539 /* Encap/decap resource structure. */ 540 struct mlx5_flow_dv_encap_decap_resource { 541 struct mlx5_list_entry entry; 542 /* Pointer to next element. */ 543 uint32_t refcnt; /**< Reference counter. */ 544 void *action; 545 /**< Encap/decap action object. */ 546 uint8_t buf[MLX5_ENCAP_MAX_LEN]; 547 size_t size; 548 uint8_t reformat_type; 549 uint8_t ft_type; 550 uint64_t flags; /**< Flags for RDMA API. */ 551 uint32_t idx; /**< Index for the index memory pool. */ 552 }; 553 554 /* Tag resource structure. */ 555 struct mlx5_flow_dv_tag_resource { 556 struct mlx5_list_entry entry; 557 /**< hash list entry for tag resource, tag value as the key. */ 558 void *action; 559 /**< Tag action object. */ 560 uint32_t refcnt; /**< Reference counter. */ 561 uint32_t idx; /**< Index for the index memory pool. */ 562 uint32_t tag_id; /**< Tag ID. */ 563 }; 564 565 /* Modify resource structure */ 566 struct mlx5_flow_dv_modify_hdr_resource { 567 struct mlx5_list_entry entry; 568 void *action; /**< Modify header action object. */ 569 uint32_t idx; 570 /* Key area for hash list matching: */ 571 uint8_t ft_type; /**< Flow table type, Rx or Tx. */ 572 uint8_t actions_num; /**< Number of modification actions. */ 573 bool root; /**< Whether action is in root table. */ 574 struct mlx5_modification_cmd actions[]; 575 /**< Modification actions. */ 576 } __rte_packed; 577 578 /* Modify resource key of the hash organization. */ 579 union mlx5_flow_modify_hdr_key { 580 struct { 581 uint32_t ft_type:8; /**< Flow table type, Rx or Tx. */ 582 uint32_t actions_num:5; /**< Number of modification actions. 
*/ 583 uint32_t group:19; /**< Flow group id. */ 584 uint32_t cksum; /**< Actions check sum. */ 585 }; 586 uint64_t v64; /**< full 64bits value of key */ 587 }; 588 589 /* Jump action resource structure. */ 590 struct mlx5_flow_dv_jump_tbl_resource { 591 void *action; /**< Pointer to the rdma core action. */ 592 }; 593 594 /* Port ID resource structure. */ 595 struct mlx5_flow_dv_port_id_action_resource { 596 struct mlx5_list_entry entry; 597 void *action; /**< Action object. */ 598 uint32_t port_id; /**< Port ID value. */ 599 uint32_t idx; /**< Indexed pool memory index. */ 600 }; 601 602 /* Push VLAN action resource structure */ 603 struct mlx5_flow_dv_push_vlan_action_resource { 604 struct mlx5_list_entry entry; /* Cache entry. */ 605 void *action; /**< Action object. */ 606 uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */ 607 rte_be32_t vlan_tag; /**< VLAN tag value. */ 608 uint32_t idx; /**< Indexed pool memory index. */ 609 }; 610 611 /* Metadata register copy table entry. */ 612 struct mlx5_flow_mreg_copy_resource { 613 /* 614 * Hash list entry for copy table. 615 * - Key is 32/64-bit MARK action ID. 616 * - MUST be the first entry. 617 */ 618 struct mlx5_list_entry hlist_ent; 619 LIST_ENTRY(mlx5_flow_mreg_copy_resource) next; 620 /* List entry for device flows. */ 621 uint32_t idx; 622 uint32_t rix_flow; /* Built flow for copy. */ 623 uint32_t mark_id; 624 }; 625 626 /* Table tunnel parameter. */ 627 struct mlx5_flow_tbl_tunnel_prm { 628 const struct mlx5_flow_tunnel *tunnel; 629 uint32_t group_id; 630 bool external; 631 }; 632 633 /* Table data structure of the hash organization. */ 634 struct mlx5_flow_tbl_data_entry { 635 struct mlx5_list_entry entry; 636 /**< hash list entry, 64-bits key inside. */ 637 struct mlx5_flow_tbl_resource tbl; 638 /**< flow table resource. */ 639 struct mlx5_list *matchers; 640 /**< matchers' header associated with the flow table. 
*/ 641 struct mlx5_flow_dv_jump_tbl_resource jump; 642 /**< jump resource, at most one for each table created. */ 643 uint32_t idx; /**< index for the indexed mempool. */ 644 /**< tunnel offload */ 645 const struct mlx5_flow_tunnel *tunnel; 646 uint32_t group_id; 647 uint32_t external:1; 648 uint32_t tunnel_offload:1; /* Tunnel offload table or not. */ 649 uint32_t is_egress:1; /**< Egress table. */ 650 uint32_t is_transfer:1; /**< Transfer table. */ 651 uint32_t dummy:1; /**< DR table. */ 652 uint32_t id:22; /**< Table ID. */ 653 uint32_t reserve:5; /**< Reserved to future using. */ 654 uint32_t level; /**< Table level. */ 655 }; 656 657 /* Sub rdma-core actions list. */ 658 struct mlx5_flow_sub_actions_list { 659 uint32_t actions_num; /**< Number of sample actions. */ 660 uint64_t action_flags; 661 void *dr_queue_action; 662 void *dr_tag_action; 663 void *dr_cnt_action; 664 void *dr_port_id_action; 665 void *dr_encap_action; 666 void *dr_jump_action; 667 }; 668 669 /* Sample sub-actions resource list. */ 670 struct mlx5_flow_sub_actions_idx { 671 uint32_t rix_hrxq; /**< Hash Rx queue object index. */ 672 uint32_t rix_tag; /**< Index to the tag action. */ 673 uint32_t rix_port_id_action; /**< Index to port ID action resource. */ 674 uint32_t rix_encap_decap; /**< Index to encap/decap resource. */ 675 uint32_t rix_jump; /**< Index to the jump action resource. */ 676 }; 677 678 /* Sample action resource structure. */ 679 struct mlx5_flow_dv_sample_resource { 680 struct mlx5_list_entry entry; /**< Cache entry. */ 681 union { 682 void *verbs_action; /**< Verbs sample action object. */ 683 void **sub_actions; /**< Sample sub-action array. */ 684 }; 685 struct rte_eth_dev *dev; /**< Device registers the action. */ 686 uint32_t idx; /** Sample object index. 
*/ 687 uint8_t ft_type; /** Flow Table Type */ 688 uint32_t ft_id; /** Flow Table Level */ 689 uint32_t ratio; /** Sample Ratio */ 690 uint64_t set_action; /** Restore reg_c0 value */ 691 void *normal_path_tbl; /** Flow Table pointer */ 692 struct mlx5_flow_sub_actions_idx sample_idx; 693 /**< Action index resources. */ 694 struct mlx5_flow_sub_actions_list sample_act; 695 /**< Action resources. */ 696 }; 697 698 #define MLX5_MAX_DEST_NUM 2 699 700 /* Destination array action resource structure. */ 701 struct mlx5_flow_dv_dest_array_resource { 702 struct mlx5_list_entry entry; /**< Cache entry. */ 703 uint32_t idx; /** Destination array action object index. */ 704 uint8_t ft_type; /** Flow Table Type */ 705 uint8_t num_of_dest; /**< Number of destination actions. */ 706 struct rte_eth_dev *dev; /**< Device registers the action. */ 707 void *action; /**< Pointer to the rdma core action. */ 708 struct mlx5_flow_sub_actions_idx sample_idx[MLX5_MAX_DEST_NUM]; 709 /**< Action index resources. */ 710 struct mlx5_flow_sub_actions_list sample_act[MLX5_MAX_DEST_NUM]; 711 /**< Action resources. */ 712 }; 713 714 /* PMD flow priority for tunnel */ 715 #define MLX5_TUNNEL_PRIO_GET(rss_desc) \ 716 ((rss_desc)->level >= 2 ? MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4) 717 718 719 /** Device flow handle structure for DV mode only. */ 720 struct mlx5_flow_handle_dv { 721 /* Flow DV api: */ 722 struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */ 723 struct mlx5_flow_dv_modify_hdr_resource *modify_hdr; 724 /**< Pointer to modify header resource in cache. */ 725 uint32_t rix_encap_decap; 726 /**< Index to encap/decap resource in cache. */ 727 uint32_t rix_push_vlan; 728 /**< Index to push VLAN action resource in cache. */ 729 uint32_t rix_tag; 730 /**< Index to the tag action. */ 731 uint32_t rix_sample; 732 /**< Index to sample action resource in cache. */ 733 uint32_t rix_dest_array; 734 /**< Index to destination array resource in cache. 
*/ 735 } __rte_packed; 736 737 /** Device flow handle structure: used both for creating & destroying. */ 738 struct mlx5_flow_handle { 739 SILIST_ENTRY(uint32_t)next; 740 struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */ 741 /**< Index to next device flow handle. */ 742 uint64_t layers; 743 /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */ 744 void *drv_flow; /**< pointer to driver flow object. */ 745 uint32_t split_flow_id:27; /**< Sub flow unique match flow id. */ 746 uint32_t is_meter_flow_id:1; /**< Indicate if flow_id is for meter. */ 747 uint32_t fate_action:4; /**< Fate action type. */ 748 union { 749 uint32_t rix_hrxq; /**< Hash Rx queue object index. */ 750 uint32_t rix_jump; /**< Index to the jump action resource. */ 751 uint32_t rix_port_id_action; 752 /**< Index to port ID action resource. */ 753 uint32_t rix_fate; 754 /**< Generic value indicates the fate action. */ 755 uint32_t rix_default_fate; 756 /**< Indicates default miss fate action. */ 757 uint32_t rix_srss; 758 /**< Indicates shared RSS fate action. */ 759 }; 760 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) 761 struct mlx5_flow_handle_dv dvh; 762 #endif 763 uint8_t flex_item; /**< referenced Flex Item bitmask. */ 764 } __rte_packed; 765 766 /* 767 * Size for Verbs device flow handle structure only. Do not use the DV only 768 * structure in Verbs. No DV flows attributes will be accessed. 769 * Macro offsetof() could also be used here. 770 */ 771 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) 772 #define MLX5_FLOW_HANDLE_VERBS_SIZE \ 773 (sizeof(struct mlx5_flow_handle) - sizeof(struct mlx5_flow_handle_dv)) 774 #else 775 #define MLX5_FLOW_HANDLE_VERBS_SIZE (sizeof(struct mlx5_flow_handle)) 776 #endif 777 778 /** Device flow structure only for DV flow creation. */ 779 struct mlx5_flow_dv_workspace { 780 uint32_t group; /**< The group index. */ 781 uint32_t table_id; /**< Flow table identifier. 
*/ 782 uint8_t transfer; /**< 1 if the flow is E-Switch flow. */ 783 int actions_n; /**< number of actions. */ 784 void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS]; /**< Action list. */ 785 struct mlx5_flow_dv_encap_decap_resource *encap_decap; 786 /**< Pointer to encap/decap resource in cache. */ 787 struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res; 788 /**< Pointer to push VLAN action resource in cache. */ 789 struct mlx5_flow_dv_tag_resource *tag_resource; 790 /**< pointer to the tag action. */ 791 struct mlx5_flow_dv_port_id_action_resource *port_id_action; 792 /**< Pointer to port ID action resource. */ 793 struct mlx5_flow_dv_jump_tbl_resource *jump; 794 /**< Pointer to the jump action resource. */ 795 struct mlx5_flow_dv_match_params value; 796 /**< Holds the value that the packet is compared to. */ 797 struct mlx5_flow_dv_sample_resource *sample_res; 798 /**< Pointer to the sample action resource. */ 799 struct mlx5_flow_dv_dest_array_resource *dest_array_res; 800 /**< Pointer to the destination array resource. */ 801 }; 802 803 #ifdef HAVE_INFINIBAND_VERBS_H 804 /* 805 * Maximal Verbs flow specifications & actions size. 806 * Some elements are mutually exclusive, but enough space should be allocated. 807 * Tunnel cases: 1. Max 2 Ethernet + IP(v6 len > v4 len) + TCP/UDP headers. 808 * 2. One tunnel header (exception: GRE + MPLS), 809 * SPEC length: GRE == tunnel. 810 * Actions: 1. 1 Mark OR Flag. 811 * 2. 1 Drop (if any). 812 * 3. No limitation for counters, but it makes no sense to support too 813 * many counters in a single device flow. 
 */
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
/* Worst-case spec size when MPLS is supported: GRE + MPLS tunnel headers. */
#define MLX5_VERBS_MAX_SPEC_SIZE \
        ( \
                (2 * (sizeof(struct ibv_flow_spec_eth) + \
                      sizeof(struct ibv_flow_spec_ipv6) + \
                      sizeof(struct ibv_flow_spec_tcp_udp)) + \
                sizeof(struct ibv_flow_spec_gre) + \
                sizeof(struct ibv_flow_spec_mpls)) \
        )
#else
/* Worst-case spec size without MPLS: a single generic tunnel header. */
#define MLX5_VERBS_MAX_SPEC_SIZE \
        ( \
                (2 * (sizeof(struct ibv_flow_spec_eth) + \
                      sizeof(struct ibv_flow_spec_ipv6) + \
                      sizeof(struct ibv_flow_spec_tcp_udp)) + \
                sizeof(struct ibv_flow_spec_tunnel)) \
        )
#endif

#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
        defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
/* Tag + drop + up to 4 counter actions. */
#define MLX5_VERBS_MAX_ACT_SIZE \
        ( \
                sizeof(struct ibv_flow_spec_action_tag) + \
                sizeof(struct ibv_flow_spec_action_drop) + \
                sizeof(struct ibv_flow_spec_counter_action) * 4 \
        )
#else
/* Tag + drop actions only when Verbs counter sets are unavailable. */
#define MLX5_VERBS_MAX_ACT_SIZE \
        ( \
                sizeof(struct ibv_flow_spec_action_tag) + \
                sizeof(struct ibv_flow_spec_action_drop) \
        )
#endif

/* Total buffer size needed for the specifications plus the actions. */
#define MLX5_VERBS_MAX_SPEC_ACT_SIZE \
        (MLX5_VERBS_MAX_SPEC_SIZE + MLX5_VERBS_MAX_ACT_SIZE)

/** Device flow structure only for Verbs flow creation. */
struct mlx5_flow_verbs_workspace {
        unsigned int size; /**< Size of the attribute. */
        struct ibv_flow_attr attr; /**< Verbs flow attribute buffer. */
        uint8_t specs[MLX5_VERBS_MAX_SPEC_ACT_SIZE];
        /**< Specifications & actions buffer of verbs flow. */
};
#endif /* HAVE_INFINIBAND_VERBS_H */

/* Bit positions used in mlx5_flow.skip_scale, see its documentation below. */
#define MLX5_SCALE_FLOW_GROUP_BIT 0
#define MLX5_SCALE_JUMP_FLOW_GROUP_BIT 1

/** Maximal number of device sub-flows supported. */
#define MLX5_NUM_MAX_DEV_FLOWS 32

/**
 * Tunnel offload rule types.
 */
enum mlx5_tof_rule_type {
        MLX5_TUNNEL_OFFLOAD_NONE = 0,
        MLX5_TUNNEL_OFFLOAD_SET_RULE,
        MLX5_TUNNEL_OFFLOAD_MATCH_RULE,
        MLX5_TUNNEL_OFFLOAD_MISS_RULE,
};

/** Device flow structure. */
__extension__
struct mlx5_flow {
        struct rte_flow *flow; /**< Pointer to the main flow. */
        uint32_t flow_idx; /**< The memory pool index to the main flow. */
        uint64_t hash_fields; /**< Hash Rx queue hash fields. */
        uint64_t act_flags;
        /**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
        bool external; /**< true if the flow is created external to PMD. */
        uint8_t ingress:1; /**< 1 if the flow is ingress. */
        uint8_t skip_scale:2;
        /**
         * A bit is set to 1 to skip scaling the corresponding flow group
         * with the configured factor:
         * bit 0 (MLX5_SCALE_FLOW_GROUP_BIT) - skip scaling the original
         * flow group;
         * bit 1 (MLX5_SCALE_JUMP_FLOW_GROUP_BIT) - skip scaling the jump
         * flow group when a jump action is present.
         * 00: scale both groups (default).
         * 01: skip scaling the flow group, scale the jump action group.
         * 10: scale the flow group, skip scaling the jump action group.
         * 11: skip scaling both the flow group and the jump action group.
         */
        union {
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
                struct mlx5_flow_dv_workspace dv; /**< DV driver workspace. */
#endif
#ifdef HAVE_INFINIBAND_VERBS_H
                struct mlx5_flow_verbs_workspace verbs; /**< Verbs workspace. */
#endif
        };
        struct mlx5_flow_handle *handle; /**< Device flow handle. */
        uint32_t handle_idx; /* Index of the mlx5 flow handle memory. */
        const struct mlx5_flow_tunnel *tunnel; /**< Related tunnel, if any. */
        enum mlx5_tof_rule_type tof_type; /**< Tunnel offload rule type. */
};

/* Flow meter state. */
#define MLX5_FLOW_METER_DISABLE 0
#define MLX5_FLOW_METER_ENABLE 1

/* ASO WQE/CQE completion polling parameters. */
#define MLX5_ASO_WQE_CQE_RESPONSE_DELAY 10u
#define MLX5_MTR_POLL_WQE_CQE_TIMES 100000u

#define MLX5_CT_POLL_WQE_CQE_TIMES MLX5_MTR_POLL_WQE_CQE_TIMES

#define MLX5_MAN_WIDTH 8
/* Legacy Meter parameter structure. */
struct mlx5_legacy_flow_meter {
        struct mlx5_flow_meter_info fm;
        /* Must be the first in struct.
 */
        TAILQ_ENTRY(mlx5_legacy_flow_meter) next;
        /**< Pointer to the next flow meter structure. */
        uint32_t idx;
        /* Index to meter object. */
};

#define MLX5_MAX_TUNNELS 256
#define MLX5_TNL_MISS_RULE_PRIORITY 3
#define MLX5_TNL_MISS_FDB_JUMP_GRP 0x1234faac

/*
 * When tunnel offload is active, all JUMP group ids are converted
 * using the same method. That conversion is applied both to tunnel and
 * regular rule types.
 * Group ids used in tunnel rules are relative to its tunnel (!).
 * Application can create number of steer rules, using the same
 * tunnel, with different group id in each rule.
 * Each tunnel stores its groups internally in PMD tunnel object.
 * Groups used in regular rules do not belong to any tunnel and are stored
 * in tunnel hub.
 */

/** PMD tunnel offload descriptor, one per application tunnel. */
struct mlx5_flow_tunnel {
        LIST_ENTRY(mlx5_flow_tunnel) chain;
        struct rte_flow_tunnel app_tunnel; /**< app tunnel copy */
        uint32_t tunnel_id; /**< unique tunnel ID */
        uint32_t refctn; /**< Reference counter. */
        struct rte_flow_action action; /**< PMD tunnel-set action. */
        struct rte_flow_item item; /**< PMD tunnel match item. */
        struct mlx5_hlist *groups; /**< tunnel groups */
};

/** PMD tunnel related context */
struct mlx5_flow_tunnel_hub {
        /* Tunnels list
         * Access to the list MUST be MT protected
         */
        LIST_HEAD(, mlx5_flow_tunnel) tunnels;
        /* protect access to the tunnels list */
        rte_spinlock_t sl;
        struct mlx5_hlist *groups; /**< non tunnel groups */
};

/* convert jump group to flow table ID in tunnel rules */
struct tunnel_tbl_entry {
        struct mlx5_list_entry hash;
        uint32_t flow_table; /* Flow table ID the group maps to. */
        uint32_t tunnel_id; /* Owning tunnel, 0 for non-tunnel groups. */
        uint32_t group; /* Application-visible jump group id. */
};

/* Tag bit 16 marks a flow table ID produced from a tunnel group. */
static inline uint32_t
tunnel_id_to_flow_tbl(uint32_t id)
{
        return id | (1u << 16);
}

/* Strip the tunnel tag (bit 16) to recover the original table ID. */
static inline uint32_t
tunnel_flow_tbl_to_id(uint32_t flow_tbl)
{
        return flow_tbl & ~(1u << 16);
}

/* Hash key for tunnel group lookups: (tunnel_id, group) packed in 64 bits. */
union tunnel_tbl_key {
        uint64_t val;
        struct {
                uint32_t tunnel_id;
                uint32_t group;
        };
};

/* Return the tunnel hub stored in the port's shared device context. */
static inline struct mlx5_flow_tunnel_hub *
mlx5_tunnel_hub(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        return priv->sh->tunnel_hub;
}

/*
 * True when tunnel offload miss handling is configured (dv_miss_info);
 * always false when built without DV flow support.
 */
static inline bool
is_tunnel_offload_active(const struct rte_eth_dev *dev)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        const struct mlx5_priv *priv = dev->data->dev_private;
        return !!priv->sh->config.dv_miss_info;
#else
        RTE_SET_USED(dev);
        return false;
#endif
}

/* True for rules matching on an already-decapsulated tunnel packet. */
static inline bool
is_flow_tunnel_match_rule(enum mlx5_tof_rule_type tof_rule_type)
{
        return tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
}

/* True for rules that steer a packet into tunnel offload (decap set). */
static inline bool
is_flow_tunnel_steer_rule(enum mlx5_tof_rule_type tof_rule_type)
{
        return tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE;
}

/*
 * NOTE(review): assumes the PMD-private tunnel action is first in the
 * array and carries the tunnel pointer in conf - callers must guarantee
 * this ordering.
 */
static inline const struct mlx5_flow_tunnel *
flow_actions_to_tunnel(const struct rte_flow_action actions[])
{
        return actions[0].conf;
}

/*
 * NOTE(review): assumes the PMD-private tunnel item is first in the
 * array and carries the tunnel pointer in spec - callers must guarantee
 * this ordering.
 */
static inline const struct mlx5_flow_tunnel *
flow_items_to_tunnel(const struct rte_flow_item items[])
{
        return items[0].spec;
}

/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   converted field in host endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                /* 24-bit field: high 16 bits first, then the third byte. */
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}

/* Describes one field touched by a MODIFY_FIELD translation. */
struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id; /* PRM modification field id. */
};

/* HW steering flow attributes. */
struct mlx5_flow_attr {
        uint32_t port_id; /* Port index. */
        uint32_t group; /* Flow group. */
        uint32_t priority; /* Original Priority. */
        /* rss level, used by priority adjustment. */
        uint32_t rss_level;
        /* Action flags, used by priority adjustment. */
        uint32_t act_flags;
        uint32_t tbl_type; /* Flow table type. */
};

/* Flow structure. */
struct rte_flow {
        uint32_t dev_handles;
        /**< Device flow handles that are part of the flow. */
        uint32_t type:2; /**< Flow type, see mlx5_flow_type. */
        uint32_t drv_type:2; /**< Driver type. */
        uint32_t tunnel:1; /**< 1 if the flow has a tunnel id. */
        uint32_t meter:24; /**< Holds flow meter id. */
        uint32_t indirect_type:2; /**< Indirect action type. */
        uint32_t rix_mreg_copy;
        /**< Index to metadata register copy table resource. */
        uint32_t counter; /**< Holds flow counter. */
        uint32_t tunnel_id; /**< Tunnel id */
        union {
                uint32_t age; /**< Holds ASO age bit index. */
                uint32_t ct; /**< Holds ASO CT index. */
        };
        uint32_t geneve_tlv_option; /**< Holds Geneve TLV option id. */
} __rte_packed;

/*
 * HWS COUNTER ID's layout
 *       3                   2                   1                   0
 *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *    |  T  |       | D |                                             |
 *    ~  Y  |       | C |                    IDX                      ~
 *    |  P  |       | S |                                             |
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *    Bit 31:29 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10
 *    Bit 25:24 = DCS index
 *    Bit 23:00 = IDX in this counter belonged DCS bulk.
 */
typedef uint32_t cnt_id_t;

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifdef PEDANTIC
/* rule[0] below is a zero-length array, a GNU extension. */
#pragma GCC diagnostic ignored "-Wpedantic"
#endif

/* HWS flow struct. */
struct rte_flow_hw {
        uint32_t idx; /* Flow index from indexed pool. */
        uint32_t fate_type; /* Fate action type. */
        union {
                /* Jump action. */
                struct mlx5_hw_jump_action *jump;
                struct mlx5_hrxq *hrxq; /* TIR action. */
        };
        struct rte_flow_template_table *table; /* The table this flow was allocated from. */
        uint32_t age_idx; /* ASO age bit index. */
        cnt_id_t cnt_id; /* HWS counter id, see layout above. */
        uint32_t mtr_id; /* Meter id. */
        uint8_t rule[0]; /* Trailing HWS layer data struct. */
} __rte_packed;

#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

/* rte flow action translate to DR action struct. */
struct mlx5_action_construct_data {
        LIST_ENTRY(mlx5_action_construct_data) next;
        /* Ensure the action types are matched. */
        int type;
        uint32_t idx;  /* Data index. */
        uint16_t action_src; /* rte_flow_action src offset. */
        uint16_t action_dst; /* mlx5dr_rule_action dst offset. */
        union {
                struct {
                        /* encap data len. */
                        uint16_t len;
                } encap;
                struct {
                        /* Modify header action offset in pattern. */
                        uint16_t mhdr_cmds_off;
                        /* Offset in pattern after modify header actions.
 */
                        uint16_t mhdr_cmds_end;
                        /*
                         * True if this action is masked and does not need to
                         * be generated.
                         */
                        bool shared;
                        /*
                         * Modified field definitions in dst field (SET, ADD)
                         * or src field (COPY).
                         */
                        struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS];
                        /* Modified field definitions in dst field (COPY). */
                        struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS];
                        /*
                         * Masks applied to field values to generate
                         * PRM actions.
                         */
                        uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS];
                } modify_header;
                struct {
                        uint64_t types; /* RSS hash types. */
                        uint32_t level; /* RSS level. */
                        uint32_t idx; /* Shared action index. */
                } shared_rss;
                struct {
                        cnt_id_t id; /* Shared counter id. */
                } shared_counter;
                struct {
                        uint32_t id; /* Shared meter id. */
                } shared_meter;
        };
};

/* Flow item template struct. */
struct rte_flow_pattern_template {
        LIST_ENTRY(rte_flow_pattern_template) next;
        /* Template attributes. */
        struct rte_flow_pattern_template_attr attr;
        struct mlx5dr_match_template *mt; /* mlx5 match template. */
        uint64_t item_flags; /* Item layer flags. */
        uint64_t orig_item_nb; /* Number of pattern items provided by the user (with END item). */
        uint32_t refcnt;  /* Reference counter. */
        /*
         * If true, then rule pattern should be prepended with
         * represented_port pattern item.
         */
        bool implicit_port;
        /*
         * If true, then rule pattern should be prepended with
         * tag pattern item for representor matching.
         */
        bool implicit_tag;
};

/* Flow action template struct. */
struct rte_flow_actions_template {
        LIST_ENTRY(rte_flow_actions_template) next;
        /* Template attributes. */
        struct rte_flow_actions_template_attr attr;
        struct rte_flow_action *actions; /* Cached flow actions. */
        struct rte_flow_action *masks; /* Cached action masks. */
        struct mlx5dr_action_template *tmpl; /* mlx5dr action template. */
        uint64_t action_flags; /* Bit-map of all valid actions in template. */
        uint16_t dr_actions_num; /* Number of DR rule actions. */
        uint16_t actions_num; /* Number of flow actions. */
        uint16_t *actions_off; /* DR action offset for given rte action offset. */
        uint16_t reformat_off; /* Offset of DR reformat action. */
        uint16_t mhdr_off; /* Offset of DR modify header action. */
        uint32_t refcnt; /* Reference counter. */
        uint16_t rx_cpy_pos; /* Action position of Rx metadata to be copied. */
};

/* Jump action struct. */
struct mlx5_hw_jump_action {
        /* Action jump from root. */
        struct mlx5dr_action *root_action;
        /* HW steering jump action. */
        struct mlx5dr_action *hws_action;
};

/* Encap decap action struct. */
struct mlx5_hw_encap_decap_action {
        struct mlx5dr_action *action; /* Action object. */
        /* Is header_reformat action shared across flows in table. */
        bool shared;
        size_t data_size; /* Action metadata size. */
        uint8_t data[]; /* Action data. */
};

/* Worst case: each modify field may need 2 commands, plus 1 spare. */
#define MLX5_MHDR_MAX_CMD ((MLX5_MAX_MODIFY_NUM) * 2 + 1)

/* Modify field action struct. */
struct mlx5_hw_modify_header_action {
        /* Reference to DR action */
        struct mlx5dr_action *action;
        /* Modify header action position in action rule table. */
        uint16_t pos;
        /* Is MODIFY_HEADER action shared across flows in table. */
        bool shared;
        /* Amount of modification commands stored in the precompiled buffer. */
        uint32_t mhdr_cmds_num;
        /* Precompiled modification commands. */
        struct mlx5_modification_cmd mhdr_cmds[MLX5_MHDR_MAX_CMD];
};

/* The maximum actions support in the flow. */
#define MLX5_HW_MAX_ACTS 16

/* DR action set struct.
 */
struct mlx5_hw_actions {
        /* Dynamic action list. */
        LIST_HEAD(act_list, mlx5_action_construct_data) act_list;
        struct mlx5_hw_jump_action *jump; /* Jump action. */
        struct mlx5_hrxq *tir; /* TIR action. */
        struct mlx5_hw_modify_header_action *mhdr; /* Modify header action. */
        /* Encap/Decap action. */
        struct mlx5_hw_encap_decap_action *encap_decap;
        uint16_t encap_decap_pos; /* Encap/Decap action position. */
        uint32_t mark:1; /* Indicate the mark action. */
        cnt_id_t cnt_id; /* Counter id. */
        uint32_t mtr_id; /* Meter id. */
        /* Translated DR action array from action template. */
        struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
};

/* mlx5 action template struct. */
struct mlx5_hw_action_template {
        /* Action template pointer. */
        struct rte_flow_actions_template *action_template;
        struct mlx5_hw_actions acts; /* Template actions. */
};

/* mlx5 flow group struct. */
struct mlx5_flow_group {
        struct mlx5_list_entry entry;
        struct rte_eth_dev *dev; /* Reference to corresponding device. */
        struct mlx5dr_table *tbl; /* HWS table object. */
        struct mlx5_hw_jump_action jump; /* Jump action. */
        enum mlx5dr_table_type type; /* Table type. */
        uint32_t group_id; /* Group id. */
        uint32_t idx; /* Group memory index. */
};

/* Limits on templates bound to one template table. */
#define MLX5_HW_TBL_MAX_ITEM_TEMPLATE 2
#define MLX5_HW_TBL_MAX_ACTION_TEMPLATE 32

struct mlx5_flow_template_table_cfg {
        struct rte_flow_template_table_attr attr; /* Table attributes passed through flow API. */
        bool external; /* True if created by flow API, false if table is internal to PMD. */
};

struct rte_flow_template_table {
        LIST_ENTRY(rte_flow_template_table) next;
        struct mlx5_flow_group *grp; /* The group rte_flow_template_table uses. */
        struct mlx5dr_matcher *matcher; /* Template matcher. */
        /* Item templates bind to the table. */
        struct rte_flow_pattern_template *its[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
        /* Action templates bind to the table. */
        struct mlx5_hw_action_template ats[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
        struct mlx5_indexed_pool *flow; /* The table's flow ipool. */
        struct mlx5_flow_template_table_cfg cfg; /* Table configuration. */
        uint32_t type; /* Flow table type RX/TX/FDB. */
        uint8_t nb_item_templates; /* Item template number. */
        uint8_t nb_action_templates; /* Action template number. */
        uint32_t refcnt; /* Table reference counter. */
};

#endif

/*
 * Define list of valid combinations of RX Hash fields
 * (see enum ibv_rx_hash_fields).
 */
#define MLX5_RSS_HASH_IPV4 (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
#define MLX5_RSS_HASH_IPV4_TCP \
        (MLX5_RSS_HASH_IPV4 | \
         IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP)
#define MLX5_RSS_HASH_IPV4_UDP \
        (MLX5_RSS_HASH_IPV4 | \
         IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)
#define MLX5_RSS_HASH_IPV6 (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
#define MLX5_RSS_HASH_IPV6_TCP \
        (MLX5_RSS_HASH_IPV6 | \
         IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP)
#define MLX5_RSS_HASH_IPV6_UDP \
        (MLX5_RSS_HASH_IPV6 | \
         IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)
#define MLX5_RSS_HASH_IPV4_SRC_ONLY IBV_RX_HASH_SRC_IPV4
#define MLX5_RSS_HASH_IPV4_DST_ONLY IBV_RX_HASH_DST_IPV4
#define MLX5_RSS_HASH_IPV6_SRC_ONLY IBV_RX_HASH_SRC_IPV6
#define MLX5_RSS_HASH_IPV6_DST_ONLY IBV_RX_HASH_DST_IPV6
#define MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY \
        (MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_SRC_PORT_UDP)
#define MLX5_RSS_HASH_IPV4_UDP_DST_ONLY \
        (MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_DST_PORT_UDP)
#define MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY \
        (MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_SRC_PORT_UDP)
#define MLX5_RSS_HASH_IPV6_UDP_DST_ONLY \
(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_UDP)
#define MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY \
        (MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_SRC_PORT_TCP)
#define MLX5_RSS_HASH_IPV4_TCP_DST_ONLY \
        (MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_DST_PORT_TCP)
#define MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY \
        (MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_SRC_PORT_TCP)
#define MLX5_RSS_HASH_IPV6_TCP_DST_ONLY \
        (MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_TCP)

#ifndef HAVE_IBV_RX_HASH_IPSEC_SPI
/* Fallback definition for older rdma-core without IPsec SPI hashing. */
#define IBV_RX_HASH_IPSEC_SPI (1U << 8)
#endif

#define MLX5_RSS_HASH_ESP_SPI IBV_RX_HASH_IPSEC_SPI
#define MLX5_RSS_HASH_IPV4_ESP (MLX5_RSS_HASH_IPV4 | \
                                MLX5_RSS_HASH_ESP_SPI)
#define MLX5_RSS_HASH_IPV6_ESP (MLX5_RSS_HASH_IPV6 | \
                                MLX5_RSS_HASH_ESP_SPI)
#define MLX5_RSS_HASH_NONE 0ULL

/*
 * Extract next protocol type from Ethernet & VLAN headers.
 * Result is the host-endian AND of the item's spec and mask fields.
 */
#define MLX5_ETHER_TYPE_FROM_HEADER(_s, _m, _itm, _prt) do { \
        (_prt) = ((const struct _s *)(_itm)->mask)->_m;      \
        (_prt) &= ((const struct _s *)(_itm)->spec)->_m;     \
        (_prt) = rte_be_to_cpu_16((_prt));                   \
} while (0)

/* array of valid combinations of RX Hash fields for RSS */
static const uint64_t mlx5_rss_hash_fields[] = {
        MLX5_RSS_HASH_IPV4,
        MLX5_RSS_HASH_IPV4_TCP,
        MLX5_RSS_HASH_IPV4_UDP,
        MLX5_RSS_HASH_IPV4_ESP,
        MLX5_RSS_HASH_IPV6,
        MLX5_RSS_HASH_IPV6_TCP,
        MLX5_RSS_HASH_IPV6_UDP,
        MLX5_RSS_HASH_IPV6_ESP,
        MLX5_RSS_HASH_ESP_SPI,
        MLX5_RSS_HASH_NONE,
};

/* Shared RSS action structure */
struct mlx5_shared_action_rss {
        ILIST_ENTRY(uint32_t)next; /**< Index to the next RSS structure. */
        uint32_t refcnt; /**< Atomically accessed refcnt. */
        struct rte_flow_action_rss origin; /**< Original rte RSS action. */
        uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
        struct mlx5_ind_table_obj *ind_tbl;
        /**< Hash RX queues (hrxq, hrxq_tunnel fields) indirection table. */
        uint32_t hrxq[MLX5_RSS_HASH_FIELDS_LEN];
        /**< Hash RX queue indexes mapped to mlx5_rss_hash_fields */
        rte_spinlock_t action_rss_sl; /**< Shared RSS action spinlock. */
};

struct rte_flow_action_handle {
        uint32_t id; /**< Indirect action identifier. */
};

/* Thread specific flow workspace intermediate data. */
struct mlx5_flow_workspace {
        /* If creating another flow in same thread, push new as stack. */
        struct mlx5_flow_workspace *prev;
        struct mlx5_flow_workspace *next;
        uint32_t inuse; /* can't create new flow with current. */
        struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS];
        struct mlx5_flow_rss_desc rss_desc;
        uint32_t rssq_num; /* Allocated queue num in rss_desc. */
        uint32_t flow_idx; /* Intermediate device flow index. */
        struct mlx5_flow_meter_info *fm; /* Pointer to the meter in flow. */
        struct mlx5_flow_meter_policy *policy;
        /* The meter policy used by meter in flow. */
        struct mlx5_flow_meter_policy *final_policy;
        /* The final policy when meter policy is hierarchy. */
        uint32_t skip_matcher_reg:1;
        /* Indicates if need to skip matcher register in translate. */
        uint32_t mark:1; /* Indicates if flow contains mark action. */
        uint32_t vport_meta_tag; /* Used for vport index match. */
};

/* Matcher translate type.
 */
enum MLX5_SET_MATCHER {
        MLX5_SET_MATCHER_SW_V = 1 << 0, /* SW steering, value. */
        MLX5_SET_MATCHER_SW_M = 1 << 1, /* SW steering, mask. */
        MLX5_SET_MATCHER_HS_V = 1 << 2, /* HW steering, value. */
        MLX5_SET_MATCHER_HS_M = 1 << 3, /* HW steering, mask. */
};

#define MLX5_SET_MATCHER_SW (MLX5_SET_MATCHER_SW_V | MLX5_SET_MATCHER_SW_M)
#define MLX5_SET_MATCHER_HS (MLX5_SET_MATCHER_HS_V | MLX5_SET_MATCHER_HS_M)
#define MLX5_SET_MATCHER_V (MLX5_SET_MATCHER_SW_V | MLX5_SET_MATCHER_HS_V)
#define MLX5_SET_MATCHER_M (MLX5_SET_MATCHER_SW_M | MLX5_SET_MATCHER_HS_M)

/* Flow matcher workspace intermediate data. */
struct mlx5_dv_matcher_workspace {
        uint8_t priority; /* Flow priority. */
        uint64_t last_item; /* Last item in pattern. */
        uint64_t item_flags; /* Flow item pattern flags. */
        uint64_t action_flags; /* Flow action flags. */
        bool external; /* External flow or not. */
        uint32_t vlan_tag:12; /* Flow item VLAN tag. */
        uint8_t next_protocol; /* Tunnel next protocol. */
        uint32_t geneve_tlv_option; /* Flow item Geneve TLV option. */
        uint32_t group; /* Flow group. */
        uint16_t udp_dport; /* Flow item UDP port. */
        const struct rte_flow_attr *attr; /* Flow attribute. */
        struct mlx5_flow_rss_desc *rss_desc; /* RSS descriptor. */
        const struct rte_flow_item *tunnel_item; /* Flow tunnel item. */
        const struct rte_flow_item *gre_item; /* Flow GRE item. */
        const struct rte_flow_item *integrity_items[2];
};

struct mlx5_flow_split_info {
        uint32_t external:1;
        /**< True if flow is created by request external to PMD. */
        uint32_t prefix_mark:1; /**< Prefix subflow mark flag. */
        uint32_t skip_scale:8; /**< Skip the scale the table with factor. */
        uint32_t flow_idx; /**< This memory pool index to the flow. */
        uint32_t table_id; /**< Flow table identifier. */
        uint64_t prefix_layers; /**< Prefix subflow layers. */
};

struct flow_hw_port_info {
        uint32_t regc_mask; /* REG_C metadata mask for the port. */
        uint32_t regc_value; /* REG_C metadata value for the port. */
        uint32_t is_wire:1; /* 1 if this is the wire (uplink) port. */
};

extern struct flow_hw_port_info mlx5_flow_hw_port_infos[RTE_MAX_ETHPORTS];

#define MLX5_FLOW_HW_TAGS_MAX 8
extern uint32_t mlx5_flow_hw_avl_tags_init_cnt;
extern enum modify_reg mlx5_flow_hw_avl_tags[];
extern enum modify_reg mlx5_flow_hw_aso_tag;

/*
 * Get metadata match tag and mask for given rte_eth_dev port.
 * Used in HWS rule creation.
 * Returns NULL for an out-of-range port or one with no registered
 * metadata (zero regc_mask).
 */
static __rte_always_inline const struct flow_hw_port_info *
flow_hw_conv_port_id(const uint16_t port_id)
{
        struct flow_hw_port_info *port_info;

        if (port_id >= RTE_MAX_ETHPORTS)
                return NULL;
        port_info = &mlx5_flow_hw_port_infos[port_id];
        return !!port_info->regc_mask ? port_info : NULL;
}

#ifdef HAVE_IBV_FLOW_DV_SUPPORT
/*
 * Get metadata match tag and mask for the uplink port represented
 * by given IB context. Used in HWS context creation.
 */
static __rte_always_inline const struct flow_hw_port_info *
flow_hw_get_wire_port(struct ibv_context *ibctx)
{
        struct ibv_device *ibdev = ibctx->device;
        uint16_t port_id;

        MLX5_ETH_FOREACH_DEV(port_id, NULL) {
                const struct mlx5_priv *priv =
                                rte_eth_devices[port_id].data->dev_private;

                if (priv && priv->master) {
                        struct ibv_context *port_ibctx = priv->sh->cdev->ctx;

                        /* Match master ports on the underlying IB device. */
                        if (port_ibctx->device == ibdev)
                                return flow_hw_conv_port_id(port_id);
                }
        }
        return NULL;
}
#endif

/* Process-wide flow metadata configuration, see flow_hw_get_reg_id(). */
extern uint32_t mlx5_flow_hw_flow_metadata_config_refcnt;
extern uint8_t mlx5_flow_hw_flow_metadata_esw_en;
extern uint8_t mlx5_flow_hw_flow_metadata_xmeta_en;

void flow_hw_init_flow_metadata_config(struct rte_eth_dev *dev);
void flow_hw_clear_flow_metadata_config(void);

/*
 * Convert metadata or tag to the actual register.
 * META: Can only be used to match in the FDB in this stage, fixed C_1.
 * TAG: C_x expect meter color reg and the reserved ones.
 * TODO: Per port / device, FDB or NIC for Meta matching.
 */
static __rte_always_inline int
flow_hw_get_reg_id(enum rte_flow_item_type type, uint32_t id)
{
        switch (type) {
        case RTE_FLOW_ITEM_TYPE_META:
#ifdef HAVE_MLX5_HWS_SUPPORT
                if (mlx5_flow_hw_flow_metadata_esw_en &&
                    mlx5_flow_hw_flow_metadata_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
                        return REG_C_1;
                }
#endif
                /*
                 * On root table - PMD allows only egress META matching, thus
                 * REG_A matching is sufficient.
                 *
                 * On non-root tables - REG_A corresponds to general_purpose_lookup_field,
                 * which translates to REG_A in NIC TX and to REG_B in NIC RX.
                 * However, current FW does not implement REG_B case right now, so
                 * REG_B case should be rejected on pattern template validation.
                 */
                return REG_A;
        case RTE_FLOW_ITEM_TYPE_CONNTRACK:
        case RTE_FLOW_ITEM_TYPE_METER_COLOR:
                return mlx5_flow_hw_aso_tag;
        case RTE_FLOW_ITEM_TYPE_TAG:
                MLX5_ASSERT(id < MLX5_FLOW_HW_TAGS_MAX);
                return mlx5_flow_hw_avl_tags[id];
        default:
                return REG_NON;
        }
}

void flow_hw_set_port_info(struct rte_eth_dev *dev);
void flow_hw_clear_port_info(struct rte_eth_dev *dev);

void flow_hw_init_tags_set(struct rte_eth_dev *dev);
void flow_hw_clear_tags_set(struct rte_eth_dev *dev);

int flow_hw_create_vport_action(struct rte_eth_dev *dev);
void flow_hw_destroy_vport_action(struct rte_eth_dev *dev);

/*
 * Per-driver flow operation callbacks, collected in
 * struct mlx5_flow_driver_ops below.
 */
typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item items[],
                const struct rte_flow_action actions[],
                bool external,
                int hairpin,
                struct rte_flow_error *error);
typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
                (struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                 const struct rte_flow_item items[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error);
typedef int (*mlx5_flow_translate_t)(struct rte_eth_dev *dev,
                struct mlx5_flow *dev_flow,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item items[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error);
typedef int (*mlx5_flow_apply_t)(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                struct rte_flow_error *error);
typedef void (*mlx5_flow_remove_t)(struct rte_eth_dev *dev,
                struct rte_flow *flow);
typedef void (*mlx5_flow_destroy_t)(struct rte_eth_dev *dev,
                struct rte_flow *flow);
typedef int (*mlx5_flow_query_t)(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                const struct rte_flow_action *actions,
                void *data,
                struct rte_flow_error *error);
typedef int
(*mlx5_flow_create_mtr_tbls_t)(struct rte_eth_dev *dev,
                struct mlx5_flow_meter_info *fm,
                uint32_t mtr_idx,
                uint8_t domain_bitmap);
typedef void (*mlx5_flow_destroy_mtr_tbls_t)(struct rte_eth_dev *dev,
                struct mlx5_flow_meter_info *fm);
typedef void (*mlx5_flow_destroy_mtr_drop_tbls_t)(struct rte_eth_dev *dev);
typedef struct mlx5_flow_meter_sub_policy *
                (*mlx5_flow_meter_sub_policy_rss_prepare_t)
                (struct rte_eth_dev *dev,
                 struct mlx5_flow_meter_policy *mtr_policy,
                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
typedef int (*mlx5_flow_meter_hierarchy_rule_create_t)
                (struct rte_eth_dev *dev,
                 struct mlx5_flow_meter_info *fm,
                 int32_t src_port,
                 const struct rte_flow_item *item,
                 struct rte_flow_error *error);
typedef void (*mlx5_flow_destroy_sub_policy_with_rxq_t)
                (struct rte_eth_dev *dev,
                 struct mlx5_flow_meter_policy *mtr_policy);
/* Meter and counter allocation callbacks. */
typedef uint32_t (*mlx5_flow_mtr_alloc_t)
                (struct rte_eth_dev *dev);
typedef void (*mlx5_flow_mtr_free_t)(struct rte_eth_dev *dev,
                uint32_t mtr_idx);
typedef uint32_t (*mlx5_flow_counter_alloc_t)
                (struct rte_eth_dev *dev);
typedef void (*mlx5_flow_counter_free_t)(struct rte_eth_dev *dev,
                uint32_t cnt);
typedef int (*mlx5_flow_counter_query_t)(struct rte_eth_dev *dev,
                uint32_t cnt,
                bool clear, uint64_t *pkts,
                uint64_t *bytes, void **action);
/* Flow aging callbacks. */
typedef int (*mlx5_flow_get_aged_flows_t)
                (struct rte_eth_dev *dev,
                 void **context,
                 uint32_t nb_contexts,
                 struct rte_flow_error *error);
typedef int (*mlx5_flow_get_q_aged_flows_t)
                (struct rte_eth_dev *dev,
                 uint32_t queue_id,
                 void **context,
                 uint32_t nb_contexts,
                 struct rte_flow_error *error);
/* Indirect (shared) action callbacks. */
typedef int (*mlx5_flow_action_validate_t)
                (struct rte_eth_dev *dev,
                 const struct rte_flow_indir_action_conf *conf,
                 const struct rte_flow_action *action,
                 struct rte_flow_error *error);
typedef struct rte_flow_action_handle *(*mlx5_flow_action_create_t)
                (struct rte_eth_dev *dev,
                 const struct rte_flow_indir_action_conf *conf,
                 const struct rte_flow_action *action,
                 struct rte_flow_error *error);
typedef int (*mlx5_flow_action_destroy_t)
                (struct rte_eth_dev *dev,
                 struct rte_flow_action_handle *action,
                 struct rte_flow_error *error);
typedef int (*mlx5_flow_action_update_t)
                (struct rte_eth_dev *dev,
                 struct rte_flow_action_handle *action,
                 const void *update,
                 struct rte_flow_error *error);
typedef int (*mlx5_flow_action_query_t)
                (struct rte_eth_dev *dev,
                 const struct rte_flow_action_handle *action,
                 void *data,
                 struct rte_flow_error *error);
typedef int (*mlx5_flow_sync_domain_t)
                (struct rte_eth_dev *dev,
                 uint32_t domains,
                 uint32_t flags);
/* Meter policy callbacks. */
typedef int (*mlx5_flow_validate_mtr_acts_t)
                (struct rte_eth_dev *dev,
                 const struct rte_flow_action *actions[RTE_COLORS],
                 struct rte_flow_attr *attr,
                 bool *is_rss,
                 uint8_t *domain_bitmap,
                 uint8_t *policy_mode,
                 struct rte_mtr_error *error);
typedef int (*mlx5_flow_create_mtr_acts_t)
                (struct rte_eth_dev *dev,
                 struct mlx5_flow_meter_policy *mtr_policy,
                 const struct rte_flow_action *actions[RTE_COLORS],
                 struct rte_flow_attr *attr,
                 struct rte_mtr_error *error);
typedef void (*mlx5_flow_destroy_mtr_acts_t)
                (struct rte_eth_dev *dev,
                 struct mlx5_flow_meter_policy *mtr_policy);
typedef int (*mlx5_flow_create_policy_rules_t)
                (struct rte_eth_dev *dev,
                 struct mlx5_flow_meter_policy *mtr_policy);
typedef void (*mlx5_flow_destroy_policy_rules_t)
                (struct rte_eth_dev *dev,
                 struct mlx5_flow_meter_policy *mtr_policy);
typedef int (*mlx5_flow_create_def_policy_t)
                (struct rte_eth_dev *dev);
typedef void (*mlx5_flow_destroy_def_policy_t)
                (struct rte_eth_dev *dev);
typedef int (*mlx5_flow_discover_priorities_t)
                (struct rte_eth_dev *dev,
                 const uint16_t *vprio, int vprio_n);
/* Flex item callbacks. */
typedef struct rte_flow_item_flex_handle *(*mlx5_flow_item_create_t)
                (struct rte_eth_dev *dev,
                 const struct rte_flow_item_flex_conf *conf,
                 struct rte_flow_error *error);
typedef int (*mlx5_flow_item_release_t)
                (struct rte_eth_dev *dev,
                 const struct rte_flow_item_flex_handle *handle,
                 struct rte_flow_error *error);
typedef int (*mlx5_flow_item_update_t)
                (struct rte_eth_dev *dev,
                 const struct rte_flow_item_flex_handle *handle,
                 const struct rte_flow_item_flex_conf *conf,
                 struct rte_flow_error *error);
/* rte_flow template (HW steering) API callbacks. */
typedef int (*mlx5_flow_info_get_t)
                (struct rte_eth_dev *dev,
                 struct rte_flow_port_info *port_info,
                 struct rte_flow_queue_info *queue_info,
                 struct rte_flow_error *error);
typedef int (*mlx5_flow_port_configure_t)
                (struct rte_eth_dev *dev,
                 const struct rte_flow_port_attr *port_attr,
                 uint16_t nb_queue,
                 const struct rte_flow_queue_attr *queue_attr[],
                 struct rte_flow_error *err);
typedef int (*mlx5_flow_pattern_validate_t)
                (struct rte_eth_dev *dev,
                 const struct rte_flow_pattern_template_attr *attr,
                 const struct rte_flow_item items[],
                 struct rte_flow_error *error);
typedef struct rte_flow_pattern_template *(*mlx5_flow_pattern_template_create_t)
                (struct rte_eth_dev *dev,
                 const struct rte_flow_pattern_template_attr *attr,
                 const struct rte_flow_item items[],
                 struct rte_flow_error *error);
typedef int (*mlx5_flow_pattern_template_destroy_t)
                (struct rte_eth_dev *dev,
                 struct rte_flow_pattern_template *template,
                 struct rte_flow_error *error);
typedef int (*mlx5_flow_actions_validate_t)
                (struct rte_eth_dev *dev,
                 const struct rte_flow_actions_template_attr *attr,
                 const struct rte_flow_action actions[],
                 const struct rte_flow_action masks[],
                 struct rte_flow_error *error);
typedef struct rte_flow_actions_template *(*mlx5_flow_actions_template_create_t)
                (struct rte_eth_dev *dev,
                 const struct rte_flow_actions_template_attr *attr,
                 const struct rte_flow_action actions[],
                 const struct rte_flow_action masks[],
                 struct rte_flow_error *error);
typedef int (*mlx5_flow_actions_template_destroy_t)
                (struct rte_eth_dev *dev,
                 struct rte_flow_actions_template *template,
                 struct rte_flow_error *error);
typedef struct rte_flow_template_table *(*mlx5_flow_table_create_t)
                (struct rte_eth_dev *dev,
                 const struct rte_flow_template_table_attr *attr,
                 struct rte_flow_pattern_template *item_templates[],
                 uint8_t nb_item_templates,
                 struct rte_flow_actions_template *action_templates[],
                 uint8_t nb_action_templates,
                 struct rte_flow_error *error);
typedef int (*mlx5_flow_table_destroy_t)
                (struct rte_eth_dev *dev,
                 struct rte_flow_template_table *table,
                 struct rte_flow_error *error);
/* Asynchronous (queue based) flow operation callbacks. */
typedef struct rte_flow *(*mlx5_flow_async_flow_create_t)
                (struct rte_eth_dev *dev,
                 uint32_t queue,
                 const struct rte_flow_op_attr *attr,
                 struct rte_flow_template_table *table,
                 const struct rte_flow_item items[],
                 uint8_t pattern_template_index,
                 const struct rte_flow_action actions[],
                 uint8_t action_template_index,
                 void *user_data,
                 struct rte_flow_error *error);
typedef int (*mlx5_flow_async_flow_destroy_t)
                (struct rte_eth_dev *dev,
                 uint32_t queue,
                 const struct rte_flow_op_attr *attr,
                 struct rte_flow *flow,
                 void *user_data,
                 struct rte_flow_error *error);
typedef int (*mlx5_flow_pull_t)
                (struct rte_eth_dev *dev,
                 uint32_t queue,
                 struct rte_flow_op_result res[],
                 uint16_t n_res,
                 struct rte_flow_error *error);
typedef int (*mlx5_flow_push_t)
                (struct rte_eth_dev *dev,
                 uint32_t queue,
                 struct rte_flow_error *error);

/* Queue-based (asynchronous) indirect action driver ops. */
typedef struct rte_flow_action_handle *(*mlx5_flow_async_action_handle_create_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 const struct rte_flow_indir_action_conf *conf,
			 const struct rte_flow_action *action,
			 void *user_data,
			 struct rte_flow_error *error);

typedef int (*mlx5_flow_async_action_handle_update_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 struct rte_flow_action_handle *handle,
			 const void *update,
			 void *user_data,
			 struct rte_flow_error *error);

typedef int (*mlx5_flow_async_action_handle_query_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 const struct rte_flow_action_handle *handle,
			 void *data,
			 void *user_data,
			 struct rte_flow_error *error);

typedef int (*mlx5_flow_async_action_handle_destroy_t)
			(struct rte_eth_dev *dev,
			 uint32_t queue,
			 const struct rte_flow_op_attr *attr,
			 struct rte_flow_action_handle *handle,
			 void *user_data,
			 struct rte_flow_error *error);

/*
 * Virtual table of flow driver operations. Each flow engine (Verbs, DV, HWS)
 * fills in the callbacks it supports; dispatch selects a table per device.
 */
struct mlx5_flow_driver_ops {
	/* Core rule lifecycle. */
	mlx5_flow_validate_t validate;
	mlx5_flow_prepare_t prepare;
	mlx5_flow_translate_t translate;
	mlx5_flow_apply_t apply;
	mlx5_flow_remove_t remove;
	mlx5_flow_destroy_t destroy;
	mlx5_flow_query_t query;
	/* Metering: tables, meter objects, policies. */
	mlx5_flow_create_mtr_tbls_t create_mtr_tbls;
	mlx5_flow_destroy_mtr_tbls_t destroy_mtr_tbls;
	mlx5_flow_destroy_mtr_drop_tbls_t destroy_mtr_drop_tbls;
	mlx5_flow_mtr_alloc_t create_meter;
	mlx5_flow_mtr_free_t free_meter;
	mlx5_flow_validate_mtr_acts_t validate_mtr_acts;
	mlx5_flow_create_mtr_acts_t create_mtr_acts;
	mlx5_flow_destroy_mtr_acts_t destroy_mtr_acts;
	mlx5_flow_create_policy_rules_t create_policy_rules;
	mlx5_flow_destroy_policy_rules_t destroy_policy_rules;
	mlx5_flow_create_def_policy_t create_def_policy;
	mlx5_flow_destroy_def_policy_t destroy_def_policy;
	mlx5_flow_meter_sub_policy_rss_prepare_t meter_sub_policy_rss_prepare;
	mlx5_flow_meter_hierarchy_rule_create_t meter_hierarchy_rule_create;
	mlx5_flow_destroy_sub_policy_with_rxq_t destroy_sub_policy_with_rxq;
	/* Counters and aging. */
	mlx5_flow_counter_alloc_t counter_alloc;
	mlx5_flow_counter_free_t counter_free;
	mlx5_flow_counter_query_t counter_query;
	mlx5_flow_get_aged_flows_t get_aged_flows;
	mlx5_flow_get_q_aged_flows_t get_q_aged_flows;
	/* Indirect (shared) actions. */
	mlx5_flow_action_validate_t action_validate;
	mlx5_flow_action_create_t action_create;
	mlx5_flow_action_destroy_t action_destroy;
	mlx5_flow_action_update_t action_update;
	mlx5_flow_action_query_t action_query;
	mlx5_flow_sync_domain_t sync_domain;
	mlx5_flow_discover_priorities_t discover_priorities;
	/* Flex parser items. */
	mlx5_flow_item_create_t item_create;
	mlx5_flow_item_release_t item_release;
	mlx5_flow_item_update_t item_update;
	/* Template (HWS) API. */
	mlx5_flow_info_get_t info_get;
	mlx5_flow_port_configure_t configure;
	mlx5_flow_pattern_validate_t pattern_validate;
	mlx5_flow_pattern_template_create_t pattern_template_create;
	mlx5_flow_pattern_template_destroy_t pattern_template_destroy;
	mlx5_flow_actions_validate_t actions_validate;
	mlx5_flow_actions_template_create_t actions_template_create;
	mlx5_flow_actions_template_destroy_t actions_template_destroy;
	mlx5_flow_table_create_t template_table_create;
	mlx5_flow_table_destroy_t template_table_destroy;
	/* Queue-based (asynchronous) operations. */
	mlx5_flow_async_flow_create_t async_flow_create;
	mlx5_flow_async_flow_destroy_t async_flow_destroy;
	mlx5_flow_pull_t pull;
	mlx5_flow_push_t push;
	mlx5_flow_async_action_handle_create_t async_action_create;
	mlx5_flow_async_action_handle_update_t async_action_update;
	mlx5_flow_async_action_handle_query_t async_action_query;
	mlx5_flow_async_action_handle_destroy_t async_action_destroy;
};

/* mlx5_flow.c */

/* Per-thread flow workspace management. */
struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
void mlx5_flow_pop_thread_workspace(void);
struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void);
/* Flags controlling group-to-table translation for a flow rule. */
__extension__
struct flow_grp_info {
	uint64_t external:1;
	uint64_t transfer:1;
	uint64_t fdb_def_rule:1;
	/* force standard group translation */
	uint64_t std_tbl_fix:1;
	uint64_t skip_scale:2;
};

/*
 * Decide whether attr->group must be translated with the standard method
 * (returns true) or with the tunnel-offload method (returns false).
 */
static inline bool
tunnel_use_standard_attr_group_translate
		    (const struct rte_eth_dev *dev,
		     const struct rte_flow_attr *attr,
		     const struct mlx5_flow_tunnel *tunnel,
		     enum mlx5_tof_rule_type tof_rule_type)
{
	bool verdict;

	if (!is_tunnel_offload_active(dev))
		/* no tunnel offload API */
		verdict = true;
	else if (tunnel) {
		/*
		 * OvS will use jump to group 0 in tunnel steer rule.
		 * If tunnel steer rule starts from group 0 (attr.group == 0)
		 * that 0 group must be translated with standard method.
		 * attr.group == 0 in tunnel match rule translated with tunnel
		 * method
		 */
		verdict = !attr->group &&
			  is_flow_tunnel_steer_rule(tof_rule_type);
	} else {
		/*
		 * non-tunnel group translation uses standard method for
		 * root group only: attr.group == 0
		 */
		verdict = !attr->group;
	}

	return verdict;
}

/**
 * Get DV flow aso meter by index.
 *
 * @param[in] priv
 *   Pointer to the port private data structure.
 * @param[in] idx
 *   mlx5 flow aso meter index in the container (1-based unless the
 *   bulk-allocated array is in use).
 *
 * @return
 *   Pointer to the aso meter.
 */
static inline struct mlx5_aso_mtr *
mlx5_aso_meter_by_idx(struct mlx5_priv *priv, uint32_t idx)
{
	struct mlx5_aso_mtr_pool *pool;
	struct mlx5_aso_mtr_pools_mng *pools_mng =
				&priv->sh->mtrmng->pools_mng;

	/* Bulk-allocated meters are addressed directly by index. */
	if (priv->mtr_bulk.aso)
		return priv->mtr_bulk.aso + idx;
	/* Decrease to original index. */
	idx--;
	MLX5_ASSERT(idx / MLX5_ASO_MTRS_PER_POOL < pools_mng->n);
	/* Read lock guards against a concurrent resize of the pools array. */
	rte_rwlock_read_lock(&pools_mng->resize_mtrwl);
	pool = pools_mng->pools[idx / MLX5_ASO_MTRS_PER_POOL];
	rte_rwlock_read_unlock(&pools_mng->resize_mtrwl);
	return &pool->mtrs[idx % MLX5_ASO_MTRS_PER_POOL];
}

/* Return the END item terminating an item array (array must be terminated). */
static __rte_always_inline const struct rte_flow_item *
mlx5_find_end_item(const struct rte_flow_item *item)
{
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++);
	return item;
}

/*
 * An integrity item is acceptable only if it requests bits this driver
 * supports: clearing the supported bits must leave no other bit set.
 */
static __rte_always_inline bool
mlx5_validate_integrity_item(const struct rte_flow_item_integrity *item)
{
	struct rte_flow_item_integrity test = *item;
	test.l3_ok = 0;
	test.l4_ok = 0;
	test.ipv4_csum_ok = 0;
	test.l4_csum_ok = 0;
	return (test.value == 0);
}

/*
 * Get ASO CT action by device and index.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   Index to the ASO CT action (1-based).
 *
 * @return
 *   The specified ASO CT action pointer.
 */
static inline struct mlx5_aso_ct_action *
flow_aso_ct_get_by_dev_idx(struct rte_eth_dev *dev, uint32_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
	struct mlx5_aso_ct_pool *pool;

	/* Decrease to original (0-based) index. */
	idx--;
	MLX5_ASSERT((idx / MLX5_ASO_CT_ACTIONS_PER_POOL) < mng->n);
	/* Bit operation AND could be used. */
	rte_rwlock_read_lock(&mng->resize_rwl);
	pool = mng->pools[idx / MLX5_ASO_CT_ACTIONS_PER_POOL];
	rte_rwlock_read_unlock(&mng->resize_rwl);
	return &pool->actions[idx % MLX5_ASO_CT_ACTIONS_PER_POOL];
}

/*
 * Get ASO CT action by owner & index.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] own_idx
 *   Index to the ASO CT action and owner port combination.
 *
 * @return
 *   The specified ASO CT action pointer, or NULL when the action belongs to
 *   another port and either this port is not started or the action's peer is
 *   not this port.
 */
static inline struct mlx5_aso_ct_action *
flow_aso_ct_get_by_idx(struct rte_eth_dev *dev, uint32_t own_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_action *ct;
	uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
	uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);

	if (owner == PORT_ID(priv)) {
		ct = flow_aso_ct_get_by_dev_idx(dev, idx);
	} else {
		/* Cross-port lookup: resolve through the owner port. */
		struct rte_eth_dev *owndev = &rte_eth_devices[owner];

		MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
		if (dev->data->dev_started != 1)
			return NULL;
		ct = flow_aso_ct_get_by_dev_idx(owndev, idx);
		if (ct->peer != PORT_ID(priv))
			return NULL;
	}
	return ct;
}

/* Map inner-layer pattern flags to the tunnel payload Ethernet type. */
static inline uint16_t
mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
{
	if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
		return RTE_ETHER_TYPE_TEB;
	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
		return RTE_ETHER_TYPE_IPV4;
	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
		return RTE_ETHER_TYPE_IPV6;
	else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
		return RTE_ETHER_TYPE_MPLS;
	return 0;
}

int flow_hw_q_flow_flush(struct rte_eth_dev *dev,
			 struct rte_flow_error *error);

/*
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color, or MLX5_FLOW_COLOR_UNDEFINED for unknown input.
 */
static inline int
rte_col_2_mlx5_col(enum rte_color rcol)
{
	switch (rcol) {
	case RTE_COLOR_GREEN:
		return MLX5_FLOW_COLOR_GREEN;
	case RTE_COLOR_YELLOW:
		return MLX5_FLOW_COLOR_YELLOW;
	case RTE_COLOR_RED:
		return MLX5_FLOW_COLOR_RED;
	default:
		break;
	}
	return MLX5_FLOW_COLOR_UNDEFINED;
}

/**
 * Indicates whether flow source vport is representor port.
 *
 * @param[in] priv
 *   Pointer to device private context structure.
 * @param[in] act_priv
 *   Pointer to actual device private context structure if have.
 *
 * @return
 *   True when the flow source vport is representor port, false otherwise.
 */
static inline bool
flow_source_vport_representor(struct mlx5_priv *priv, struct mlx5_priv *act_priv)
{
	MLX5_ASSERT(priv);
	/* A representor_id of UINT16_MAX marks a non-representor port. */
	return (!act_priv ? (priv->representor_id != UINT16_MAX) :
		(act_priv->representor_id != UINT16_MAX));
}

/* All types of Ethernet patterns used in control flow rules. */
enum mlx5_flow_ctrl_rx_eth_pattern_type {
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL = 0,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN,
	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX,
};

/* All types of RSS actions used in control flow rules. */
enum mlx5_flow_ctrl_rx_expanded_rss_type {
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP = 0,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP,
	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX,
};

/**
 * Contains pattern template, template table and its attributes for a single
 * combination of Ethernet pattern and RSS action. Used to create control flow rules
 * with HWS.
 */
struct mlx5_flow_hw_ctrl_rx_table {
	struct rte_flow_template_table_attr attr;
	struct rte_flow_pattern_template *pt;
	struct rte_flow_template_table *tbl;
};

/* Contains all templates required to create control flow rules with HWS. */
struct mlx5_flow_hw_ctrl_rx {
	struct rte_flow_actions_template *rss[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX];
	struct mlx5_flow_hw_ctrl_rx_table tables[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX]
						[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX];
};

/* Flags selecting which control flow rules to install. */
#define MLX5_CTRL_PROMISCUOUS    (RTE_BIT32(0))
#define MLX5_CTRL_ALL_MULTICAST  (RTE_BIT32(1))
#define MLX5_CTRL_BROADCAST      (RTE_BIT32(2))
#define MLX5_CTRL_IPV4_MULTICAST (RTE_BIT32(3))
#define MLX5_CTRL_IPV6_MULTICAST (RTE_BIT32(4))
#define MLX5_CTRL_DMAC           (RTE_BIT32(5))
#define MLX5_CTRL_VLAN_FILTER    (RTE_BIT32(6))

int mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags);
void mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev);

/* Group/priority translation helpers. */
int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
			     const struct mlx5_flow_tunnel *tunnel,
			     uint32_t group, uint32_t *table,
			     const struct flow_grp_info *flags,
			     struct rte_flow_error *error);
uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
				     int tunnel, uint64_t layer_types,
				     uint64_t hash_fields);
int mlx5_flow_discover_priorities(struct rte_eth_dev *dev);
uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
				   uint32_t subpriority);
uint32_t mlx5_get_lowest_priority(struct rte_eth_dev *dev,
				  const struct rte_flow_attr *attr);
uint16_t mlx5_get_matcher_priority(struct rte_eth_dev *dev,
				   const struct rte_flow_attr *attr,
				   uint32_t subpriority, bool external);
uint32_t mlx5_get_send_to_kernel_priority(struct rte_eth_dev *dev);
int mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
			 enum mlx5_feature_name feature,
			 uint32_t id,
			 struct rte_flow_error *error);
const struct rte_flow_action *mlx5_flow_find_action
					(const struct rte_flow_action *actions,
					 enum rte_flow_action_type action);
/* Generic action validation helpers shared by flow engines. */
int mlx5_validate_action_rss(struct rte_eth_dev *dev,
			     const struct rte_flow_action *action,
			     struct rte_flow_error *error);
int mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
				    const struct rte_flow_attr *attr,
				    struct rte_flow_error *error);
int mlx5_flow_validate_action_drop(uint64_t action_flags,
				   const struct rte_flow_attr *attr,
				   struct rte_flow_error *error);
int mlx5_flow_validate_action_flag(uint64_t action_flags,
				   const struct rte_flow_attr *attr,
				   struct rte_flow_error *error);
int mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
				   uint64_t action_flags,
				   const struct rte_flow_attr *attr,
				   struct rte_flow_error *error);
int mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
				    uint64_t action_flags,
				    struct rte_eth_dev *dev,
				    const struct rte_flow_attr *attr,
				    struct rte_flow_error *error);
int mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
				  uint64_t action_flags,
				  struct rte_eth_dev *dev,
				  const struct rte_flow_attr *attr,
				  uint64_t item_flags,
				  struct rte_flow_error *error);
int mlx5_flow_validate_action_default_miss(uint64_t action_flags,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error);
/* Generic item validation helpers shared by flow engines. */
int mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			      const uint8_t *mask,
			      const uint8_t *nic_mask,
			      unsigned int size,
			      bool range_accepted,
			      struct rte_flow_error *error);
int mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
				uint64_t item_flags, bool ext_vlan_sup,
				struct rte_flow_error *error);
int mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
				uint64_t item_flags,
				uint8_t target_protocol,
				struct rte_flow_error *error);
int mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
				    uint64_t item_flags,
				    const struct rte_flow_item *gre_item,
				    struct rte_flow_error *error);
int mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev,
				       const struct rte_flow_item *item,
				       uint64_t item_flags,
				       const struct rte_flow_attr *attr,
				       const struct rte_flow_item *gre_item,
				       struct rte_flow_error *error);
int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
				 uint64_t item_flags,
				 uint64_t last_item,
				 uint16_t ether_type,
				 const struct rte_flow_item_ipv4 *acc_mask,
				 bool range_accepted,
				 struct rte_flow_error *error);
int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
				 uint64_t item_flags,
				 uint64_t last_item,
				 uint16_t ether_type,
				 const struct rte_flow_item_ipv6 *acc_mask,
				 struct rte_flow_error *error);
int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
				 const struct rte_flow_item *item,
				 uint64_t item_flags,
				 uint64_t prev_layer,
				 struct rte_flow_error *error);
int mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
				uint64_t item_flags,
				uint8_t target_protocol,
				const struct rte_flow_item_tcp *flow_mask,
				struct rte_flow_error *error);
int mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
				uint64_t item_flags,
				uint8_t target_protocol,
				struct rte_flow_error *error);
int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
				 uint64_t item_flags,
				 struct rte_eth_dev *dev,
				 struct rte_flow_error *error);
int mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
				  uint16_t udp_dport,
				  const struct rte_flow_item *item,
				  uint64_t item_flags,
				  bool root,
				  struct rte_flow_error *error);
int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
				      uint64_t item_flags,
				      struct rte_eth_dev *dev,
				      struct rte_flow_error *error);
int mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
				 uint64_t item_flags,
				 uint8_t target_protocol,
				 struct rte_flow_error *error);
int mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
				  uint64_t item_flags,
				  uint8_t target_protocol,
				  struct rte_flow_error *error);
int mlx5_flow_validate_item_icmp6_echo(const struct rte_flow_item *item,
				       uint64_t item_flags,
				       uint8_t target_protocol,
				       struct rte_flow_error *error);
int mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
				  uint64_t item_flags,
				  uint8_t target_protocol,
				  struct rte_flow_error *error);
int mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
				   uint64_t item_flags,
				   struct rte_eth_dev *dev,
				   struct rte_flow_error *error);
int mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
				       uint64_t last_item,
				       const struct rte_flow_item *geneve_item,
				       struct rte_eth_dev *dev,
				       struct rte_flow_error *error);
int mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
				  uint64_t item_flags,
				  uint64_t last_item,
				  uint16_t ether_type,
				  const struct rte_flow_item_ecpri *acc_mask,
				  struct rte_flow_error *error);
/* Meter table and policy management. */
int mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
			      struct mlx5_flow_meter_info *fm,
			      uint32_t mtr_idx,
			      uint8_t domain_bitmap);
void mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
				struct mlx5_flow_meter_info *fm);
void mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev);
struct mlx5_flow_meter_sub_policy *mlx5_flow_meter_sub_policy_rss_prepare
		(struct rte_eth_dev *dev,
		 struct mlx5_flow_meter_policy *mtr_policy,
		 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
void mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
		struct mlx5_flow_meter_policy *mtr_policy);
int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);
int mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev);
int mlx5_action_handle_attach(struct rte_eth_dev *dev);
int mlx5_action_handle_detach(struct rte_eth_dev *dev);
int mlx5_action_handle_flush(struct rte_eth_dev *dev);
void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);
int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh);

/* mlx5_list callbacks for DV flow table resources. */
struct mlx5_list_entry *flow_dv_tbl_create_cb(void *tool_ctx, void *entry_ctx);
int flow_dv_tbl_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
			 void *cb_ctx);
void flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
struct mlx5_list_entry *flow_dv_tbl_clone_cb(void *tool_ctx,
					     struct mlx5_list_entry *oentry,
					     void *entry_ctx);
void flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
struct mlx5_flow_tbl_resource *flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
		uint32_t table_level, uint8_t egress, uint8_t transfer,
		bool external, const struct mlx5_flow_tunnel *tunnel,
		uint32_t group_id, uint8_t dummy,
		uint32_t table_id, struct rte_flow_error *error);
int flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
				 struct mlx5_flow_tbl_resource *tbl);

/* mlx5_list callbacks for DV tag resources. */
struct mlx5_list_entry *flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx);
int flow_dv_tag_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
			 void *cb_ctx);
void flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
struct mlx5_list_entry *flow_dv_tag_clone_cb(void *tool_ctx,
					     struct mlx5_list_entry *oentry,
					     void *cb_ctx);
void flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);

/* mlx5_list callbacks for modify-header resources. */
int flow_dv_modify_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
			    void *cb_ctx);
struct mlx5_list_entry *flow_dv_modify_create_cb(void *tool_ctx, void *ctx);
void flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
struct mlx5_list_entry *flow_dv_modify_clone_cb(void *tool_ctx,
						struct mlx5_list_entry *oentry,
						void *ctx);
void flow_dv_modify_clone_free_cb(void *tool_ctx,
				  struct mlx5_list_entry *entry);

/* mlx5_list callbacks for metadata-register copy resources. */
struct mlx5_list_entry *flow_dv_mreg_create_cb(void *tool_ctx, void *ctx);
int flow_dv_mreg_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
			  void *cb_ctx);
void flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
struct mlx5_list_entry *flow_dv_mreg_clone_cb(void *tool_ctx,
					      struct mlx5_list_entry *entry,
					      void *ctx);
void flow_dv_mreg_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);

/* mlx5_list callbacks for encap/decap resources. */
int flow_dv_encap_decap_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
				 void *cb_ctx);
struct mlx5_list_entry *flow_dv_encap_decap_create_cb(void *tool_ctx,
						      void *cb_ctx);
void flow_dv_encap_decap_remove_cb(void *tool_ctx,
				   struct mlx5_list_entry *entry);
struct mlx5_list_entry *flow_dv_encap_decap_clone_cb(void *tool_ctx,
						struct mlx5_list_entry *entry,
						void *cb_ctx);
void flow_dv_encap_decap_clone_free_cb(void *tool_ctx,
				       struct mlx5_list_entry *entry);

/* mlx5_list callbacks for flow matcher resources. */
int flow_dv_matcher_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
			     void *ctx);
struct mlx5_list_entry *flow_dv_matcher_create_cb(void *tool_ctx, void *ctx);
void flow_dv_matcher_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);

/* mlx5_list callbacks for port-id action resources. */
int flow_dv_port_id_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
			     void *cb_ctx);
struct mlx5_list_entry *flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx);
void flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
struct mlx5_list_entry *flow_dv_port_id_clone_cb(void *tool_ctx,
				struct mlx5_list_entry *entry, void *cb_ctx);
void flow_dv_port_id_clone_free_cb(void *tool_ctx,
				   struct mlx5_list_entry *entry);

/* mlx5_list callbacks for push-VLAN action resources. */
int flow_dv_push_vlan_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
			       void *cb_ctx);
struct mlx5_list_entry *flow_dv_push_vlan_create_cb(void *tool_ctx,
						    void *cb_ctx);
void flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
struct mlx5_list_entry *flow_dv_push_vlan_clone_cb(void *tool_ctx,
				struct mlx5_list_entry *entry, void *cb_ctx);
void flow_dv_push_vlan_clone_free_cb(void *tool_ctx,
				     struct mlx5_list_entry *entry);

/* mlx5_list callbacks for sample action resources. */
int flow_dv_sample_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
			    void *cb_ctx);
struct mlx5_list_entry *flow_dv_sample_create_cb(void *tool_ctx, void *cb_ctx);
void flow_dv_sample_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
struct mlx5_list_entry *flow_dv_sample_clone_cb(void *tool_ctx,
				struct mlx5_list_entry *entry, void *cb_ctx);
void flow_dv_sample_clone_free_cb(void *tool_ctx,
				  struct mlx5_list_entry *entry);

/* mlx5_list callbacks for destination-array action resources. */
int flow_dv_dest_array_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
				void *cb_ctx);
struct mlx5_list_entry *flow_dv_dest_array_create_cb(void *tool_ctx,
						     void *cb_ctx);
void flow_dv_dest_array_remove_cb(void *tool_ctx,
				  struct mlx5_list_entry *entry);
struct mlx5_list_entry *flow_dv_dest_array_clone_cb(void *tool_ctx,
				struct mlx5_list_entry *entry, void *cb_ctx);
void flow_dv_dest_array_clone_free_cb(void *tool_ctx,
				      struct mlx5_list_entry *entry);
/* RSS hash-field computation helpers. */
void flow_dv_hashfields_set(uint64_t item_flags,
			    struct mlx5_flow_rss_desc *rss_desc,
			    uint64_t *hash_fields);
void flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,
					uint64_t *hash_field);
uint32_t flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
					const uint64_t hash_fields);

/* mlx5_list callbacks for HWS group resources. */
struct mlx5_list_entry *flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx);
void flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
int flow_hw_grp_match_cb(void *tool_ctx,
			 struct mlx5_list_entry *entry,
			 void *cb_ctx);
struct mlx5_list_entry *flow_hw_grp_clone_cb(void *tool_ctx,
					     struct mlx5_list_entry *oentry,
					     void *cb_ctx);
void flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);

struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev,
						    uint32_t age_idx);
/* GENEVE TLV option resource management. */
int flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
					   const struct rte_flow_item *item,
					   struct rte_flow_error *error);
void flow_dev_geneve_tlv_option_resource_release(struct mlx5_dev_ctx_shared *sh);

/* OS-specific per-thread flow workspace handling. */
void flow_release_workspace(void *data);
int mlx5_flow_os_init_workspace_once(void);
void *mlx5_flow_os_get_specific_workspace(void);
int mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data);
void mlx5_flow_os_release_workspace(void);
/* Meter object and policy entry points dispatched to the flow engine. */
uint32_t mlx5_flow_mtr_alloc(struct rte_eth_dev *dev);
void mlx5_flow_mtr_free(struct rte_eth_dev *dev, uint32_t mtr_idx);
int mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev,
			const struct rte_flow_action *actions[RTE_COLORS],
			struct rte_flow_attr *attr,
			bool *is_rss,
			uint8_t *domain_bitmap,
			uint8_t *policy_mode,
			struct rte_mtr_error *error);
void mlx5_flow_destroy_mtr_acts(struct rte_eth_dev *dev,
			struct mlx5_flow_meter_policy *mtr_policy);
int mlx5_flow_create_mtr_acts(struct rte_eth_dev *dev,
			struct mlx5_flow_meter_policy *mtr_policy,
			const struct rte_flow_action *actions[RTE_COLORS],
			struct rte_flow_attr *attr,
			struct rte_mtr_error *error);
int mlx5_flow_create_policy_rules(struct rte_eth_dev *dev,
			struct mlx5_flow_meter_policy *mtr_policy);
void mlx5_flow_destroy_policy_rules(struct rte_eth_dev *dev,
			struct mlx5_flow_meter_policy *mtr_policy);
int mlx5_flow_create_def_policy(struct rte_eth_dev *dev);
void mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev);
void flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
			    struct mlx5_flow_handle *dev_handle);
const struct mlx5_flow_tunnel *
mlx5_get_tof(const struct rte_flow_item *items,
	     const struct rte_flow_action *actions,
	     enum mlx5_tof_rule_type *rule_type);
void
flow_hw_resource_release(struct rte_eth_dev *dev);
void
flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable);
/* DV indirect (shared) action implementation. */
int flow_dv_action_validate(struct rte_eth_dev *dev,
			    const struct rte_flow_indir_action_conf *conf,
			    const struct rte_flow_action *action,
			    struct rte_flow_error *err);
struct rte_flow_action_handle *flow_dv_action_create(struct rte_eth_dev *dev,
		const struct rte_flow_indir_action_conf *conf,
		const struct rte_flow_action *action,
		struct rte_flow_error *err);
int flow_dv_action_destroy(struct rte_eth_dev *dev,
			   struct rte_flow_action_handle *handle,
			   struct rte_flow_error *error);
int flow_dv_action_update(struct rte_eth_dev *dev,
			  struct rte_flow_action_handle *handle,
			  const void *update,
			  struct rte_flow_error *err);
int flow_dv_action_query(struct rte_eth_dev *dev,
			 const struct rte_flow_action_handle *handle,
			 void *data,
			 struct rte_flow_error *error);
size_t flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type);
int flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
			       size_t *size, struct rte_flow_error *error);
/* MODIFY_FIELD action translation helpers. */
void mlx5_flow_field_id_to_modify_info
		(const struct rte_flow_action_modify_data *data,
		 struct field_modify_info *info, uint32_t *mask,
		 uint32_t width, struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr, struct rte_flow_error *error);
int flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error);

/* Well-known E-Switch vport identifiers. */
#define MLX5_PF_VPORT_ID 0
#define MLX5_ECPF_VPORT_ID 0xFFFE

int16_t mlx5_flow_get_esw_manager_vport_id(struct rte_eth_dev *dev);
int mlx5_flow_get_item_vport_id(struct rte_eth_dev *dev,
				const struct rte_flow_item *item,
				uint16_t *vport_id,
				bool *all_ports,
				struct rte_flow_error *error);

int flow_dv_translate_items_hws(const struct rte_flow_item *items,
				struct mlx5_flow_attr *attr, void *key,
				uint32_t key_type, uint64_t *item_flags,
				uint8_t *match_criteria,
				struct rte_flow_error *error);

int mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
				  uint16_t *proxy_port_id,
				  struct rte_flow_error *error);
/* Stub (null driver) counter/aging implementation. */
int flow_null_get_aged_flows(struct rte_eth_dev *dev,
			     void **context,
			     uint32_t nb_contexts,
			     struct rte_flow_error *error);
uint32_t flow_null_counter_allocate(struct rte_eth_dev *dev);
void flow_null_counter_free(struct rte_eth_dev *dev,
			    uint32_t counter);
int flow_null_counter_query(struct rte_eth_dev *dev,
			    uint32_t counter,
			    bool clear,
			    uint64_t *pkts,
			    uint64_t *bytes,
			    void **action);

int mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *dev);

/* HWS E-Switch control flow rule creation. */
int mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev,
					 uint32_t sqn);
int mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev);
int mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev);
int mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn);
int mlx5_flow_actions_validate(struct rte_eth_dev *dev,
		const struct rte_flow_actions_template_attr *attr,
		const struct rte_flow_action actions[],
		const struct rte_flow_action masks[],
		struct rte_flow_error *error);
int mlx5_flow_pattern_validate(struct rte_eth_dev *dev,
		const struct rte_flow_pattern_template_attr *attr,
		const struct rte_flow_item items[],
		struct rte_flow_error *error);
int flow_hw_table_update(struct rte_eth_dev *dev,
			 struct rte_flow_error *error);
int mlx5_flow_item_field_width(struct rte_eth_dev *dev,
			   enum rte_flow_field_id field, int inherit,
			   const struct rte_flow_attr *attr,
			   struct rte_flow_error *error);

/*
 * Find the byte offset of the SRv6 flex parser sample for the port owning
 * @p dr_ctx; returns UINT32_MAX when no matching port/parser is found.
 * NOTE(review): the return type is int but UINT32_MAX (implicitly -1) is
 * returned as the "not found" value; callers comparing the result against
 * UINT32_MAX still match after integer promotion — confirm this is intended.
 */
static __rte_always_inline int
flow_hw_get_srh_flex_parser_byte_off_from_ctx(void *dr_ctx __rte_unused)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	uint16_t port;

	MLX5_ETH_FOREACH_DEV(port, NULL) {
		struct mlx5_priv *priv;
		struct mlx5_hca_flex_attr *attr;

		priv = rte_eth_devices[port].data->dev_private;
		attr = &priv->sh->cdev->config.hca_attr.flex;
		/* Match the port whose mlx5dr context is @p dr_ctx. */
		if (priv->dr_ctx == dr_ctx && attr->ext_sample_id) {
			if (priv->sh->srh_flex_parser.num)
				return priv->sh->srh_flex_parser.ids[0].format_select_dw *
					sizeof(uint32_t);
			else
				return UINT32_MAX;
		}
	}
#endif
	return UINT32_MAX;
}
#endif /* RTE_PMD_MLX5_FLOW_H_ */