/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_FLOW_H_
#define RTE_PMD_MLX5_FLOW_H_

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_atomic.h>
#include <rte_alarm.h>
#include <rte_mtr.h>

#include <mlx5_prm.h>

#include "mlx5.h"

/* Private rte flow items. */
enum mlx5_rte_flow_item_type {
	MLX5_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
	MLX5_RTE_FLOW_ITEM_TYPE_TAG,
	MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
	MLX5_RTE_FLOW_ITEM_TYPE_VLAN,
};

/* Private (internal) rte flow actions. */
enum mlx5_rte_flow_action_type {
	MLX5_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
	MLX5_RTE_FLOW_ACTION_TYPE_TAG,
	MLX5_RTE_FLOW_ACTION_TYPE_MARK,
	MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
};

/* Matches on selected register. */
struct mlx5_rte_flow_item_tag {
	enum modify_reg id;
	uint32_t data;
};

/* Modify selected register. */
struct mlx5_rte_flow_action_set_tag {
	enum modify_reg id;
	uint32_t data;
};

struct mlx5_flow_action_copy_mreg {
	enum modify_reg dst;
	enum modify_reg src;
};

/* Matches on source queue. */
struct mlx5_rte_flow_item_tx_queue {
	uint32_t queue;
};

/* Feature name to allocate metadata register. */
enum mlx5_feature_name {
	MLX5_HAIRPIN_RX,
	MLX5_HAIRPIN_TX,
	MLX5_METADATA_RX,
	MLX5_METADATA_TX,
	MLX5_METADATA_FDB,
	MLX5_FLOW_MARK,
	MLX5_APP_TAG,
	MLX5_COPY_MARK,
	MLX5_MTR_COLOR,
	MLX5_MTR_SFX,
};

/* Pattern outer Layer bits. */
#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)

/* Pattern inner Layer bits. */
#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)

/* Pattern tunnel Layer bits. */
#define MLX5_FLOW_LAYER_VXLAN (1u << 12)
#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
#define MLX5_FLOW_LAYER_GRE (1u << 14)
#define MLX5_FLOW_LAYER_MPLS (1u << 15)
/* List of tunnel Layer bits continued below. */

/* General pattern items bits. */
#define MLX5_FLOW_ITEM_METADATA (1u << 16)
#define MLX5_FLOW_ITEM_PORT_ID (1u << 17)
#define MLX5_FLOW_ITEM_TAG (1u << 18)
#define MLX5_FLOW_ITEM_MARK (1u << 19)

/* Pattern MISC bits. */
#define MLX5_FLOW_LAYER_ICMP (1u << 20)
#define MLX5_FLOW_LAYER_ICMP6 (1u << 21)
#define MLX5_FLOW_LAYER_GRE_KEY (1u << 22)

/* Pattern tunnel Layer bits (continued). */
#define MLX5_FLOW_LAYER_IPIP (1u << 23)
#define MLX5_FLOW_LAYER_IPV6_ENCAP (1u << 24)
#define MLX5_FLOW_LAYER_NVGRE (1u << 25)
#define MLX5_FLOW_LAYER_GENEVE (1u << 26)
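
/*
 * Illustration (not part of the API): the layer bits above are OR-ed into the
 * 64-bit "layers" field of a device flow. For example, a TCP/IPv4 packet
 * carried inside a VXLAN tunnel over UDP/IPv4 would typically be described
 * as:
 *
 *   uint64_t layers = MLX5_FLOW_LAYER_OUTER_L2 |
 *                     MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
 *                     MLX5_FLOW_LAYER_OUTER_L4_UDP |
 *                     MLX5_FLOW_LAYER_VXLAN |
 *                     MLX5_FLOW_LAYER_INNER_L2 |
 *                     MLX5_FLOW_LAYER_INNER_L3_IPV4 |
 *                     MLX5_FLOW_LAYER_INNER_L4_TCP;
 *
 * The exact combination depends on the pattern items of the rte_flow rule.
 */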

/* Queue items. */
#define MLX5_FLOW_ITEM_TX_QUEUE (1u << 27)

/* Pattern tunnel Layer bits (continued). */
#define MLX5_FLOW_LAYER_GTP (1u << 28)

/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
#define MLX5_FLOW_LAYER_OUTER_L4 \
	(MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
#define MLX5_FLOW_LAYER_OUTER \
	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
	 MLX5_FLOW_LAYER_OUTER_L4)

/* Tunnel Masks. */
#define MLX5_FLOW_LAYER_TUNNEL \
	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
	 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
	 MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
	 MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP)

/* Inner Masks. */
#define MLX5_FLOW_LAYER_INNER_L3 \
	(MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
#define MLX5_FLOW_LAYER_INNER_L4 \
	(MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
#define MLX5_FLOW_LAYER_INNER \
	(MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
	 MLX5_FLOW_LAYER_INNER_L4)

/* Layer Masks. */
#define MLX5_FLOW_LAYER_L2 \
	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_INNER_L2)
#define MLX5_FLOW_LAYER_L3_IPV4 \
	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV4)
#define MLX5_FLOW_LAYER_L3_IPV6 \
	(MLX5_FLOW_LAYER_OUTER_L3_IPV6 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
#define MLX5_FLOW_LAYER_L3 \
	(MLX5_FLOW_LAYER_L3_IPV4 | MLX5_FLOW_LAYER_L3_IPV6)
#define MLX5_FLOW_LAYER_L4 \
	(MLX5_FLOW_LAYER_OUTER_L4 | MLX5_FLOW_LAYER_INNER_L4)

/* Actions */
#define MLX5_FLOW_ACTION_DROP (1u << 0)
#define MLX5_FLOW_ACTION_QUEUE (1u << 1)
#define MLX5_FLOW_ACTION_RSS (1u << 2)
#define MLX5_FLOW_ACTION_FLAG (1u << 3)
#define MLX5_FLOW_ACTION_MARK (1u << 4)
#define MLX5_FLOW_ACTION_COUNT (1u << 5)
#define MLX5_FLOW_ACTION_PORT_ID (1u << 6)
#define MLX5_FLOW_ACTION_OF_POP_VLAN (1u << 7)
#define MLX5_FLOW_ACTION_OF_PUSH_VLAN (1u << 8)
#define MLX5_FLOW_ACTION_OF_SET_VLAN_VID (1u << 9)
#define MLX5_FLOW_ACTION_OF_SET_VLAN_PCP (1u << 10)
#define MLX5_FLOW_ACTION_SET_IPV4_SRC (1u << 11)
#define MLX5_FLOW_ACTION_SET_IPV4_DST (1u << 12)
#define MLX5_FLOW_ACTION_SET_IPV6_SRC (1u << 13)
#define MLX5_FLOW_ACTION_SET_IPV6_DST (1u << 14)
#define MLX5_FLOW_ACTION_SET_TP_SRC (1u << 15)
#define MLX5_FLOW_ACTION_SET_TP_DST (1u << 16)
#define MLX5_FLOW_ACTION_JUMP (1u << 17)
#define MLX5_FLOW_ACTION_SET_TTL (1u << 18)
#define MLX5_FLOW_ACTION_DEC_TTL (1u << 19)
#define MLX5_FLOW_ACTION_SET_MAC_SRC (1u << 20)
#define MLX5_FLOW_ACTION_SET_MAC_DST (1u << 21)
#define MLX5_FLOW_ACTION_ENCAP (1u << 22)
#define MLX5_FLOW_ACTION_DECAP (1u << 23)
#define MLX5_FLOW_ACTION_INC_TCP_SEQ (1u << 24)
#define MLX5_FLOW_ACTION_DEC_TCP_SEQ (1u << 25)
#define MLX5_FLOW_ACTION_INC_TCP_ACK (1u << 26)
#define MLX5_FLOW_ACTION_DEC_TCP_ACK (1u << 27)
#define MLX5_FLOW_ACTION_SET_TAG (1ull << 28)
#define MLX5_FLOW_ACTION_MARK_EXT (1ull << 29)
#define MLX5_FLOW_ACTION_SET_META (1ull << 30)
#define MLX5_FLOW_ACTION_METER (1ull << 31)
#define MLX5_FLOW_ACTION_SET_IPV4_DSCP (1ull << 32)
#define MLX5_FLOW_ACTION_SET_IPV6_DSCP (1ull << 33)
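
/*
 * Note: the detected actions are accumulated in a 64-bit field, which is why
 * the higher bits (MLX5_FLOW_ACTION_SET_TAG and onwards) are defined with
 * 1ull. Illustration (not part of the API) of how such a bit-field is
 * typically consumed during validation, using the MLX5_FLOW_FATE_ACTIONS
 * mask defined below:
 *
 *   uint64_t action_flags = MLX5_FLOW_ACTION_MARK | MLX5_FLOW_ACTION_COUNT;
 *
 *   if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
 *           ; // no fate action yet, a DROP/QUEUE/RSS/JUMP is still required
 */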

#define MLX5_FLOW_FATE_ACTIONS \
	(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \
	 MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_JUMP)

#define MLX5_FLOW_FATE_ESWITCH_ACTIONS \
	(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \
	 MLX5_FLOW_ACTION_JUMP)

#define MLX5_FLOW_MODIFY_HDR_ACTIONS (MLX5_FLOW_ACTION_SET_IPV4_SRC | \
				      MLX5_FLOW_ACTION_SET_IPV4_DST | \
				      MLX5_FLOW_ACTION_SET_IPV6_SRC | \
				      MLX5_FLOW_ACTION_SET_IPV6_DST | \
				      MLX5_FLOW_ACTION_SET_TP_SRC | \
				      MLX5_FLOW_ACTION_SET_TP_DST | \
				      MLX5_FLOW_ACTION_SET_TTL | \
				      MLX5_FLOW_ACTION_DEC_TTL | \
				      MLX5_FLOW_ACTION_SET_MAC_SRC | \
				      MLX5_FLOW_ACTION_SET_MAC_DST | \
				      MLX5_FLOW_ACTION_INC_TCP_SEQ | \
				      MLX5_FLOW_ACTION_DEC_TCP_SEQ | \
				      MLX5_FLOW_ACTION_INC_TCP_ACK | \
				      MLX5_FLOW_ACTION_DEC_TCP_ACK | \
				      MLX5_FLOW_ACTION_OF_SET_VLAN_VID | \
				      MLX5_FLOW_ACTION_SET_TAG | \
				      MLX5_FLOW_ACTION_MARK_EXT | \
				      MLX5_FLOW_ACTION_SET_META | \
				      MLX5_FLOW_ACTION_SET_IPV4_DSCP | \
				      MLX5_FLOW_ACTION_SET_IPV6_DSCP)

#define MLX5_FLOW_VLAN_ACTIONS (MLX5_FLOW_ACTION_OF_POP_VLAN | \
				MLX5_FLOW_ACTION_OF_PUSH_VLAN)

#define MLX5_FLOW_XCAP_ACTIONS (MLX5_FLOW_ACTION_ENCAP | MLX5_FLOW_ACTION_DECAP)

#ifndef IPPROTO_MPLS
#define IPPROTO_MPLS 137
#endif

/* UDP port number for MPLS. */
#define MLX5_UDP_PORT_MPLS 6635

/* UDP port numbers for VxLAN. */
#define MLX5_UDP_PORT_VXLAN 4789
#define MLX5_UDP_PORT_VXLAN_GPE 4790

/* UDP port numbers for GENEVE. */
#define MLX5_UDP_PORT_GENEVE 6081

/* Priority reserved for default flows. */
#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)

/*
 * Number of sub priorities.
 * For each kind of pattern matching, i.e. L2, L3, L4, to have correct
 * matching on the NIC (firmware dependent), L4 must have the highest
 * priority, followed by L3 and ending with L2.
 */
#define MLX5_PRIORITY_MAP_L2 2
#define MLX5_PRIORITY_MAP_L3 1
#define MLX5_PRIORITY_MAP_L4 0
#define MLX5_PRIORITY_MAP_MAX 3

/* Valid layer type for IPV4 RSS. */
#define MLX5_IPV4_LAYER_TYPES \
	(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
	 ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
	 ETH_RSS_NONFRAG_IPV4_OTHER)

/* IBV hash source bits for IPV4. */
#define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)

/* Valid layer type for IPV6 RSS. */
#define MLX5_IPV6_LAYER_TYPES \
	(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP | \
	 ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | \
	 ETH_RSS_IPV6_UDP_EX | ETH_RSS_NONFRAG_IPV6_OTHER)

/* IBV hash source bits for IPV6. */
#define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)

/* IBV hash bits for L3 SRC. */
#define MLX5_L3_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_SRC_IPV6)

/* IBV hash bits for L3 DST. */
#define MLX5_L3_DST_IBV_RX_HASH (IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_DST_IPV6)

/* IBV hash bits for TCP. */
#define MLX5_TCP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \
			      IBV_RX_HASH_DST_PORT_TCP)

/* IBV hash bits for UDP. */
#define MLX5_UDP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_UDP | \
			      IBV_RX_HASH_DST_PORT_UDP)

/* IBV hash bits for L4 SRC. */
#define MLX5_L4_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \
				 IBV_RX_HASH_SRC_PORT_UDP)

/* IBV hash bits for L4 DST. */
#define MLX5_L4_DST_IBV_RX_HASH (IBV_RX_HASH_DST_PORT_TCP | \
				 IBV_RX_HASH_DST_PORT_UDP)

/* Geneve header first 16 bits. */
#define MLX5_GENEVE_VER_MASK 0x3
#define MLX5_GENEVE_VER_SHIFT 14
#define MLX5_GENEVE_VER_VAL(a) \
	(((a) >> (MLX5_GENEVE_VER_SHIFT)) & (MLX5_GENEVE_VER_MASK))
#define MLX5_GENEVE_OPTLEN_MASK 0x3F
#define MLX5_GENEVE_OPTLEN_SHIFT 7
#define MLX5_GENEVE_OPTLEN_VAL(a) \
	(((a) >> (MLX5_GENEVE_OPTLEN_SHIFT)) & (MLX5_GENEVE_OPTLEN_MASK))
#define MLX5_GENEVE_OAMF_MASK 0x1
#define MLX5_GENEVE_OAMF_SHIFT 7
#define MLX5_GENEVE_OAMF_VAL(a) \
	(((a) >> (MLX5_GENEVE_OAMF_SHIFT)) & (MLX5_GENEVE_OAMF_MASK))
#define MLX5_GENEVE_CRITO_MASK 0x1
#define MLX5_GENEVE_CRITO_SHIFT 6
#define MLX5_GENEVE_CRITO_VAL(a) \
	(((a) >> (MLX5_GENEVE_CRITO_SHIFT)) & (MLX5_GENEVE_CRITO_MASK))
#define MLX5_GENEVE_RSVD_MASK 0x3F
#define MLX5_GENEVE_RSVD_VAL(a) ((a) & (MLX5_GENEVE_RSVD_MASK))
/*
 * The length of the Geneve options fields, expressed in four byte multiples,
 * not including the eight byte fixed tunnel header.
 */
#define MLX5_GENEVE_OPT_LEN_0 14
#define MLX5_GENEVE_OPT_LEN_1 63

#define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \
					  sizeof(struct rte_flow_item_ipv4))

enum mlx5_flow_drv_type {
	MLX5_FLOW_TYPE_MIN,
	MLX5_FLOW_TYPE_DV,
	MLX5_FLOW_TYPE_VERBS,
	MLX5_FLOW_TYPE_MAX,
};

/* Matcher PRM representation. */
struct mlx5_flow_dv_match_params {
	size_t size;
	/**< Size of match value. Do NOT split size and key! */
	uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)];
	/**< Matcher value. This value is used as the mask or as a key. */
};

/* Matcher structure. */
struct mlx5_flow_dv_matcher {
	LIST_ENTRY(mlx5_flow_dv_matcher) next;
	/**< Pointer to the next element. */
	struct mlx5_flow_tbl_resource *tbl;
	/**< Pointer to the table (group) the matcher is associated with. */
	rte_atomic32_t refcnt; /**< Reference counter. */
	void *matcher_object; /**< Pointer to DV matcher. */
	uint16_t crc; /**< CRC of key. */
	uint16_t priority; /**< Priority of matcher. */
	struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */
};

#define MLX5_ENCAP_MAX_LEN 132

/* Encap/decap resource structure. */
struct mlx5_flow_dv_encap_decap_resource {
	LIST_ENTRY(mlx5_flow_dv_encap_decap_resource) next;
	/* Pointer to next element. */
	rte_atomic32_t refcnt; /**< Reference counter. */
	void *verbs_action;
	/**< Verbs encap/decap action object. */
	uint8_t buf[MLX5_ENCAP_MAX_LEN];
	size_t size;
	uint8_t reformat_type;
	uint8_t ft_type;
	uint64_t flags; /**< Flags for RDMA API. */
};

/* Tag resource structure. */
struct mlx5_flow_dv_tag_resource {
	struct mlx5_hlist_entry entry;
	/**< hash list entry for tag resource, tag value as the key. */
	void *action;
	/**< Verbs tag action object. */
	rte_atomic32_t refcnt; /**< Reference counter. */
};

/*
 * Number of modification commands.
 * If extensive metadata registers are supported, the maximal actions amount
 * on the root table is 16, and 8 otherwise. The validation could also be done
 * in the lower driver layer.
 * On non-root tables there is no limitation, but 32 is enough right now.
 */
#define MLX5_MAX_MODIFY_NUM 32
#define MLX5_ROOT_TBL_MODIFY_NUM 16
#define MLX5_ROOT_TBL_MODIFY_NUM_NO_MREG 8
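
/*
 * Illustration (not part of the API): a driver-side check of a composed
 * modify-header resource would typically compare against these limits.
 * "mreg_supported" below is a hypothetical flag, "attr" and "resource" are
 * assumed locals of the caller; real code reports the failure through
 * rte_flow_error_set() rather than returning a bare errno:
 *
 *   uint32_t limit = (attr->group == 0) ?
 *                    (mreg_supported ? MLX5_ROOT_TBL_MODIFY_NUM :
 *                                      MLX5_ROOT_TBL_MODIFY_NUM_NO_MREG) :
 *                    MLX5_MAX_MODIFY_NUM;
 *
 *   if (resource->actions_num > limit)
 *           return -EINVAL; // too many modification commands for this table
 */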

/* Modify resource structure. */
struct mlx5_flow_dv_modify_hdr_resource {
	LIST_ENTRY(mlx5_flow_dv_modify_hdr_resource) next;
	/* Pointer to next element. */
	rte_atomic32_t refcnt; /**< Reference counter. */
	struct ibv_flow_action *verbs_action;
	/**< Verbs modify header action object. */
	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
	uint32_t actions_num; /**< Number of modification actions. */
	uint64_t flags; /**< Flags for RDMA API. */
	struct mlx5_modification_cmd actions[];
	/**< Modification actions. */
};

/* Jump action resource structure. */
struct mlx5_flow_dv_jump_tbl_resource {
	rte_atomic32_t refcnt; /**< Reference counter. */
	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
	void *action; /**< Pointer to the rdma core action. */
};

/* Port ID resource structure. */
struct mlx5_flow_dv_port_id_action_resource {
	LIST_ENTRY(mlx5_flow_dv_port_id_action_resource) next;
	/* Pointer to next element. */
	rte_atomic32_t refcnt; /**< Reference counter. */
	void *action;
	/**< Port ID action object. */
	uint32_t port_id; /**< Port ID value. */
};

/* Push VLAN action resource structure. */
struct mlx5_flow_dv_push_vlan_action_resource {
	LIST_ENTRY(mlx5_flow_dv_push_vlan_action_resource) next;
	/* Pointer to next element. */
	rte_atomic32_t refcnt; /**< Reference counter. */
	void *action; /**< Direct verbs action object. */
	uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */
	rte_be32_t vlan_tag; /**< VLAN tag value. */
};

/* Metadata register copy table entry. */
struct mlx5_flow_mreg_copy_resource {
	/*
	 * Hash list entry for copy table.
	 * - Key is 32/64-bit MARK action ID.
	 * - MUST be the first entry.
	 */
	struct mlx5_hlist_entry hlist_ent;
	LIST_ENTRY(mlx5_flow_mreg_copy_resource) next;
	/* List entry for device flows. */
	uint32_t refcnt; /* Reference counter. */
	uint32_t appcnt; /* Apply/Remove counter. */
	struct rte_flow *flow; /* Built flow for copy. */
};

/* Table data structure of the hash organization. */
struct mlx5_flow_tbl_data_entry {
	struct mlx5_hlist_entry entry;
	/**< hash list entry, 64-bit key inside. */
	struct mlx5_flow_tbl_resource tbl;
	/**< flow table resource. */
	LIST_HEAD(matchers, mlx5_flow_dv_matcher) matchers;
	/**< matchers' header associated with the flow table. */
	struct mlx5_flow_dv_jump_tbl_resource jump;
	/**< jump resource, at most one for each table created. */
};

/*
 * Max number of actions per DV flow.
 * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
 * in rdma-core file providers/mlx5/verbs.c.
 */
#define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8

/* DV flows structure. */
struct mlx5_flow_dv {
	struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
	/* Flow DV api: */
	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
	struct mlx5_flow_dv_match_params value;
	/**< Holds the value that the packet is compared to. */
	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
	/**< Pointer to encap/decap resource in cache. */
	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
	/**< Pointer to modify header resource in cache. */
	struct ibv_flow *flow; /**< Installed flow. */
	struct mlx5_flow_dv_jump_tbl_resource *jump;
	/**< Pointer to the jump action resource. */
	struct mlx5_flow_dv_port_id_action_resource *port_id_action;
	/**< Pointer to port ID action resource. */
	struct mlx5_vf_vlan vf_vlan;
	/**< Structure for VF VLAN workaround. */
	struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res;
	/**< Pointer to push VLAN action resource in cache. */
	struct mlx5_flow_dv_tag_resource *tag_resource;
	/**< pointer to the tag action. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS];
	/**< Action list. */
#endif
	int actions_n; /**< number of actions. */
};

/* Verbs specification header. */
struct ibv_spec_header {
	enum ibv_flow_spec_type type;
	uint16_t size;
};

/** Handles information leading to a drop fate. */
struct mlx5_flow_verbs {
	LIST_ENTRY(mlx5_flow_verbs) next;
	unsigned int size; /**< Size of the attribute. */
	struct {
		struct ibv_flow_attr *attr;
		/**< Pointer to the Specification buffer. */
		uint8_t *specs; /**< Pointer to the specifications. */
	};
	struct ibv_flow *flow; /**< Verbs flow pointer. */
	struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
	struct mlx5_vf_vlan vf_vlan;
	/**< Structure for VF VLAN workaround. */
};

struct mlx5_flow_rss {
	uint32_t level;
	uint32_t queue_num; /**< Number of entries in @p queue. */
	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
	uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
};

/** Device flow structure. */
struct mlx5_flow {
	LIST_ENTRY(mlx5_flow) next;
	struct rte_flow *flow; /**< Pointer to the main flow. */
	uint64_t layers;
	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
	uint64_t actions;
	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
	uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
	uint8_t ingress; /**< 1 if the flow is ingress. */
	uint32_t group; /**< The group index. */
	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
	union {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
		struct mlx5_flow_dv dv;
#endif
		struct mlx5_flow_verbs verbs;
	};
	union {
		uint32_t qrss_id; /**< Unique Q/RSS suffix subflow tag. */
		uint32_t mtr_flow_id; /**< Unique meter match flow id. */
	};
	bool external; /**< true if the flow is created external to PMD. */
};

/* Flow meter state. */
#define MLX5_FLOW_METER_DISABLE 0
#define MLX5_FLOW_METER_ENABLE 1

#define MLX5_MAN_WIDTH 8
/* Modify this value if enum rte_mtr_color changes. */
#define RTE_MTR_DROPPED RTE_COLORS

/* Meter policer statistics. */
struct mlx5_flow_policer_stats {
	struct mlx5_flow_counter *cnt[RTE_COLORS + 1];
	/**< Color counter, extra for drop. */
	uint64_t stats_mask;
	/**< Statistics mask for the colors. */
};

/* Meter table structure. */
struct mlx5_meter_domain_info {
	struct mlx5_flow_tbl_resource *tbl;
	/**< Meter table. */
	void *any_matcher;
	/**< Meter color not match default criteria. */
	void *color_matcher;
	/**< Meter color match criteria. */
	void *jump_actn;
	/**< Meter match action. */
	void *policer_rules[RTE_MTR_DROPPED + 1];
	/**< Meter policer for the match. */
};
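
/*
 * Illustration (not part of the API): the per-color arrays above are indexed
 * by enum rte_color, with one extra slot for dropped packets, e.g.:
 *
 *   void *green_rule = dinfo->policer_rules[RTE_COLOR_GREEN];
 *   void *drop_rule  = dinfo->policer_rules[RTE_MTR_DROPPED];
 *
 * where "dinfo" is assumed to point to a struct mlx5_meter_domain_info.
 */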

/* Meter table set for TX, RX and FDB. */
struct mlx5_meter_domains_infos {
	uint32_t ref_cnt;
	/**< Table user count. */
	struct mlx5_meter_domain_info egress;
	/**< TX meter table. */
	struct mlx5_meter_domain_info ingress;
	/**< RX meter table. */
	struct mlx5_meter_domain_info transfer;
	/**< FDB meter table. */
	void *drop_actn;
	/**< Drop action as not matched. */
	void *count_actns[RTE_MTR_DROPPED + 1];
	/**< Counters for matched and unmatched statistics. */
	uint32_t fmp[MLX5_ST_SZ_DW(flow_meter_parameters)];
	/**< Flow meter parameter. */
	size_t fmp_size;
	/**< Flow meter parameter size. */
	void *meter_action;
	/**< Flow meter action. */
};

/* Meter parameter structure. */
struct mlx5_flow_meter {
	TAILQ_ENTRY(mlx5_flow_meter) next;
	/**< Pointer to the next flow meter structure. */
	uint32_t meter_id;
	/**< Meter id. */
	struct rte_mtr_params params;
	/**< Meter rule parameters. */
	struct mlx5_flow_meter_profile *profile;
	/**< Meter profile parameters. */
	struct rte_flow_attr attr;
	/**< Flow attributes. */
	struct mlx5_meter_domains_infos *mfts;
	/**< Flow table created for this meter. */
	struct mlx5_flow_policer_stats policer_stats;
	/**< Meter policer statistics. */
	uint32_t ref_cnt;
	/**< Use count. */
	uint32_t active_state:1;
	/**< Meter state. */
	uint32_t shared:1;
	/**< Meter shared or not. */
};

/* RFC2697 parameter structure. */
struct mlx5_flow_meter_srtcm_rfc2697_prm {
	/* green_saturation_value = cbs_mantissa * 2^cbs_exponent */
	uint32_t cbs_exponent:5;
	uint32_t cbs_mantissa:8;
	/* cir = 8G * cir_mantissa * 1/(2^cir_exponent) Bytes/Sec */
	uint32_t cir_exponent:5;
	uint32_t cir_mantissa:8;
	/* yellow_saturation_value = ebs_mantissa * 2^ebs_exponent */
	uint32_t ebs_exponent:5;
	uint32_t ebs_mantissa:8;
};

/* Flow meter profile structure. */
struct mlx5_flow_meter_profile {
	TAILQ_ENTRY(mlx5_flow_meter_profile) next;
	/**< Pointer to the next flow meter structure. */
	uint32_t meter_profile_id; /**< Profile id. */
	struct rte_mtr_meter_profile profile; /**< Profile detail. */
	union {
		struct mlx5_flow_meter_srtcm_rfc2697_prm srtcm_prm;
		/**< srtcm_rfc2697 struct. */
	};
	uint32_t ref_cnt; /**< Use count. */
};
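
/*
 * Illustration (not part of the API): the srtcm_rfc2697_prm fields above
 * encode burst sizes and rates as mantissa/exponent pairs. For instance,
 * applying the committed burst size formula from the comments:
 *
 *   cbs_mantissa = 64, cbs_exponent = 10
 *   green_saturation_value = 64 * 2^10 = 65536 bytes
 *
 * The concrete values programmed for a given rte_mtr profile are computed by
 * the meter code at configuration time.
 */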

/* Flow structure. */
struct rte_flow {
	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
	enum mlx5_flow_drv_type drv_type; /**< Driver type. */
	struct mlx5_flow_rss rss; /**< RSS context. */
	struct mlx5_flow_counter *counter; /**< Holds flow counter. */
	struct mlx5_flow_mreg_copy_resource *mreg_copy;
	/**< pointer to metadata register copy table resource. */
	struct mlx5_flow_meter *meter; /**< Holds flow meter. */
	LIST_HEAD(dev_flows, mlx5_flow) dev_flows;
	/**< Device flows that are part of the flow. */
	struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
	uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */
	uint32_t copy_applied:1; /**< The MARK copy flow is applied. */
};

typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
				    const struct rte_flow_attr *attr,
				    const struct rte_flow_item items[],
				    const struct rte_flow_action actions[],
				    bool external,
				    struct rte_flow_error *error);
typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
	(const struct rte_flow_attr *attr, const struct rte_flow_item items[],
	 const struct rte_flow_action actions[], struct rte_flow_error *error);
typedef int (*mlx5_flow_translate_t)(struct rte_eth_dev *dev,
				     struct mlx5_flow *dev_flow,
				     const struct rte_flow_attr *attr,
				     const struct rte_flow_item items[],
				     const struct rte_flow_action actions[],
				     struct rte_flow_error *error);
typedef int (*mlx5_flow_apply_t)(struct rte_eth_dev *dev,
				 struct rte_flow *flow,
				 struct rte_flow_error *error);
typedef void (*mlx5_flow_remove_t)(struct rte_eth_dev *dev,
				   struct rte_flow *flow);
typedef void (*mlx5_flow_destroy_t)(struct rte_eth_dev *dev,
				    struct rte_flow *flow);
typedef int (*mlx5_flow_query_t)(struct rte_eth_dev *dev,
				 struct rte_flow *flow,
				 const struct rte_flow_action *actions,
				 void *data,
				 struct rte_flow_error *error);
typedef struct mlx5_meter_domains_infos *(*mlx5_flow_create_mtr_tbls_t)
				(struct rte_eth_dev *dev,
				 const struct mlx5_flow_meter *fm);
typedef int (*mlx5_flow_destroy_mtr_tbls_t)
				(struct rte_eth_dev *dev,
				 struct mlx5_meter_domains_infos *tbls);
typedef int (*mlx5_flow_create_policer_rules_t)
				(struct rte_eth_dev *dev,
				 struct mlx5_flow_meter *fm,
				 const struct rte_flow_attr *attr);
typedef int (*mlx5_flow_destroy_policer_rules_t)
				(struct rte_eth_dev *dev,
				 const struct mlx5_flow_meter *fm,
				 const struct rte_flow_attr *attr);
typedef struct mlx5_flow_counter *(*mlx5_flow_counter_alloc_t)
				(struct rte_eth_dev *dev);
typedef void (*mlx5_flow_counter_free_t)(struct rte_eth_dev *dev,
					 struct mlx5_flow_counter *cnt);
typedef int (*mlx5_flow_counter_query_t)(struct rte_eth_dev *dev,
					 struct mlx5_flow_counter *cnt,
					 bool clear, uint64_t *pkts,
					 uint64_t *bytes);
struct mlx5_flow_driver_ops {
	mlx5_flow_validate_t validate;
	mlx5_flow_prepare_t prepare;
	mlx5_flow_translate_t translate;
	mlx5_flow_apply_t apply;
	mlx5_flow_remove_t remove;
	mlx5_flow_destroy_t destroy;
	mlx5_flow_query_t query;
	mlx5_flow_create_mtr_tbls_t create_mtr_tbls;
	mlx5_flow_destroy_mtr_tbls_t destroy_mtr_tbls;
	mlx5_flow_create_policer_rules_t create_policer_rules;
	mlx5_flow_destroy_policer_rules_t destroy_policer_rules;
	mlx5_flow_counter_alloc_t counter_alloc;
	mlx5_flow_counter_free_t counter_free;
	mlx5_flow_counter_query_t counter_query;
};

#define MLX5_CNT_CONTAINER(sh, batch, thread) (&(sh)->cmng.ccont \
	[(((sh)->cmng.mhi[batch] >> (thread)) & 0x1) * 2 + (batch)])
#define MLX5_CNT_CONTAINER_UNUSED(sh, batch, thread) (&(sh)->cmng.ccont \
	[(~((sh)->cmng.mhi[batch] >> (thread)) & 0x1) * 2 + (batch)])
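
/*
 * Illustration (not part of the API): MLX5_CNT_CONTAINER() selects one of the
 * four counter containers kept in (sh)->cmng.ccont[]. The per-thread bit
 * taken from mhi[batch] picks the currently active half (0 or 1) for that
 * batch type, so the resulting index is:
 *
 *   index = active_bit * 2 + batch   // 0..3
 *
 * MLX5_CNT_CONTAINER_UNUSED() returns the container of the other half by
 * inverting that bit.
 */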

/* mlx5_flow.c */

struct mlx5_flow_id_pool *mlx5_flow_id_pool_alloc(uint32_t max_id);
void mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool);
uint32_t mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id);
uint32_t mlx5_flow_id_release(struct mlx5_flow_id_pool *pool,
			      uint32_t id);
int mlx5_flow_group_to_table(const struct rte_flow_attr *attributes,
			     bool external, uint32_t group, bool fdb_def_rule,
			     uint32_t *table, struct rte_flow_error *error);
uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, int tunnel,
				     uint64_t layer_types,
				     uint64_t hash_fields);
uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
				   uint32_t subpriority);
int mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
			 enum mlx5_feature_name feature,
			 uint32_t id,
			 struct rte_flow_error *error);
const struct rte_flow_action *mlx5_flow_find_action
				(const struct rte_flow_action *actions,
				 enum rte_flow_action_type action);
int mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
				    const struct rte_flow_attr *attr,
				    struct rte_flow_error *error);
int mlx5_flow_validate_action_drop(uint64_t action_flags,
				   const struct rte_flow_attr *attr,
				   struct rte_flow_error *error);
int mlx5_flow_validate_action_flag(uint64_t action_flags,
				   const struct rte_flow_attr *attr,
				   struct rte_flow_error *error);
int mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
				   uint64_t action_flags,
				   const struct rte_flow_attr *attr,
				   struct rte_flow_error *error);
int mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
				    uint64_t action_flags,
				    struct rte_eth_dev *dev,
				    const struct rte_flow_attr *attr,
				    struct rte_flow_error *error);
int mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
				  uint64_t action_flags,
				  struct rte_eth_dev *dev,
				  const struct rte_flow_attr *attr,
				  uint64_t item_flags,
				  struct rte_flow_error *error);
int mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
				  const struct rte_flow_attr *attributes,
				  struct rte_flow_error *error);
int mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			      const uint8_t *mask,
			      const uint8_t *nic_mask,
			      unsigned int size,
			      struct rte_flow_error *error);
int mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
				uint64_t item_flags,
				struct rte_flow_error *error);
int mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
				uint64_t item_flags,
				uint8_t target_protocol,
				struct rte_flow_error *error);
int mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
				    uint64_t item_flags,
				    const struct rte_flow_item *gre_item,
				    struct rte_flow_error *error);
int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
				 uint64_t item_flags,
				 uint64_t last_item,
				 uint16_t ether_type,
				 const struct rte_flow_item_ipv4 *acc_mask,
				 struct rte_flow_error *error);
int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
				 uint64_t item_flags,
				 uint64_t last_item,
				 uint16_t ether_type,
				 const struct rte_flow_item_ipv6 *acc_mask,
				 struct rte_flow_error *error);
int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
				 const struct rte_flow_item *item,
				 uint64_t item_flags,
				 uint64_t prev_layer,
				 struct rte_flow_error *error);
int mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
				uint64_t item_flags,
				uint8_t target_protocol,
				const struct rte_flow_item_tcp *flow_mask,
				struct rte_flow_error *error);
int mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
				uint64_t item_flags,
				uint8_t target_protocol,
				struct rte_flow_error *error);
int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
				 uint64_t item_flags,
				 struct rte_eth_dev *dev,
				 struct rte_flow_error *error);
int mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
				  uint64_t item_flags,
				  struct rte_flow_error *error);
int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
				      uint64_t item_flags,
				      struct rte_eth_dev *dev,
				      struct rte_flow_error *error);
int mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
				 uint64_t item_flags,
				 uint8_t target_protocol,
				 struct rte_flow_error *error);
int mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
				  uint64_t item_flags,
				  uint8_t target_protocol,
				  struct rte_flow_error *error);
int mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
				  uint64_t item_flags,
				  uint8_t target_protocol,
				  struct rte_flow_error *error);
int mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
				   uint64_t item_flags,
				   struct rte_eth_dev *dev,
				   struct rte_flow_error *error);
struct mlx5_meter_domains_infos *mlx5_flow_create_mtr_tbls
				(struct rte_eth_dev *dev,
				 const struct mlx5_flow_meter *fm);
int mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
			       struct mlx5_meter_domains_infos *tbl);
int mlx5_flow_create_policer_rules(struct rte_eth_dev *dev,
				   struct mlx5_flow_meter *fm,
				   const struct rte_flow_attr *attr);
int mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
				    struct mlx5_flow_meter *fm,
				    const struct rte_flow_attr *attr);
int mlx5_flow_meter_flush(struct rte_eth_dev *dev,
			  struct rte_mtr_error *error);

#endif /* RTE_PMD_MLX5_FLOW_H_ */