/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_FLOW_H_
#define RTE_PMD_MLX5_FLOW_H_

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_atomic.h>
#include <rte_alarm.h>
#include <rte_mtr.h>

#include <mlx5_prm.h>

#include "mlx5.h"

/* Private rte flow items. */
enum mlx5_rte_flow_item_type {
	MLX5_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
	MLX5_RTE_FLOW_ITEM_TYPE_TAG,
	MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
};

/* Private (internal) rte flow actions. */
enum mlx5_rte_flow_action_type {
	MLX5_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
	MLX5_RTE_FLOW_ACTION_TYPE_TAG,
	MLX5_RTE_FLOW_ACTION_TYPE_MARK,
	MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
};

/* Matches on selected register. */
struct mlx5_rte_flow_item_tag {
	enum modify_reg id;
	uint32_t data;
};

/* Modify selected register. */
struct mlx5_rte_flow_action_set_tag {
	enum modify_reg id;
	uint32_t data;
};

struct mlx5_flow_action_copy_mreg {
	enum modify_reg dst;
	enum modify_reg src;
};

/* Matches on source queue. */
struct mlx5_rte_flow_item_tx_queue {
	uint32_t queue;
};

/* Feature name to allocate metadata register. */
enum mlx5_feature_name {
	MLX5_HAIRPIN_RX,
	MLX5_HAIRPIN_TX,
	MLX5_METADATA_RX,
	MLX5_METADATA_TX,
	MLX5_METADATA_FDB,
	MLX5_FLOW_MARK,
	MLX5_APP_TAG,
	MLX5_COPY_MARK,
	MLX5_MTR_COLOR,
	MLX5_MTR_SFX,
};
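/*
 * Illustrative sketch (not part of the API): the private item/action types
 * above start at INT_MIN, so they can never collide with the public
 * RTE_FLOW_* enum values and are simply cast into the generic rte_flow
 * arrays used for internal flows. REG_C_0 is just an example value from
 * enum modify_reg.
 *
 *	struct mlx5_rte_flow_item_tag tag_spec = {
 *		.id = REG_C_0,
 *		.data = 1,
 *	};
 *	struct rte_flow_item item = {
 *		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
 *		.spec = &tag_spec,
 *	};
 */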
/* Pattern outer Layer bits. */
#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)

/* Pattern inner Layer bits. */
#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)

/* Pattern tunnel Layer bits. */
#define MLX5_FLOW_LAYER_VXLAN (1u << 12)
#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
#define MLX5_FLOW_LAYER_GRE (1u << 14)
#define MLX5_FLOW_LAYER_MPLS (1u << 15)
/* List of tunnel Layer bits continued below. */

/* General pattern items bits. */
#define MLX5_FLOW_ITEM_METADATA (1u << 16)
#define MLX5_FLOW_ITEM_PORT_ID (1u << 17)
#define MLX5_FLOW_ITEM_TAG (1u << 18)
#define MLX5_FLOW_ITEM_MARK (1u << 19)

/* Pattern MISC bits. */
#define MLX5_FLOW_LAYER_ICMP (1u << 20)
#define MLX5_FLOW_LAYER_ICMP6 (1u << 21)
#define MLX5_FLOW_LAYER_GRE_KEY (1u << 22)

/* Pattern tunnel Layer bits (continued). */
#define MLX5_FLOW_LAYER_IPIP (1u << 23)
#define MLX5_FLOW_LAYER_IPV6_ENCAP (1u << 24)
#define MLX5_FLOW_LAYER_NVGRE (1u << 25)
#define MLX5_FLOW_LAYER_GENEVE (1u << 26)

/* Queue items. */
#define MLX5_FLOW_ITEM_TX_QUEUE (1u << 27)

/* Pattern tunnel Layer bits (continued). */
#define MLX5_FLOW_LAYER_GTP (1u << 28)

/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
#define MLX5_FLOW_LAYER_OUTER_L4 \
	(MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
#define MLX5_FLOW_LAYER_OUTER \
	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
	 MLX5_FLOW_LAYER_OUTER_L4)

/* Tunnel Masks. */
#define MLX5_FLOW_LAYER_TUNNEL \
	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
	 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
	 MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
	 MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP)

/* Inner Masks. */
#define MLX5_FLOW_LAYER_INNER_L3 \
	(MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
#define MLX5_FLOW_LAYER_INNER_L4 \
	(MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
#define MLX5_FLOW_LAYER_INNER \
	(MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
	 MLX5_FLOW_LAYER_INNER_L4)

/* Layer Masks. */
#define MLX5_FLOW_LAYER_L2 \
	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_INNER_L2)
#define MLX5_FLOW_LAYER_L3_IPV4 \
	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV4)
#define MLX5_FLOW_LAYER_L3_IPV6 \
	(MLX5_FLOW_LAYER_OUTER_L3_IPV6 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
#define MLX5_FLOW_LAYER_L3 \
	(MLX5_FLOW_LAYER_L3_IPV4 | MLX5_FLOW_LAYER_L3_IPV6)
#define MLX5_FLOW_LAYER_L4 \
	(MLX5_FLOW_LAYER_OUTER_L4 | MLX5_FLOW_LAYER_INNER_L4)
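/*
 * Illustrative example only: a device flow accumulates the bits above in
 * its 64-bit "layers" field, e.g. an outer IPv4/UDP VXLAN pattern would set
 *
 *	layers = MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
 *		 MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_VXLAN;
 *
 * and the presence of any tunnel can then be tested with
 * (layers & MLX5_FLOW_LAYER_TUNNEL).
 */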
/* Actions */
#define MLX5_FLOW_ACTION_DROP (1u << 0)
#define MLX5_FLOW_ACTION_QUEUE (1u << 1)
#define MLX5_FLOW_ACTION_RSS (1u << 2)
#define MLX5_FLOW_ACTION_FLAG (1u << 3)
#define MLX5_FLOW_ACTION_MARK (1u << 4)
#define MLX5_FLOW_ACTION_COUNT (1u << 5)
#define MLX5_FLOW_ACTION_PORT_ID (1u << 6)
#define MLX5_FLOW_ACTION_OF_POP_VLAN (1u << 7)
#define MLX5_FLOW_ACTION_OF_PUSH_VLAN (1u << 8)
#define MLX5_FLOW_ACTION_OF_SET_VLAN_VID (1u << 9)
#define MLX5_FLOW_ACTION_OF_SET_VLAN_PCP (1u << 10)
#define MLX5_FLOW_ACTION_SET_IPV4_SRC (1u << 11)
#define MLX5_FLOW_ACTION_SET_IPV4_DST (1u << 12)
#define MLX5_FLOW_ACTION_SET_IPV6_SRC (1u << 13)
#define MLX5_FLOW_ACTION_SET_IPV6_DST (1u << 14)
#define MLX5_FLOW_ACTION_SET_TP_SRC (1u << 15)
#define MLX5_FLOW_ACTION_SET_TP_DST (1u << 16)
#define MLX5_FLOW_ACTION_JUMP (1u << 17)
#define MLX5_FLOW_ACTION_SET_TTL (1u << 18)
#define MLX5_FLOW_ACTION_DEC_TTL (1u << 19)
#define MLX5_FLOW_ACTION_SET_MAC_SRC (1u << 20)
#define MLX5_FLOW_ACTION_SET_MAC_DST (1u << 21)
#define MLX5_FLOW_ACTION_VXLAN_ENCAP (1u << 22)
#define MLX5_FLOW_ACTION_VXLAN_DECAP (1u << 23)
#define MLX5_FLOW_ACTION_NVGRE_ENCAP (1u << 24)
#define MLX5_FLOW_ACTION_NVGRE_DECAP (1u << 25)
#define MLX5_FLOW_ACTION_RAW_ENCAP (1u << 26)
#define MLX5_FLOW_ACTION_RAW_DECAP (1u << 27)
#define MLX5_FLOW_ACTION_INC_TCP_SEQ (1u << 28)
#define MLX5_FLOW_ACTION_DEC_TCP_SEQ (1u << 29)
#define MLX5_FLOW_ACTION_INC_TCP_ACK (1u << 30)
#define MLX5_FLOW_ACTION_DEC_TCP_ACK (1u << 31)
#define MLX5_FLOW_ACTION_SET_TAG (1ull << 32)
#define MLX5_FLOW_ACTION_MARK_EXT (1ull << 33)
#define MLX5_FLOW_ACTION_SET_META (1ull << 34)
#define MLX5_FLOW_ACTION_METER (1ull << 35)
#define MLX5_FLOW_ACTION_SET_IPV4_DSCP (1ull << 36)
#define MLX5_FLOW_ACTION_SET_IPV6_DSCP (1ull << 37)

#define MLX5_FLOW_FATE_ACTIONS \
	(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \
	 MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_JUMP)

#define MLX5_FLOW_FATE_ESWITCH_ACTIONS \
	(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \
	 MLX5_FLOW_ACTION_JUMP)

#define MLX5_FLOW_ENCAP_ACTIONS	(MLX5_FLOW_ACTION_VXLAN_ENCAP | \
				 MLX5_FLOW_ACTION_NVGRE_ENCAP | \
				 MLX5_FLOW_ACTION_RAW_ENCAP)

#define MLX5_FLOW_DECAP_ACTIONS	(MLX5_FLOW_ACTION_VXLAN_DECAP | \
				 MLX5_FLOW_ACTION_NVGRE_DECAP | \
				 MLX5_FLOW_ACTION_RAW_DECAP)

#define MLX5_FLOW_MODIFY_HDR_ACTIONS (MLX5_FLOW_ACTION_SET_IPV4_SRC | \
				      MLX5_FLOW_ACTION_SET_IPV4_DST | \
				      MLX5_FLOW_ACTION_SET_IPV6_SRC | \
				      MLX5_FLOW_ACTION_SET_IPV6_DST | \
				      MLX5_FLOW_ACTION_SET_TP_SRC | \
				      MLX5_FLOW_ACTION_SET_TP_DST | \
				      MLX5_FLOW_ACTION_SET_TTL | \
				      MLX5_FLOW_ACTION_DEC_TTL | \
				      MLX5_FLOW_ACTION_SET_MAC_SRC | \
				      MLX5_FLOW_ACTION_SET_MAC_DST | \
				      MLX5_FLOW_ACTION_INC_TCP_SEQ | \
				      MLX5_FLOW_ACTION_DEC_TCP_SEQ | \
				      MLX5_FLOW_ACTION_INC_TCP_ACK | \
				      MLX5_FLOW_ACTION_DEC_TCP_ACK | \
				      MLX5_FLOW_ACTION_OF_SET_VLAN_VID | \
				      MLX5_FLOW_ACTION_SET_TAG | \
				      MLX5_FLOW_ACTION_MARK_EXT | \
				      MLX5_FLOW_ACTION_SET_META | \
				      MLX5_FLOW_ACTION_SET_IPV4_DSCP | \
				      MLX5_FLOW_ACTION_SET_IPV6_DSCP)

#define MLX5_FLOW_VLAN_ACTIONS (MLX5_FLOW_ACTION_OF_POP_VLAN | \
				MLX5_FLOW_ACTION_OF_PUSH_VLAN)

#ifndef IPPROTO_MPLS
#define IPPROTO_MPLS 137
#endif

/* UDP port number for MPLS */
#define MLX5_UDP_PORT_MPLS 6635

/* UDP port numbers for VxLAN. */
#define MLX5_UDP_PORT_VXLAN 4789
#define MLX5_UDP_PORT_VXLAN_GPE 4790

/* UDP port numbers for GENEVE. */
#define MLX5_UDP_PORT_GENEVE 6081

/* Priority reserved for default flows. */
#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)

/*
 * Number of sub priorities.
 * For each kind of pattern matching, i.e. L2, L3, L4, to get correct
 * matching on the NIC (firmware dependent), L4 must have the highest
 * priority, followed by L3 and ending with L2.
 */
#define MLX5_PRIORITY_MAP_L2 2
#define MLX5_PRIORITY_MAP_L3 1
#define MLX5_PRIORITY_MAP_L4 0
#define MLX5_PRIORITY_MAP_MAX 3

/* Valid layer type for IPV4 RSS. */
#define MLX5_IPV4_LAYER_TYPES \
	(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
	 ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
	 ETH_RSS_NONFRAG_IPV4_OTHER)

/* IBV hash source bits for IPV4. */
#define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)

/* Valid layer type for IPV6 RSS. */
#define MLX5_IPV6_LAYER_TYPES \
	(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP | \
	 ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | \
	 ETH_RSS_IPV6_UDP_EX | ETH_RSS_NONFRAG_IPV6_OTHER)

/* IBV hash source bits for IPV6. */
#define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)

/* IBV hash bits for L3 SRC. */
#define MLX5_L3_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_SRC_IPV6)

/* IBV hash bits for L3 DST. */
#define MLX5_L3_DST_IBV_RX_HASH (IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_DST_IPV6)

/* IBV hash bits for TCP. */
#define MLX5_TCP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \
			      IBV_RX_HASH_DST_PORT_TCP)

/* IBV hash bits for UDP. */
#define MLX5_UDP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_UDP | \
			      IBV_RX_HASH_DST_PORT_UDP)

/* IBV hash bits for L4 SRC. */
#define MLX5_L4_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \
				 IBV_RX_HASH_SRC_PORT_UDP)

/* IBV hash bits for L4 DST. */
#define MLX5_L4_DST_IBV_RX_HASH (IBV_RX_HASH_DST_PORT_TCP | \
				 IBV_RX_HASH_DST_PORT_UDP)
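/*
 * Illustrative example: the Verbs hash fields of an RSS flow are built by
 * OR-ing the groups above according to the matched layers, e.g. an
 * IPv4/UDP flow would use
 *
 *	hash_fields = MLX5_IPV4_IBV_RX_HASH | MLX5_UDP_IBV_RX_HASH;
 */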
/* Geneve header, first 16-bit word. */
#define MLX5_GENEVE_VER_MASK 0x3
#define MLX5_GENEVE_VER_SHIFT 14
#define MLX5_GENEVE_VER_VAL(a) \
	(((a) >> (MLX5_GENEVE_VER_SHIFT)) & (MLX5_GENEVE_VER_MASK))
#define MLX5_GENEVE_OPTLEN_MASK 0x3F
#define MLX5_GENEVE_OPTLEN_SHIFT 7
#define MLX5_GENEVE_OPTLEN_VAL(a) \
	(((a) >> (MLX5_GENEVE_OPTLEN_SHIFT)) & (MLX5_GENEVE_OPTLEN_MASK))
#define MLX5_GENEVE_OAMF_MASK 0x1
#define MLX5_GENEVE_OAMF_SHIFT 7
#define MLX5_GENEVE_OAMF_VAL(a) \
	(((a) >> (MLX5_GENEVE_OAMF_SHIFT)) & (MLX5_GENEVE_OAMF_MASK))
#define MLX5_GENEVE_CRITO_MASK 0x1
#define MLX5_GENEVE_CRITO_SHIFT 6
#define MLX5_GENEVE_CRITO_VAL(a) \
	(((a) >> (MLX5_GENEVE_CRITO_SHIFT)) & (MLX5_GENEVE_CRITO_MASK))
#define MLX5_GENEVE_RSVD_MASK 0x3F
#define MLX5_GENEVE_RSVD_VAL(a) ((a) & (MLX5_GENEVE_RSVD_MASK))
/*
 * The length of the Geneve options fields, expressed in four byte
 * multiples, not including the eight byte fixed tunnel header.
 */
#define MLX5_GENEVE_OPT_LEN_0 14
#define MLX5_GENEVE_OPT_LEN_1 63
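/*
 * Illustrative sketch: decoding the first 16-bit word of a Geneve header
 * with the helpers above. The field name comes from
 * struct rte_flow_item_geneve; "geneve" is a hypothetical pointer to it,
 * and the option length is expressed in 4-byte units.
 *
 *	uint16_t word = rte_be_to_cpu_16(geneve->ver_opt_len_o_c_rsvd0);
 *	uint8_t version = MLX5_GENEVE_VER_VAL(word);
 *	uint8_t opt_len = MLX5_GENEVE_OPTLEN_VAL(word);
 */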
enum mlx5_flow_drv_type {
	MLX5_FLOW_TYPE_MIN,
	MLX5_FLOW_TYPE_DV,
	MLX5_FLOW_TYPE_VERBS,
	MLX5_FLOW_TYPE_MAX,
};

/* Matcher PRM representation */
struct mlx5_flow_dv_match_params {
	size_t size;
	/**< Size of match value. Do NOT split size and key! */
	uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)];
	/**< Matcher value. This value is used as the mask or as a key. */
};

/* Matcher structure. */
struct mlx5_flow_dv_matcher {
	LIST_ENTRY(mlx5_flow_dv_matcher) next;
	/**< Pointer to the next element. */
	struct mlx5_flow_tbl_resource *tbl;
	/**< Pointer to the table (group) the matcher is associated with. */
	rte_atomic32_t refcnt; /**< Reference counter. */
	void *matcher_object; /**< Pointer to DV matcher. */
	uint16_t crc; /**< CRC of key. */
	uint16_t priority; /**< Priority of matcher. */
	struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */
};

#define MLX5_ENCAP_MAX_LEN 132

/* Encap/decap resource structure. */
struct mlx5_flow_dv_encap_decap_resource {
	LIST_ENTRY(mlx5_flow_dv_encap_decap_resource) next;
	/* Pointer to next element. */
	rte_atomic32_t refcnt; /**< Reference counter. */
	void *verbs_action;
	/**< Verbs encap/decap action object. */
	uint8_t buf[MLX5_ENCAP_MAX_LEN];
	size_t size;
	uint8_t reformat_type;
	uint8_t ft_type;
	uint64_t flags; /**< Flags for RDMA API. */
};

/* Tag resource structure. */
struct mlx5_flow_dv_tag_resource {
	struct mlx5_hlist_entry entry;
	/**< hash list entry for tag resource, tag value as the key. */
	void *action;
	/**< Verbs tag action object. */
	rte_atomic32_t refcnt; /**< Reference counter. */
};

/*
 * Number of modification commands.
 * If extensive metadata registers are supported, the maximal number of
 * actions on the root table is 16, otherwise 8. The validation could also
 * be done in the lower driver layer.
 * On non-root tables there is no limitation, but 32 is enough right now.
 */
#define MLX5_MAX_MODIFY_NUM 32
#define MLX5_ROOT_TBL_MODIFY_NUM 16
#define MLX5_ROOT_TBL_MODIFY_NUM_NO_MREG 8

/* Modify resource structure */
struct mlx5_flow_dv_modify_hdr_resource {
	LIST_ENTRY(mlx5_flow_dv_modify_hdr_resource) next;
	/* Pointer to next element. */
	rte_atomic32_t refcnt; /**< Reference counter. */
	struct ibv_flow_action *verbs_action;
	/**< Verbs modify header action object. */
	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
	uint32_t actions_num; /**< Number of modification actions. */
	uint64_t flags; /**< Flags for RDMA API. */
	struct mlx5_modification_cmd actions[];
	/**< Modification actions. */
};
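/*
 * Illustrative sketch only: since "actions" is a flexible array member,
 * a resource would be allocated with room for the commands it carries,
 * along the lines of
 *
 *	res = rte_zmalloc(__func__, sizeof(*res) +
 *			  actions_num * sizeof(struct mlx5_modification_cmd),
 *			  0);
 */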
/* Jump action resource structure. */
struct mlx5_flow_dv_jump_tbl_resource {
	rte_atomic32_t refcnt; /**< Reference counter. */
	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
	void *action; /**< Pointer to the rdma core action. */
};

/* Port ID resource structure. */
struct mlx5_flow_dv_port_id_action_resource {
	LIST_ENTRY(mlx5_flow_dv_port_id_action_resource) next;
	/* Pointer to next element. */
	rte_atomic32_t refcnt; /**< Reference counter. */
	void *action;
	/**< Verbs tag action object. */
	uint32_t port_id; /**< Port ID value. */
};

/* Push VLAN action resource structure */
struct mlx5_flow_dv_push_vlan_action_resource {
	LIST_ENTRY(mlx5_flow_dv_push_vlan_action_resource) next;
	/* Pointer to next element. */
	rte_atomic32_t refcnt; /**< Reference counter. */
	void *action; /**< Direct verbs action object. */
	uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */
	rte_be32_t vlan_tag; /**< VLAN tag value. */
};

/* Metadata register copy table entry. */
struct mlx5_flow_mreg_copy_resource {
	/*
	 * Hash list entry for copy table.
	 * - Key is 32/64-bit MARK action ID.
	 * - MUST be the first entry.
	 */
	struct mlx5_hlist_entry hlist_ent;
	LIST_ENTRY(mlx5_flow_mreg_copy_resource) next;
	/* List entry for device flows. */
	uint32_t refcnt; /* Reference counter. */
	uint32_t appcnt; /* Apply/Remove counter. */
	struct rte_flow *flow; /* Built flow for copy. */
};

/* Table data structure of the hash organization. */
struct mlx5_flow_tbl_data_entry {
	struct mlx5_hlist_entry entry;
	/**< hash list entry, 64-bits key inside. */
	struct mlx5_flow_tbl_resource tbl;
	/**< flow table resource. */
	LIST_HEAD(matchers, mlx5_flow_dv_matcher) matchers;
	/**< matchers' header associated with the flow table. */
	struct mlx5_flow_dv_jump_tbl_resource jump;
	/**< jump resource, at most one for each table created. */
};

/*
 * Max number of actions per DV flow.
 * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
 * in rdma-core file providers/mlx5/verbs.c.
 */
#define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8

/* DV flows structure. */
struct mlx5_flow_dv {
	struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
	/* Flow DV api: */
	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
	struct mlx5_flow_dv_match_params value;
	/**< Holds the value that the packet is compared to. */
	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
	/**< Pointer to encap/decap resource in cache. */
	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
	/**< Pointer to modify header resource in cache. */
	struct ibv_flow *flow; /**< Installed flow. */
	struct mlx5_flow_dv_jump_tbl_resource *jump;
	/**< Pointer to the jump action resource. */
	struct mlx5_flow_dv_port_id_action_resource *port_id_action;
	/**< Pointer to port ID action resource. */
	struct mlx5_vf_vlan vf_vlan;
	/**< Structure for VF VLAN workaround. */
	struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res;
	/**< Pointer to push VLAN action resource in cache. */
	struct mlx5_flow_dv_tag_resource *tag_resource;
	/**< pointer to the tag action. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS];
	/**< Action list. */
#endif
	int actions_n; /**< number of actions. */
};

/* Verbs specification header. */
struct ibv_spec_header {
	enum ibv_flow_spec_type type;
	uint16_t size;
};

/** Handles information leading to a drop fate. */
struct mlx5_flow_verbs {
	LIST_ENTRY(mlx5_flow_verbs) next;
	unsigned int size; /**< Size of the attribute. */
	struct {
		struct ibv_flow_attr *attr;
		/**< Pointer to the Specification buffer. */
		uint8_t *specs; /**< Pointer to the specifications. */
	};
	struct ibv_flow *flow; /**< Verbs flow pointer. */
	struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
	struct mlx5_vf_vlan vf_vlan;
	/**< Structure for VF VLAN workaround. */
};
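/*
 * Illustrative sketch (not the actual helper): in the Verbs path the
 * specifications are laid out back to back after the ibv_flow_attr, each
 * one starting with an ibv_spec_header, so appending a spec amounts to
 *
 *	memcpy(verbs->specs + verbs->size, spec, spec_size);
 *	++verbs->attr->num_of_specs;
 *	verbs->size += spec_size;
 */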
struct mlx5_flow_rss {
	uint32_t level;
	uint32_t queue_num; /**< Number of entries in @p queue. */
	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
	uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
};

/** Device flow structure. */
struct mlx5_flow {
	LIST_ENTRY(mlx5_flow) next;
	struct rte_flow *flow; /**< Pointer to the main flow. */
	uint64_t layers;
	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
	uint64_t actions;
	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
	uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
	uint8_t ingress; /**< 1 if the flow is ingress. */
	uint32_t group; /**< The group index. */
	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
	union {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
		struct mlx5_flow_dv dv;
#endif
		struct mlx5_flow_verbs verbs;
	};
	union {
		uint32_t qrss_id; /**< Unique Q/RSS suffix subflow tag. */
		uint32_t mtr_flow_id; /**< Unique meter match flow id. */
	};
	bool external; /**< true if the flow is created external to PMD. */
};

/* Flow meter state. */
#define MLX5_FLOW_METER_DISABLE 0
#define MLX5_FLOW_METER_ENABLE 1

#define MLX5_MAN_WIDTH 8
/* Modify this value if enum rte_mtr_color changes. */
#define RTE_MTR_DROPPED RTE_COLORS

/* Meter policer statistics */
struct mlx5_flow_policer_stats {
	struct mlx5_flow_counter *cnt[RTE_COLORS + 1];
	/**< Color counter, extra for drop. */
	uint64_t stats_mask;
	/**< Statistics mask for the colors. */
};

/* Meter table structure. */
struct mlx5_meter_domain_info {
	struct mlx5_flow_tbl_resource *tbl;
	/**< Meter table. */
	void *any_matcher;
	/**< Meter color not match default criteria. */
	void *color_matcher;
	/**< Meter color match criteria. */
	void *jump_actn;
	/**< Meter match action. */
	void *policer_rules[RTE_MTR_DROPPED + 1];
	/**< Meter policer for the match. */
};

/* Meter table set for TX RX FDB. */
struct mlx5_meter_domains_infos {
	uint32_t ref_cnt;
	/**< Table user count. */
	struct mlx5_meter_domain_info egress;
	/**< TX meter table. */
	struct mlx5_meter_domain_info ingress;
	/**< RX meter table. */
	struct mlx5_meter_domain_info transfer;
	/**< FDB meter table. */
	void *drop_actn;
	/**< Drop action as not matched. */
	void *count_actns[RTE_MTR_DROPPED + 1];
	/**< Counters for matched and unmatched statistics. */
	uint32_t fmp[MLX5_ST_SZ_DW(flow_meter_parameters)];
	/**< Flow meter parameter. */
	size_t fmp_size;
	/**< Flow meter parameter size. */
	void *meter_action;
	/**< Flow meter action. */
};

/* Meter parameter structure. */
struct mlx5_flow_meter {
	TAILQ_ENTRY(mlx5_flow_meter) next;
	/**< Pointer to the next flow meter structure. */
	uint32_t meter_id;
	/**< Meter id. */
	struct rte_mtr_params params;
	/**< Meter rule parameters. */
	struct mlx5_flow_meter_profile *profile;
	/**< Meter profile parameters. */
	struct rte_flow_attr attr;
	/**< Flow attributes. */
	struct mlx5_meter_domains_infos *mfts;
	/**< Flow table created for this meter. */
	struct mlx5_flow_policer_stats policer_stats;
	/**< Meter policer statistics. */
	uint32_t ref_cnt;
	/**< Use count. */
	uint32_t active_state:1;
	/**< Meter state. */
	uint32_t shared:1;
	/**< Meter shared or not. */
};

/* RFC2697 parameter structure. */
struct mlx5_flow_meter_srtcm_rfc2697_prm {
	/* green_saturation_value = cbs_mantissa * 2^cbs_exponent */
	uint32_t cbs_exponent:5;
	uint32_t cbs_mantissa:8;
	/* cir = 8G * cir_mantissa * 1/(2^cir_exponent) Bytes/Sec */
	uint32_t cir_exponent:5;
	uint32_t cir_mantissa:8;
	/* yellow_saturation_value = ebs_mantissa * 2^ebs_exponent */
	uint32_t ebs_exponent:5;
	uint32_t ebs_mantissa:8;
};
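/*
 * Worked example (illustrative, assuming G = 2^30 in the formula above):
 * cir_mantissa = 1 and cir_exponent = 13 give
 * cir = 8 * 2^30 / 2^13 = 2^20 Bytes/sec, i.e. roughly 8.4 Mbit/s of
 * committed information rate.
 */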
/* Flow meter profile structure. */
struct mlx5_flow_meter_profile {
	TAILQ_ENTRY(mlx5_flow_meter_profile) next;
	/**< Pointer to the next flow meter structure. */
	uint32_t meter_profile_id; /**< Profile id. */
	struct rte_mtr_meter_profile profile; /**< Profile detail. */
	union {
		struct mlx5_flow_meter_srtcm_rfc2697_prm srtcm_prm;
		/**< srtcm_rfc2697 struct. */
	};
	uint32_t ref_cnt; /**< Use count. */
};

/* Flow structure. */
struct rte_flow {
	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
	enum mlx5_flow_drv_type drv_type; /**< Driver type. */
	struct mlx5_flow_rss rss; /**< RSS context. */
	struct mlx5_flow_counter *counter; /**< Holds flow counter. */
	struct mlx5_flow_mreg_copy_resource *mreg_copy;
	/**< pointer to metadata register copy table resource. */
	struct mlx5_flow_meter *meter; /**< Holds flow meter. */
	LIST_HEAD(dev_flows, mlx5_flow) dev_flows;
	/**< Device flows that are part of the flow. */
	struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
	uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */
	uint32_t copy_applied:1; /**< The MARK copy flow is applied. */
};

typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
				    const struct rte_flow_attr *attr,
				    const struct rte_flow_item items[],
				    const struct rte_flow_action actions[],
				    bool external,
				    struct rte_flow_error *error);
typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
	(const struct rte_flow_attr *attr, const struct rte_flow_item items[],
	 const struct rte_flow_action actions[], struct rte_flow_error *error);
typedef int (*mlx5_flow_translate_t)(struct rte_eth_dev *dev,
				     struct mlx5_flow *dev_flow,
				     const struct rte_flow_attr *attr,
				     const struct rte_flow_item items[],
				     const struct rte_flow_action actions[],
				     struct rte_flow_error *error);
typedef int (*mlx5_flow_apply_t)(struct rte_eth_dev *dev,
				 struct rte_flow *flow,
				 struct rte_flow_error *error);
typedef void (*mlx5_flow_remove_t)(struct rte_eth_dev *dev,
				   struct rte_flow *flow);
typedef void (*mlx5_flow_destroy_t)(struct rte_eth_dev *dev,
				    struct rte_flow *flow);
typedef int (*mlx5_flow_query_t)(struct rte_eth_dev *dev,
				 struct rte_flow *flow,
				 const struct rte_flow_action *actions,
				 void *data,
				 struct rte_flow_error *error);
typedef struct mlx5_meter_domains_infos *(*mlx5_flow_create_mtr_tbls_t)
					    (struct rte_eth_dev *dev,
					     const struct mlx5_flow_meter *fm);
typedef int (*mlx5_flow_destroy_mtr_tbls_t)
					(struct rte_eth_dev *dev,
					 struct mlx5_meter_domains_infos *tbls);
typedef int (*mlx5_flow_create_policer_rules_t)
					(struct rte_eth_dev *dev,
					 struct mlx5_flow_meter *fm,
					 const struct rte_flow_attr *attr);
typedef int (*mlx5_flow_destroy_policer_rules_t)
					(struct rte_eth_dev *dev,
					 const struct mlx5_flow_meter *fm,
					 const struct rte_flow_attr *attr);
typedef struct mlx5_flow_counter * (*mlx5_flow_counter_alloc_t)
				   (struct rte_eth_dev *dev);
typedef void (*mlx5_flow_counter_free_t)(struct rte_eth_dev *dev,
					 struct mlx5_flow_counter *cnt);
typedef int (*mlx5_flow_counter_query_t)(struct rte_eth_dev *dev,
					 struct mlx5_flow_counter *cnt,
					 bool clear, uint64_t *pkts,
					 uint64_t *bytes);

struct mlx5_flow_driver_ops {
	mlx5_flow_validate_t validate;
	mlx5_flow_prepare_t prepare;
	mlx5_flow_translate_t translate;
	mlx5_flow_apply_t apply;
	mlx5_flow_remove_t remove;
	mlx5_flow_destroy_t destroy;
	mlx5_flow_query_t query;
	mlx5_flow_create_mtr_tbls_t create_mtr_tbls;
	mlx5_flow_destroy_mtr_tbls_t destroy_mtr_tbls;
	mlx5_flow_create_policer_rules_t create_policer_rules;
	mlx5_flow_destroy_policer_rules_t destroy_policer_rules;
	mlx5_flow_counter_alloc_t counter_alloc;
	mlx5_flow_counter_free_t counter_free;
	mlx5_flow_counter_query_t counter_query;
};
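/*
 * Illustrative sketch only (the callback names below are hypothetical):
 * each backend fills one instance of the ops table above and the generic
 * flow layer dispatches on struct rte_flow::drv_type, e.g.
 *
 *	static const struct mlx5_flow_driver_ops flow_verbs_ops = {
 *		.validate = flow_verbs_validate_cb,
 *		.prepare = flow_verbs_prepare_cb,
 *		.translate = flow_verbs_translate_cb,
 *		.apply = flow_verbs_apply_cb,
 *		.remove = flow_verbs_remove_cb,
 *		.destroy = flow_verbs_destroy_cb,
 *	};
 */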
#define MLX5_CNT_CONTAINER(sh, batch, thread) (&(sh)->cmng.ccont \
	[(((sh)->cmng.mhi[batch] >> (thread)) & 0x1) * 2 + (batch)])
#define MLX5_CNT_CONTAINER_UNUSED(sh, batch, thread) (&(sh)->cmng.ccont \
	[(~((sh)->cmng.mhi[batch] >> (thread)) & 0x1) * 2 + (batch)])
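/*
 * Worked example (illustrative): bit "thread" of cmng.mhi[batch] selects
 * which of the two containers of a batch is currently active. With
 * batch = 1 and that bit set, MLX5_CNT_CONTAINER() resolves to
 * &cmng.ccont[3] and MLX5_CNT_CONTAINER_UNUSED() to &cmng.ccont[1].
 */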
/* mlx5_flow.c */

struct mlx5_flow_id_pool *mlx5_flow_id_pool_alloc(uint32_t max_id);
void mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool);
uint32_t mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id);
uint32_t mlx5_flow_id_release(struct mlx5_flow_id_pool *pool,
			      uint32_t id);
int mlx5_flow_group_to_table(const struct rte_flow_attr *attributes,
			     bool external, uint32_t group, bool fdb_def_rule,
			     uint32_t *table, struct rte_flow_error *error);
uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, int tunnel,
				     uint64_t layer_types,
				     uint64_t hash_fields);
uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
				   uint32_t subpriority);
int mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
			 enum mlx5_feature_name feature,
			 uint32_t id,
			 struct rte_flow_error *error);
const struct rte_flow_action *mlx5_flow_find_action
		(const struct rte_flow_action *actions,
		 enum rte_flow_action_type action);
int mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
				    const struct rte_flow_attr *attr,
				    struct rte_flow_error *error);
int mlx5_flow_validate_action_drop(uint64_t action_flags,
				   const struct rte_flow_attr *attr,
				   struct rte_flow_error *error);
int mlx5_flow_validate_action_flag(uint64_t action_flags,
				   const struct rte_flow_attr *attr,
				   struct rte_flow_error *error);
int mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
				   uint64_t action_flags,
				   const struct rte_flow_attr *attr,
				   struct rte_flow_error *error);
int mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
				    uint64_t action_flags,
				    struct rte_eth_dev *dev,
				    const struct rte_flow_attr *attr,
				    struct rte_flow_error *error);
int mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
				  uint64_t action_flags,
				  struct rte_eth_dev *dev,
				  const struct rte_flow_attr *attr,
				  uint64_t item_flags,
				  struct rte_flow_error *error);
int mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
				  const struct rte_flow_attr *attributes,
				  struct rte_flow_error *error);
int mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			      const uint8_t *mask,
			      const uint8_t *nic_mask,
			      unsigned int size,
			      struct rte_flow_error *error);
int mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
				uint64_t item_flags,
				struct rte_flow_error *error);
int mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
				uint64_t item_flags,
				uint8_t target_protocol,
				struct rte_flow_error *error);
int mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
				    uint64_t item_flags,
				    const struct rte_flow_item *gre_item,
				    struct rte_flow_error *error);
int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
				 uint64_t item_flags,
				 uint64_t last_item,
				 uint16_t ether_type,
				 const struct rte_flow_item_ipv4 *acc_mask,
				 struct rte_flow_error *error);
int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
				 uint64_t item_flags,
				 uint64_t last_item,
				 uint16_t ether_type,
				 const struct rte_flow_item_ipv6 *acc_mask,
				 struct rte_flow_error *error);
int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
				 const struct rte_flow_item *item,
				 uint64_t item_flags,
				 uint64_t prev_layer,
				 struct rte_flow_error *error);
int mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
				uint64_t item_flags,
				uint8_t target_protocol,
				const struct rte_flow_item_tcp *flow_mask,
				struct rte_flow_error *error);
int mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
				uint64_t item_flags,
				uint8_t target_protocol,
				struct rte_flow_error *error);
int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
				 uint64_t item_flags,
				 struct rte_eth_dev *dev,
				 struct rte_flow_error *error);
int mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
				  uint64_t item_flags,
				  struct rte_flow_error *error);
int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
				      uint64_t item_flags,
				      struct rte_eth_dev *dev,
				      struct rte_flow_error *error);
int mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
				 uint64_t item_flags,
				 uint8_t target_protocol,
				 struct rte_flow_error *error);
int mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
				  uint64_t item_flags,
				  uint8_t target_protocol,
				  struct rte_flow_error *error);
int mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
				  uint64_t item_flags,
				  uint8_t target_protocol,
				  struct rte_flow_error *error);
int mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
				   uint64_t item_flags,
				   struct rte_eth_dev *dev,
				   struct rte_flow_error *error);
struct mlx5_meter_domains_infos *mlx5_flow_create_mtr_tbls
					(struct rte_eth_dev *dev,
					 const struct mlx5_flow_meter *fm);
int mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
			       struct mlx5_meter_domains_infos *tbl);
int mlx5_flow_create_policer_rules(struct rte_eth_dev *dev,
				   struct mlx5_flow_meter *fm,
				   const struct rte_flow_attr *attr);
int mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
				    struct mlx5_flow_meter *fm,
				    const struct rte_flow_attr *attr);
int mlx5_flow_meter_flush(struct rte_eth_dev *dev,
			  struct rte_mtr_error *error);

#endif /* RTE_PMD_MLX5_FLOW_H_ */